name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
flink_HiveTableUtil_validateConstraint | // returns a constraint trait that requires VALIDATE
public static byte validateConstraint(byte trait) {
return (byte) (trait | HIVE_CONSTRAINT_VALIDATE);
} | 3.68 |
framework_ThemeResource_equals | /**
* Tests if the given object equals this Resource.
*
* @param obj
* the object to be tested for equality.
* @return <code>true</code> if the given object equals this Resource,
* <code>false</code> if not.
* @see java.lang.Object#equals(Object)
*/
@Override
public boolean equals(Object obj) {
return obj instanceof ThemeResource
&& resourceID.equals(((ThemeResource) obj).resourceID);
} | 3.68 |
hbase_SplitTableRegionProcedure_checkSplittable | /**
* Check whether the region is splittable
* @param env MasterProcedureEnv
* @param regionToSplit parent Region to be split
*/
private void checkSplittable(final MasterProcedureEnv env, final RegionInfo regionToSplit)
throws IOException {
// Ask the remote RS if this region is splittable.
// If we get an IOE, report it along with the failure so we can see why we are not splittable at
// this time.
if (regionToSplit.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
throw new IllegalArgumentException("Can't invoke split on non-default regions directly");
}
RegionStateNode node =
env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion());
IOException splittableCheckIOE = null;
boolean splittable = false;
if (node != null) {
try {
GetRegionInfoResponse response;
if (!hasBestSplitRow()) {
LOG.info(
"{} splitKey isn't explicitly specified, will try to find a best split key from RS {}",
node.getRegionInfo().getRegionNameAsString(), node.getRegionLocation());
response = AssignmentManagerUtil.getRegionInfoResponse(env, node.getRegionLocation(),
node.getRegionInfo(), true);
bestSplitRow =
response.hasBestSplitRow() ? response.getBestSplitRow().toByteArray() : null;
} else {
response = AssignmentManagerUtil.getRegionInfoResponse(env, node.getRegionLocation(),
node.getRegionInfo(), false);
}
splittable = response.hasSplittable() && response.getSplittable();
if (LOG.isDebugEnabled()) {
LOG.debug("Splittable=" + splittable + " " + node.toShortString());
}
} catch (IOException e) {
splittableCheckIOE = e;
}
}
if (!splittable) {
IOException e =
new DoNotRetryIOException(regionToSplit.getShortNameToLog() + " NOT splittable");
if (splittableCheckIOE != null) {
e.initCause(splittableCheckIOE);
}
throw e;
}
if (!hasBestSplitRow()) {
throw new DoNotRetryIOException("Region not splittable because bestSplitPoint = null, "
+ "maybe table is too small for auto split. For force split, try specifying split row");
}
if (Bytes.equals(regionToSplit.getStartKey(), bestSplitRow)) {
throw new DoNotRetryIOException(
"Split row is equal to startkey: " + Bytes.toStringBinary(bestSplitRow));
}
if (!regionToSplit.containsRow(bestSplitRow)) {
throw new DoNotRetryIOException("Split row is not inside region key range splitKey:"
+ Bytes.toStringBinary(bestSplitRow) + " region: " + regionToSplit);
}
} | 3.68 |
graphhopper_BitUtil_toLong | /**
* See the counterpart {@link #fromLong(long)}
*/
public final long toLong(byte[] b) {
return toLong(b, 0);
} | 3.68 |
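A minimal round-trip sketch for the snippet above (illustrative only; it assumes GraphHopper's little-endian BitUtil.LITTLE instance):
// Hypothetical usage: fromLong and toLong are counterparts, so the round trip returns the original value.
BitUtil bitUtil = BitUtil.LITTLE;      // assumed singleton instance
byte[] bytes = bitUtil.fromLong(42L);  // 8-byte little-endian encoding
long value = bitUtil.toLong(bytes);    // 42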
flink_EventsGenerator_nextInvalid | /**
* Creates an event for an illegal state transition of one of the internal state machines. If
* the generator has not yet started any state machines (for example, because no call to {@link
* #next(int, int)} has been made yet), this will return null.
*
* @return An event for an illegal state transition, or null if not possible.
*/
@Nullable
public Event nextInvalid() {
final Iterator<Entry<Integer, State>> iter = states.entrySet().iterator();
if (iter.hasNext()) {
final Entry<Integer, State> entry = iter.next();
State currentState = entry.getValue();
int address = entry.getKey();
iter.remove();
EventType event = currentState.randomInvalidTransition(rnd);
return new Event(event, address);
} else {
return null;
}
} | 3.68 |
hbase_MetricsTableRequests_getMetricRegistryInfo | // Visible for testing
public MetricRegistryInfo getMetricRegistryInfo() {
return registryInfo;
} | 3.68 |
shardingsphere-elasticjob_JobNodeStorage_addDataListener | /**
* Add data listener.
*
* @param listener data listener
*/
public void addDataListener(final DataChangedEventListener listener) {
Executor executor = ListenerNotifierManager.getInstance().getJobNotifyExecutor(jobName);
regCenter.watch("/" + jobName, listener, executor);
} | 3.68 |
framework_VTree_getNavigationStartKey | /**
* Get the key that moves the selection to the beginning of the tree. By
* default this is the Home key but by overriding this you can change the
* key to whatever you want.
*
* @return the key code that moves the selection to the start
*/
protected int getNavigationStartKey() {
return KeyCodes.KEY_HOME;
} | 3.68 |
open-banking-gateway_PsuSecureStorage_registerPsu | /**
* Registers PSU in Datasafe
* @param psu PSU data
* @param password PSU KeyStore/Datasafe password.
*/
public void registerPsu(Psu psu, Supplier<char[]> password) {
this.userProfile()
.createDocumentKeystore(
psu.getUserIdAuth(password),
config.defaultPrivateTemplate(psu.getUserIdAuth(password)).buildPrivateProfile()
);
} | 3.68 |
hbase_RegionCoprocessorHost_preScannerNext | /**
* @param s the scanner
* @param results the result set returned by the region server
* @param limit the maximum number of results to return
* @return 'has next' indication to client if bypassing default behavior, or null otherwise
* @exception IOException Exception
*/
public Boolean preScannerNext(final InternalScanner s, final List<Result> results,
final int limit) throws IOException {
boolean bypassable = true;
boolean defaultResult = false;
if (coprocEnvironments.isEmpty()) {
return null;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Boolean>(
regionObserverGetter, defaultResult, bypassable) {
@Override
public Boolean call(RegionObserver observer) throws IOException {
return observer.preScannerNext(this, s, results, limit, getResult());
}
});
} | 3.68 |
hadoop_CurrentJHParser_canParse | /**
* Can this parser parse the input?
*
* @param input the input stream to check
* @return Whether this parser can parse the input.
* @throws IOException if an error occurs while reading the input
*/
public static boolean canParse(InputStream input) throws IOException {
final DataInputStream in = new ForkedDataInputStream(input);
try {
final EventReader reader = new EventReader(in);
try {
reader.getNextEvent();
} catch (IOException e) {
return false;
} finally {
reader.close();
}
} catch (IOException e) {
return false;
}
return true;
} | 3.68 |
hbase_AbstractFSWAL_getNumRolledLogFiles | // public only until class moves to o.a.h.h.wal
/** Returns the number of rolled log files */
public int getNumRolledLogFiles() {
return walFile2Props.size();
} | 3.68 |
morf_SqlUtils_selectDistinct | /**
* Constructs a distinct Select Statement which optionally selects on a subset of fields.
* If no fields are specified then this is equivalent to selecting all
* fields (i.e. {@code SELECT DISTINCT * FROM x}).
*
* <p>Usage is discouraged; this method will be deprecated at some point. Use
* {@link SelectStatement#select(AliasedFieldBuilder...)} for preference. For
* example:</p>
*
* <pre>SelectStatement.select(myFields).distinct().from(foo).build();</pre>
*
* @param fields fields to be selected
* @return {@link SelectStatement}
*/
public static SelectStatement selectDistinct(Iterable<? extends AliasedFieldBuilder> fields) {
return new SelectStatement(fields, true);
} | 3.68 |
pulsar_Topics_delete | /**
* @see Topics#delete(String, boolean, boolean)
* IMPORTANT NOTICE: the application will not be able to connect to the topic (deleted and then re-created with
* the same name) again if schema auto-uploading is disabled. Also, users should use the truncate method to clean
* up data of the topic instead of the delete method if they intend to continue using this topic later.
*/
default void delete(String topic, boolean force) throws PulsarAdminException {
delete(topic, force, true);
} | 3.68 |
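A hedged usage sketch of the notice above (assumes a configured PulsarAdmin client named "admin" and a hypothetical topic name; truncate is the alternative the notice recommends):
String topic = "persistent://public/default/my-topic";  // hypothetical topic
admin.topics().truncate(topic);       // keeps the topic usable afterwards
admin.topics().delete(topic, true);   // force delete; re-connecting after re-creating the topic may fail if schema auto-upload is disabled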
dubbo_ExpiringMap_getTimeToLive | /**
* get time to live
*
* @return time to live
*/
public int getTimeToLive() {
return (int) timeToLiveMillis / 1000;
} | 3.68 |
framework_ExpandingContainer_addItemAt | /**
* @throws UnsupportedOperationException
* always
*/
@Override
public Item addItemAt(int index, Object newItemId) {
throw new UnsupportedOperationException();
} | 3.68 |
hadoop_Lz4Codec_getDecompressorType | /**
* Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
*
* @return the type of decompressor needed by this codec.
*/
@Override
public Class<? extends Decompressor> getDecompressorType() {
return Lz4Decompressor.class;
} | 3.68 |
framework_VAbstractCalendarPanel_isAcceptedByRangeStart | /**
* Accepts dates greater than or equal to rangeStart, depending on the
* resolution. If the resolution is set to DAY, the range will compare on a
* day basis. If the resolution is set to YEAR, only years are compared. So
* even if the range start is set to one millisecond into next year, next year
* will still be included.
*
* @param date the date to check
* @param minResolution the calendar resolution to compare on
* @return true if the date is accepted by the range start, false otherwise
*/
private boolean isAcceptedByRangeStart(Date date, R minResolution) {
assert (date != null);
// rangeStart == null means that we accept all values below rangeEnd
if (rangeStart == null) {
return true;
}
String dateStrResolution = dateStrResolution(date, minResolution);
return rangeStart.substring(0, dateStrResolution.length())
.compareTo(dateStrResolution) <= 0;
} | 3.68 |
hadoop_CoderUtil_getValidIndexes | /**
* Picking up indexes of valid inputs.
* @param inputs decoding input buffers
* @param <T> the type of the input buffers
* @return the indexes of the non-null inputs
*/
static <T> int[] getValidIndexes(T[] inputs) {
int[] validIndexes = new int[inputs.length];
int idx = 0;
for (int i = 0; i < inputs.length; i++) {
if (inputs[i] != null) {
validIndexes[idx++] = i;
}
}
return Arrays.copyOf(validIndexes, idx);
} | 3.68 |
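A small sketch of what the helper above computes (illustrative only; the method is package-private, so this would need to live in the same package):
// Inputs with a null "hole": only positions 0 and 2 are valid.
byte[][] inputs = new byte[][] { new byte[] {1}, null, new byte[] {2} };
int[] valid = CoderUtil.getValidIndexes(inputs);  // returns {0, 2}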
querydsl_MetaDataExporter_setNamePrefix | /**
* Override the name prefix for the classes (default: Q)
*
* @param namePrefix name prefix for querydsl-types (default: Q)
*/
public void setNamePrefix(String namePrefix) {
module.bind(CodegenModule.PREFIX, namePrefix);
} | 3.68 |
hadoop_ReadStatistics_getTotalEcDecodingTimeMillis | /**
* Return the total time in milliseconds used for erasure coding decoding.
*/
public synchronized long getTotalEcDecodingTimeMillis() {
return totalEcDecodingTimeMillis;
} | 3.68 |
hadoop_QuotaUsage_isTypeQuotaSet | /**
* Return true if any storage type quota has been set.
*
* @return true if any storage type quota has been set, false otherwise.
*/
public boolean isTypeQuotaSet() {
if (typeQuota != null) {
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeQuota[t.ordinal()] > 0L) {
return true;
}
}
}
return false;
} | 3.68 |
framework_AbstractSplitPanel_isUserOriginated | /**
* {@inheritDoc}
*
* @since 8.1
*/
@Override
public boolean isUserOriginated() {
return userOriginated;
} | 3.68 |
hudi_InstantRange_builder | /**
* Returns the builder.
*/
public static Builder builder() {
return new Builder();
} | 3.68 |
hbase_RootProcedureState_loadStack | /**
* Called on store load by the ProcedureExecutor to load part of the stack. Each procedure has its
* own stack positions, which means we have to write to the store only the Procedure we executed,
* and nothing else. On load we recreate the full stack by aggregating each procedure's
* stack positions.
*/
protected synchronized void loadStack(Procedure<TEnvironment> proc) {
addSubProcedure(proc);
int[] stackIndexes = proc.getStackIndexes();
if (stackIndexes != null) {
if (subprocStack == null) {
subprocStack = new ArrayList<>();
}
int diff = (1 + stackIndexes[stackIndexes.length - 1]) - subprocStack.size();
if (diff > 0) {
subprocStack.ensureCapacity(1 + stackIndexes[stackIndexes.length - 1]);
while (diff-- > 0) {
subprocStack.add(null);
}
}
for (int i = 0; i < stackIndexes.length; ++i) {
subprocStack.set(stackIndexes[i], proc);
}
}
if (proc.getState() == ProcedureState.ROLLEDBACK) {
state = State.ROLLINGBACK;
} else if (proc.isFailed()) {
state = State.FAILED;
}
} | 3.68 |
morf_XmlDataSetConsumer_emptyElement | /**
* Output an empty (self-closing) element: <foo/>
*
* @param contentHandler The content handler
* @param name The element name
* @param attributes The attributes
* @throws SAXException When there's a writer error
*/
private void emptyElement(ContentHandler contentHandler, String name, Attributes attributes) throws SAXException {
contentHandler.startElement(XmlDataSetNode.URI, name, name, attributes);
contentHandler.endElement(XmlDataSetNode.URI, name, name);
} | 3.68 |
framework_VComboBox_setAllowNewItems | /**
* Sets whether creation of new items when there is no match is allowed or
* not.
*
* @param allowNewItems
* true to allow creation of new items, false to only allow
* selection of existing items
*/
public void setAllowNewItems(boolean allowNewItems) {
this.allowNewItems = allowNewItems;
} | 3.68 |
rocketmq-connect_JdbcSinkTask_start | /**
* Start the component
* @param keyValue
*/
@Override
public void start(KeyValue keyValue) {
originalConfig = keyValue;
config = new JdbcSinkConfig(keyValue);
remainingRetries = config.getMaxRetries();
this.dialect = DatabaseDialectLoader.getDatabaseDialect(config);
log.info("Initializing writer using SQL dialect: {}", dialect.getClass().getSimpleName());
this.jdbcWriter = new JdbcWriter(config, dialect);
} | 3.68 |
hbase_AvlUtil_prepend | /**
* Prepend a node to the tree before a specific node
* @param head the head of the linked list
* @param base the node before which the {@code node} should be added
* @param node the node to add before the {@code base} node
* @return the new head of the linked list
*/
public static <TNode extends AvlLinkedNode> TNode prepend(TNode head, TNode base, TNode node) {
assert !isLinked(node) : node + " is already linked";
node.iterNext = base;
node.iterPrev = base.iterPrev;
base.iterPrev.iterNext = node;
base.iterPrev = node;
return head == base ? node : head;
} | 3.68 |
querydsl_NumberExpression_ceil | /**
* Create a {@code ceil(this)} expression
*
* <p>Returns the smallest (closest to negative infinity)
* {@code double} value that is greater than or equal to the
* argument and is equal to a mathematical integer</p>
*
* @return ceil(this)
* @see java.lang.Math#ceil(double)
*/
public NumberExpression<T> ceil() {
if (ceil == null) {
ceil = Expressions.numberOperation(getType(), MathOps.CEIL, mixin);
}
return ceil;
} | 3.68 |
flink_TieredStorageConfiguration_getMemoryTierNumBytesPerSegment | /**
* Get the segment size of memory tier.
*
* @return segment size.
*/
public int getMemoryTierNumBytesPerSegment() {
return memoryTierNumBytesPerSegment;
} | 3.68 |
hbase_LruBlockCache_containsBlock | /**
* Whether the cache contains block with specified cacheKey
* @return true if contains the block
*/
@Override
public boolean containsBlock(BlockCacheKey cacheKey) {
return map.containsKey(cacheKey);
} | 3.68 |
flink_PojoSerializerSnapshot_previousSerializerHasNonRegisteredSubclasses | /**
* Checks whether the previous serializer, represented by this snapshot, has non-registered
* subclasses.
*/
private static boolean previousSerializerHasNonRegisteredSubclasses(
LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
nonRegisteredSubclassSerializerSnapshots) {
return nonRegisteredSubclassSerializerSnapshots.size() > 0;
} | 3.68 |
hbase_AdaptiveLifoCoDelCallQueue_updateTunables | /**
* Update tunables.
* @param newCodelTargetDelay new CoDel target delay
* @param newCodelInterval new CoDel interval
* @param newLifoThreshold new Adaptive Lifo threshold
*/
public void updateTunables(int newCodelTargetDelay, int newCodelInterval,
double newLifoThreshold) {
this.codelTargetDelay = newCodelTargetDelay;
this.codelInterval = newCodelInterval;
this.lifoThreshold = newLifoThreshold;
} | 3.68 |
hbase_BucketAllocator_freeCount | /**
* How many more items can be allocated from the currently claimed blocks of this bucket size
*/
public long freeCount() {
return freeCount;
} | 3.68 |
hbase_MasterObserver_postEnableTable | /**
* Called after the enableTable operation has been requested. Called as part of enable table RPC
* call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
*/
default void postEnableTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName) throws IOException {
} | 3.68 |
flink_Tuple11_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10), where the individual fields are the value returned by calling {@link
* Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ","
+ StringUtils.arrayAwareToString(this.f5)
+ ","
+ StringUtils.arrayAwareToString(this.f6)
+ ","
+ StringUtils.arrayAwareToString(this.f7)
+ ","
+ StringUtils.arrayAwareToString(this.f8)
+ ","
+ StringUtils.arrayAwareToString(this.f9)
+ ","
+ StringUtils.arrayAwareToString(this.f10)
+ ")";
} | 3.68 |
framework_Calendar_isClientChangeAllowed | /**
* Is the user allowed to trigger events which alters the events.
*
* @return true if the client is allowed to send changes to server
* @see #isEventClickAllowed()
*/
protected boolean isClientChangeAllowed() {
return !isReadOnly();
} | 3.68 |
flink_TestcontainersSettings_network | /**
* Sets the {@code network} and returns a reference to this Builder enabling method
* chaining.
*
* @param network The {@code network} to set.
* @return A reference to this Builder.
*/
public Builder network(Network network) {
this.network = network;
return this;
} | 3.68 |
hudi_TableSizeStats_readConfigFromFileSystem | /**
* Reads config from the file system.
*
* @param jsc {@link JavaSparkContext} instance.
* @param cfg {@link Config} instance.
* @return the {@link TypedProperties} instance.
*/
private TypedProperties readConfigFromFileSystem(JavaSparkContext jsc, Config cfg) {
return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.propsFilePath), cfg.configs)
.getProps(true);
} | 3.68 |
flink_SessionWithGap_on | /**
* Specifies the time attribute on which rows are grouped.
*
* <p>For streaming tables you can specify grouping by an event-time or processing-time
* attribute.
*
* <p>For batch tables you can specify grouping on a timestamp or long attribute.
*
* @param timeField time attribute for streaming and batch tables
* @return a session window on the given time attribute
*/
public SessionWithGapOnTime on(Expression timeField) {
return new SessionWithGapOnTime(timeField, gap);
} | 3.68 |
morf_AbstractSelectStatementBuilder_getFromSelects | /**
* @return the fromSelects
*/
List<SelectStatement> getFromSelects() {
return fromSelects;
} | 3.68 |
hudi_SparkRDDReadClient_getPendingCompactions | /**
* Return all pending compactions with instant time for clients to decide what to compact next.
*
* @return pairs of compaction instant time and compaction plan for all pending compactions
*/
public List<Pair<String, HoodieCompactionPlan>> getPendingCompactions() {
HoodieTableMetaClient metaClient =
HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(hoodieTable.getMetaClient().getBasePath()).setLoadActiveTimelineOnLoad(true).build();
return CompactionUtils.getAllPendingCompactionPlans(metaClient).stream()
.map(
instantWorkloadPair -> Pair.of(instantWorkloadPair.getKey().getTimestamp(), instantWorkloadPair.getValue()))
.collect(Collectors.toList());
} | 3.68 |
flink_PropertiesUtil_flatten | /**
* Flatten a recursive {@link Properties} to a first level property map.
*
* <p>In some cases, {@code KafkaProducer#propsToMap} for example, Properties is used purely as
* a HashTable without considering its default properties.
*
* @param config Properties to be flattened
* @return Properties without defaults; all properties are put in the first-level
*/
public static Properties flatten(Properties config) {
final Properties flattenProperties = new Properties();
Collections.list(config.propertyNames()).stream()
.forEach(
name -> {
Preconditions.checkArgument(name instanceof String);
flattenProperties.setProperty(
(String) name, config.getProperty((String) name));
});
return flattenProperties;
} | 3.68 |
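A minimal sketch of the behaviour described above (illustrative only): defaults hidden inside the Properties chain become first-level entries after flattening.
Properties defaults = new Properties();
defaults.setProperty("bootstrap.servers", "localhost:9092");  // only reachable via the defaults chain
Properties config = new Properties(defaults);
config.setProperty("group.id", "my-group");
Properties flat = PropertiesUtil.flatten(config);  // both keys are now plain first-level entries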
hadoop_ServiceRecord_set | /**
* Handle unknown attributes by storing them in the
* {@link #attributes} map
* @param key attribute name
* @param value attribute value.
*/
@JsonAnySetter
public void set(String key, Object value) {
attributes.put(key, value.toString());
} | 3.68 |
graphhopper_VectorTile_clearUintValue | /**
* <code>optional uint64 uint_value = 5;</code>
*/
public Builder clearUintValue() {
bitField0_ = (bitField0_ & ~0x00000010);
uintValue_ = 0L;
onChanged();
return this;
} | 3.68 |
flink_SlotSharingGroup_setTaskOffHeapMemory | /** Set the task off-heap memory for this SlotSharingGroup. */
public Builder setTaskOffHeapMemory(MemorySize taskOffHeapMemory) {
this.taskOffHeapMemory = taskOffHeapMemory;
return this;
} | 3.68 |
pulsar_ProducerConfiguration_getEncryptionKeys | /**
*
* @return encryptionKeys
*
*/
public Set<String> getEncryptionKeys() {
return conf.getEncryptionKeys();
} | 3.68 |
hudi_FormatUtils_getBooleanWithAltKeys | /**
* Gets the boolean value for a {@link ConfigProperty} config from Flink configuration. The key and
* alternative keys are used to fetch the config. The default value of {@link ConfigProperty}
* config, if exists, is returned if the config is not found in the configuration.
*
* @param conf Configs in Flink {@link Configuration}.
* @param configProperty {@link ConfigProperty} config to fetch.
* @return boolean value if the config exists; default boolean value if the config does not exist
* and there is default value defined in the {@link ConfigProperty} config; {@code false} otherwise.
*/
public static boolean getBooleanWithAltKeys(org.apache.flink.configuration.Configuration conf,
ConfigProperty<?> configProperty) {
Option<String> rawValue = getRawValueWithAltKeys(conf, configProperty);
boolean defaultValue = configProperty.hasDefaultValue()
? Boolean.parseBoolean(configProperty.defaultValue().toString()) : false;
return rawValue.map(Boolean::parseBoolean).orElse(defaultValue);
} | 3.68 |
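A hedged sketch of how the helper above might be called (the config key is hypothetical; it assumes Hudi's ConfigProperty builder with key() and defaultValue()):
ConfigProperty<Boolean> enabled = ConfigProperty
    .key("hoodie.example.enabled")   // hypothetical key
    .defaultValue(false);
org.apache.flink.configuration.Configuration conf = new org.apache.flink.configuration.Configuration();
conf.setString("hoodie.example.enabled", "true");
boolean value = FormatUtils.getBooleanWithAltKeys(conf, enabled);  // true; falls back to false when unset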
druid_SQLColumnDefinition_setIdentity | // for sqlserver
public void setIdentity(Identity identity) {
if (identity != null) {
identity.setParent(this);
}
this.identity = identity;
} | 3.68 |
framework_FlyweightRow_setSkipNext | /**
* Sets the number of cells to skip when {@link #next()} is called the
* next time. Cell hiding is also handled eagerly in this method.
*
* @param colspan
* the number of cells to skip on next invocation of
* {@link #next()}
*/
public void setSkipNext(final int colspan) {
assert colspan > 0 : "Number of cells didn't make sense: "
+ colspan;
skipNext = colspan;
} | 3.68 |
dubbo_InternalThread_setThreadLocalMap | /**
* Sets the internal data structure that keeps the threadLocal variables bound to this thread.
* Note that this method is for internal use only, and thus is subject to change at any time.
*/
public final void setThreadLocalMap(InternalThreadLocalMap threadLocalMap) {
this.threadLocalMap = threadLocalMap;
} | 3.68 |
shardingsphere-elasticjob_JobFacade_getShardingContexts | /**
* Get sharding contexts.
*
* @return sharding contexts
*/
public ShardingContexts getShardingContexts() {
boolean isFailover = configService.load(true).isFailover();
if (isFailover) {
List<Integer> failoverShardingItems = failoverService.getLocalFailoverItems();
if (!failoverShardingItems.isEmpty()) {
return executionContextService.getJobShardingContext(failoverShardingItems);
}
}
shardingService.shardingIfNecessary();
List<Integer> shardingItems = shardingService.getLocalShardingItems();
if (isFailover) {
shardingItems.removeAll(failoverService.getLocalTakeOffItems());
}
shardingItems.removeAll(executionService.getDisabledItems(shardingItems));
return executionContextService.getJobShardingContext(shardingItems);
} | 3.68 |
hbase_RequestConverter_buildWarmupRegionRequest | /**
* Create a WarmupRegionRequest for a given region name
* @param regionInfo Region we are warming up
*/
public static WarmupRegionRequest buildWarmupRegionRequest(final RegionInfo regionInfo) {
WarmupRegionRequest.Builder builder = WarmupRegionRequest.newBuilder();
builder.setRegionInfo(ProtobufUtil.toRegionInfo(regionInfo));
return builder.build();
} | 3.68 |
framework_TouchScrollDelegate_addElement | /**
* Registers the given element as scrollable.
*/
public void addElement(Element scrollable) {
scrollable.addClassName(SCROLLABLE_CLASSNAME);
if (requiresDelegate()) {
delegate.scrollableElements.add(scrollable);
}
} | 3.68 |
hadoop_AbstractConfigurableFederationPolicy_getIsDirty | /**
* Returns whether the last reinitialization required actual changes, or
* was "free" because the weights have not changed. This is used by subclasses
* overriding reinitialize and calling super.reinitialize() to know whether to
* quit early.
*
* @return whether more work is needed to initialize.
*/
public boolean getIsDirty() {
return isDirty;
} | 3.68 |
morf_ValueConverters_booleanValue | /**
* Always returns true or false in line with the contract of {@link Boolean#valueOf(String)}.
*/
@Override
public Boolean booleanValue(T value) {
return Boolean.valueOf(value.toString());
} | 3.68 |
pulsar_ManagedLedgerConfig_getMinimumRolloverTimeMs | /**
* @return the minimum rollover time
*/
public int getMinimumRolloverTimeMs() {
return minimumRolloverTimeMs;
} | 3.68 |
hbase_StorageClusterVersionModel_getVersion | /** Returns the storage cluster version */
@XmlAttribute(name = "Version")
public String getVersion() {
return version;
} | 3.68 |
flink_UserDefinedFunctionHelper_prepareInstance | /** Prepares a {@link UserDefinedFunction} instance for usage in the API. */
public static void prepareInstance(ReadableConfig config, UserDefinedFunction function) {
validateClass(function.getClass(), false);
cleanFunction(config, function);
} | 3.68 |
hadoop_HsController_taskCounters | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#taskCounters()
*/
@Override
public void taskCounters() {
super.taskCounters();
} | 3.68 |
pulsar_TopicsBase_getSchemaData | // Build schemaData from passed in schema string.
private SchemaData getSchemaData(String keySchema, String valueSchema) {
try {
SchemaInfoImpl valueSchemaInfo = (valueSchema == null || valueSchema.isEmpty())
? (SchemaInfoImpl) StringSchema.utf8().getSchemaInfo() :
SCHEMA_INFO_READER.readValue(valueSchema);
if (null == valueSchemaInfo.getName()) {
valueSchemaInfo.setName(valueSchemaInfo.getType().toString());
}
// Value schema only
if (keySchema == null || keySchema.isEmpty()) {
return SchemaData.builder()
.data(valueSchemaInfo.getSchema())
.isDeleted(false)
.user("Rest Producer")
.timestamp(System.currentTimeMillis())
.type(valueSchemaInfo.getType())
.props(valueSchemaInfo.getProperties())
.build();
} else {
// Key_Value schema
SchemaInfoImpl keySchemaInfo = SCHEMA_INFO_READER.readValue(keySchema);
if (null == keySchemaInfo.getName()) {
keySchemaInfo.setName(keySchemaInfo.getType().toString());
}
SchemaInfo schemaInfo = KeyValueSchemaInfo.encodeKeyValueSchemaInfo("KVSchema-"
+ topicName.getPartitionedTopicName(),
keySchemaInfo, valueSchemaInfo,
KeyValueEncodingType.SEPARATED);
return SchemaData.builder()
.data(schemaInfo.getSchema())
.isDeleted(false)
.user("Rest Producer")
.timestamp(System.currentTimeMillis())
.type(schemaInfo.getType())
.props(schemaInfo.getProperties())
.build();
}
} catch (IOException e) {
if (log.isDebugEnabled()) {
log.debug("Fail to parse schema info for rest produce request with key schema {} and value schema {}"
, keySchema, valueSchema);
}
return null;
}
} | 3.68 |
zilla_ManyToOneRingBuffer_tryClaim | /**
* {@inheritDoc}
*/
public int tryClaim(final int msgTypeId, final int length)
{
checkTypeId(msgTypeId);
checkMsgLength(length);
final AtomicBuffer buffer = this.buffer;
final int recordLength = length + HEADER_LENGTH;
final int recordIndex = claimCapacity(buffer, recordLength);
if (INSUFFICIENT_CAPACITY == recordIndex)
{
return recordIndex;
}
buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
UnsafeAccess.UNSAFE.storeFence();
buffer.putInt(typeOffset(recordIndex), msgTypeId);
return encodedMsgOffset(recordIndex);
} | 3.68 |
hbase_CheckAndMutate_getQualifier | /** Returns the qualifier to check */
public byte[] getQualifier() {
return qualifier;
} | 3.68 |
hudi_HoodieAsyncService_shutdownCallback | /**
* Add shutdown callback for the completable future.
*
* @param callback The callback
*/
@SuppressWarnings("unchecked")
private void shutdownCallback(Function<Boolean, Boolean> callback) {
if (future == null) {
return;
}
future.whenComplete((resp, error) -> {
if (null != callback) {
callback.apply(null != error);
}
this.started = false;
});
} | 3.68 |
framework_PopupDateField_setAssistiveText | /**
* Set a description that explains the usage of the Widget for users of
* assistive devices.
*
* @param description
* String with the description
*/
public void setAssistiveText(String description) {
getState().descriptionForAssistiveDevices = description;
} | 3.68 |
hbase_WALPrettyPrinter_run | /**
* Pass one or more log file names and formatting options and it will dump out a text version of
* the contents on <code>stdout</code>.
* @param args command line arguments
* @throws IOException thrown upon file system errors etc.
*/
public static void run(String[] args) throws IOException {
// create options
Options options = new Options();
options.addOption("h", "help", false, "Output help message");
options.addOption("j", "json", false, "Output JSON");
options.addOption("p", "printvals", false, "Print values");
options.addOption("t", "tables", true,
"Table names (comma separated) to filter by; eg: test1,test2,test3 ");
options.addOption("r", "region", true,
"Region to filter by. Pass encoded region name; e.g. '9192caead6a5a20acb4454ffbc79fa14'");
options.addOption("s", "sequence", true, "Sequence to filter by. Pass sequence number.");
options.addOption("k", "outputOnlyRowKey", false, "Print only row keys");
options.addOption("w", "row", true, "Row to filter by. Pass row name.");
options.addOption("f", "rowPrefix", true, "Row prefix to filter by.");
options.addOption("g", "goto", true, "Position to seek to in the file");
WALPrettyPrinter printer = new WALPrettyPrinter();
CommandLineParser parser = new PosixParser();
List<?> files = null;
try {
CommandLine cmd = parser.parse(options, args);
files = cmd.getArgList();
if (files.isEmpty() || cmd.hasOption("h")) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("WAL <filename...>", options, true);
System.exit(-1);
}
// configure the pretty printer using command line options
if (cmd.hasOption("p")) {
printer.enableValues();
}
if (cmd.hasOption("j")) {
printer.enableJSON();
}
if (cmd.hasOption("k")) {
printer.setOutputOnlyRowKey();
}
if (cmd.hasOption("t")) {
printer.setTableFilter(cmd.getOptionValue("t"));
}
if (cmd.hasOption("r")) {
printer.setRegionFilter(cmd.getOptionValue("r"));
}
if (cmd.hasOption("s")) {
printer.setSequenceFilter(Long.parseLong(cmd.getOptionValue("s")));
}
if (cmd.hasOption("w")) {
if (cmd.hasOption("f")) {
throw new ParseException("Row and Row-prefix cannot be supplied together");
}
printer.setRowFilter(cmd.getOptionValue("w"));
}
if (cmd.hasOption("f")) {
if (cmd.hasOption("w")) {
throw new ParseException("Row and Row-prefix cannot be supplied together");
}
printer.setRowPrefixFilter(cmd.getOptionValue("f"));
}
if (cmd.hasOption("g")) {
printer.setPosition(Long.parseLong(cmd.getOptionValue("g")));
}
} catch (ParseException e) {
LOG.error("Failed to parse commandLine arguments", e);
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("HFile filename(s) ", options, true);
System.exit(-1);
}
// get configuration, file system, and process the given files
Configuration conf = HBaseConfiguration.create();
CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
// begin output
printer.beginPersistentOutput();
for (Object f : files) {
Path file = new Path((String) f);
FileSystem fs = file.getFileSystem(conf);
if (!fs.exists(file)) {
System.err.println("ERROR, file doesnt exist: " + file);
return;
}
printer.processFile(conf, file);
}
printer.endPersistentOutput();
} | 3.68 |
hmily_PropertyKeyParse_parse | /**
* Parses the property name into an array of property keys.
*
* @param propertyName the property name
* @return the parsed property keys
*/
public PropertyKey[] parse(final String propertyName) {
// Use a local copy in case another thread changes things
LastKey<String> last = this.lastKeyStr;
if (last != null && last.isFrom(propertyName)) {
return last.getKeys();
}
PropertyKey[] mapping = tryMap(propertyName);
this.lastKeyStr = new LastKey<>(propertyName, mapping);
return mapping;
} | 3.68 |
framework_VTree_handleKeyNavigation | /**
* Handles the keyboard navigation.
*
* @param keycode
* The keycode of the pressed key
* @param ctrl
* Was ctrl pressed
* @param shift
* Was shift pressed
* @return Returns true if the key was handled, else false
*/
protected boolean handleKeyNavigation(int keycode, boolean ctrl,
boolean shift) {
// Navigate down
if (keycode == getNavigationDownKey()) {
TreeNode node = null;
// If node is open and has children then move in to the children
if (!focusedNode.isLeaf() && focusedNode.getState()
&& !focusedNode.getChildren().isEmpty()) {
node = focusedNode.getChildren().get(0);
} else {
// Move down to the next sibling
node = getNextSibling(focusedNode);
if (node == null) {
// Jump to the parent and try to select the next
// sibling there
TreeNode current = focusedNode;
while (node == null && current.getParentNode() != null) {
node = getNextSibling(current.getParentNode());
current = current.getParentNode();
}
}
}
if (node != null) {
setFocusedNode(node);
if (selectable) {
if (!ctrl && !shift) {
selectNode(node, true);
} else if (shift && isMultiselect) {
deselectAll();
selectNodeRange(lastSelection.key, node.key);
} else if (shift) {
selectNode(node, true);
}
}
showTooltipForKeyboardNavigation(node);
}
return true;
}
// Navigate up
if (keycode == getNavigationUpKey()) {
TreeNode prev = getPreviousSibling(focusedNode);
TreeNode node = null;
if (prev != null) {
node = getLastVisibleChildInTree(prev);
} else if (focusedNode.getParentNode() != null) {
node = focusedNode.getParentNode();
}
if (node != null) {
setFocusedNode(node);
if (selectable) {
if (!ctrl && !shift) {
selectNode(node, true);
} else if (shift && isMultiselect) {
deselectAll();
selectNodeRange(lastSelection.key, node.key);
} else if (shift) {
selectNode(node, true);
}
}
showTooltipForKeyboardNavigation(node);
}
return true;
}
// Navigate left (close branch)
if (keycode == getNavigationLeftKey()) {
if (!focusedNode.isLeaf() && focusedNode.getState()) {
focusedNode.setState(false, true);
} else if (focusedNode.getParentNode() != null
&& (focusedNode.isLeaf() || !focusedNode.getState())) {
if (ctrl || !selectable) {
setFocusedNode(focusedNode.getParentNode());
} else if (shift) {
doRelationSelection(focusedNode.getParentNode(),
focusedNode);
setFocusedNode(focusedNode.getParentNode());
} else {
focusAndSelectNode(focusedNode.getParentNode());
}
}
showTooltipForKeyboardNavigation(focusedNode);
return true;
}
// Navigate right (open branch)
if (keycode == getNavigationRightKey()) {
if (!focusedNode.isLeaf() && !focusedNode.getState()) {
focusedNode.setState(true, true);
} else if (!focusedNode.isLeaf()) {
if (ctrl || !selectable) {
setFocusedNode(focusedNode.getChildren().get(0));
} else if (shift) {
setSelected(focusedNode, true);
setFocusedNode(focusedNode.getChildren().get(0));
setSelected(focusedNode, true);
} else {
focusAndSelectNode(focusedNode.getChildren().get(0));
}
}
showTooltipForKeyboardNavigation(focusedNode);
return true;
}
// Selection
if (keycode == getNavigationSelectKey()) {
if (!focusedNode.isSelected()) {
selectNode(focusedNode,
(!isMultiselect
|| multiSelectMode == MULTISELECT_MODE_SIMPLE)
&& selectable);
} else {
deselectNode(focusedNode);
}
return true;
}
// Home selection
if (keycode == getNavigationStartKey()) {
TreeNode node = getFirstRootNode();
if (ctrl || !selectable) {
setFocusedNode(node);
} else if (shift) {
deselectAll();
selectNodeRange(focusedNode.key, node.key);
} else {
selectNode(node, true);
}
sendSelectionToServer();
showTooltipForKeyboardNavigation(node);
return true;
}
// End selection
if (keycode == getNavigationEndKey()) {
TreeNode lastNode = getLastRootNode();
TreeNode node = getLastVisibleChildInTree(lastNode);
if (ctrl || !selectable) {
setFocusedNode(node);
} else if (shift) {
deselectAll();
selectNodeRange(focusedNode.key, node.key);
} else {
selectNode(node, true);
}
sendSelectionToServer();
showTooltipForKeyboardNavigation(node);
return true;
}
return false;
} | 3.68 |
flink_ServerConnection_createEstablishedConnection | /**
* Creates an established connection from the given channel.
*
* @param channel Channel to create an established connection from
*/
private InternalConnection<REQ, RESP> createEstablishedConnection(Channel channel) {
if (failureCause != null || !running) {
// Close the channel and we are done. Any queued requests
// are removed on the close/failure call and after that no
// new ones can be enqueued.
channel.close();
return this;
} else {
final EstablishedConnection<REQ, RESP> establishedConnection =
connectionFactory.apply(channel);
while (!queuedRequests.isEmpty()) {
final PendingConnection.PendingRequest<REQ, RESP> pending =
queuedRequests.poll();
FutureUtils.forward(
establishedConnection.sendRequest(pending.getRequest()), pending);
}
return establishedConnection;
}
} | 3.68 |
AreaShop_GeneralRegion_getIntegerSetting | /**
* Get an integer setting for this region, defined as follows
* - If the region has the setting in its own file (/regions/regionName.yml), use that
* - If the region has groups, use the setting defined by the most important group, if any
* - Otherwise fallback to the default.yml file setting
* @param path The path to get the setting of
* @return The integer value of the setting
*/
public int getIntegerSetting(String path) {
if(config.isSet(path)) {
return config.getInt(path);
}
int result = 0;
int priority = Integer.MIN_VALUE;
boolean found = false;
for(RegionGroup group : plugin.getFileManager().getGroups()) {
if(group.isMember(this) && group.getSettings().isSet(path) && group.getPriority() > priority) {
result = group.getSettings().getInt(path);
priority = group.getPriority();
found = true;
}
}
if(found) {
return result;
}
if(this.getFileManager().getRegionSettings().isSet(path)) {
return this.getFileManager().getRegionSettings().getInt(path);
} else {
return this.getFileManager().getFallbackRegionSettings().getInt(path);
}
} | 3.68 |
flink_WindowedOperatorTransformation_apply | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of incremental aggregation.
*
* @param function The window function.
* @param resultType Type information for the result type of the window function
* @return The data stream that is the result of applying the window function to the window.
*/
public <R> BootstrapTransformation<T> apply(
WindowFunction<T, R, K, W> function, TypeInformation<R> resultType) {
function = input.clean(function);
WindowOperator<K, T, ?, R, W> operator = builder.apply(function);
SavepointWriterOperatorFactory factory =
(timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator);
return new BootstrapTransformation<>(
input, operatorMaxParallelism, timestamper, factory, keySelector, keyType);
} | 3.68 |
framework_Form_isModified | /*
* Is the object modified but not committed? Don't add a JavaDoc comment
* here, we use the default one from the interface.
*/
@Override
public boolean isModified() {
for (final Object id : propertyIds) {
final Field<?> f = fields.get(id);
if (f != null && f.isModified()) {
return true;
}
}
return false;
} | 3.68 |
hadoop_XAttrCodec_decodeValue | /**
* Decode string representation of a value and check whether it's
* encoded. If the given string begins with 0x or 0X, it expresses
* a hexadecimal number. If the given string begins with 0s or 0S,
* base64 encoding is expected. If the given string is enclosed in
* double quotes, the inner string is treated as text. Otherwise
* the given string is treated as text.
* @param value string representation of the value.
* @return byte[] the value
* @throws IOException raised on errors performing I/O.
*/
public static byte[] decodeValue(String value) throws IOException {
byte[] result = null;
if (value != null) {
if (value.length() >= 2) {
String en = value.substring(0, 2);
if (value.startsWith("\"") && value.endsWith("\"")) {
value = value.substring(1, value.length()-1);
result = value.getBytes("utf-8");
} else if (en.equalsIgnoreCase(HEX_PREFIX)) {
value = value.substring(2, value.length());
try {
result = Hex.decodeHex(value.toCharArray());
} catch (DecoderException e) {
throw new IOException(e);
}
} else if (en.equalsIgnoreCase(BASE64_PREFIX)) {
value = value.substring(2, value.length());
result = base64.decode(value);
}
}
if (result == null) {
result = value.getBytes("utf-8");
}
}
return result;
} | 3.68 |
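A short sketch of the three accepted encodings described above (illustrative only; the values are chosen so all three decode to the bytes of "AA"):
byte[] fromHex  = XAttrCodec.decodeValue("0x4141");   // hexadecimal prefix
byte[] fromB64  = XAttrCodec.decodeValue("0sQUE=");   // base64 prefix
byte[] fromText = XAttrCodec.decodeValue("\"AA\"");   // quoted text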
flink_TimeWindow_getStart | /**
* Gets the starting timestamp of the window. This is the first timestamp that belongs to this
* window.
*
* @return The starting timestamp of this window.
*/
public long getStart() {
return start;
} | 3.68 |
flink_CreditBasedSequenceNumberingViewReader_getAvailabilityAndBacklog | /**
* Returns true only if the next buffer is an event or the reader has both available credits and
* buffers.
*
* @implSpec BEWARE: this must be in sync with {@link #getNextDataType(BufferAndBacklog)}, such
* that {@code getNextDataType(bufferAndBacklog) != NONE <=>
* AvailabilityWithBacklog#isAvailable()}!
*/
@Override
public ResultSubpartitionView.AvailabilityWithBacklog getAvailabilityAndBacklog() {
return subpartitionView.getAvailabilityAndBacklog(numCreditsAvailable);
} | 3.68 |
flink_ExecutionConfig_disableGenericTypes | /**
* Disables the use of generic types (types that would be serialized via Kryo). If this option
* is used, Flink will throw an {@code UnsupportedOperationException} whenever it encounters a
* data type that would go through Kryo for serialization.
*
* <p>Disabling generic types can be helpful to eagerly find and eliminate the use of types that
* would go through Kryo serialization during runtime. Rather than checking types individually,
* using this option will throw exceptions eagerly in the places where generic types are used.
*
* <p><b>Important:</b> We recommend to use this option only during development and
* pre-production phases, not during actual production use. The application program and/or the
* input data may be such that new, previously unseen, types occur at some point. In that case,
* setting this option would cause the program to fail.
*
* @see #enableGenericTypes()
*/
public void disableGenericTypes() {
setGenericTypes(false);
} | 3.68 |
hibernate-validator_MethodInheritanceTree_hasParallelDefinitions | /**
* Checks if there are any parallel definitions of the method in the hierarchy.
*
* @return {@code true} if there are any parallel definitions of the method in the hierarchy, {@code false} otherwise
*/
public boolean hasParallelDefinitions() {
return topLevelMethods.size() > 1;
} | 3.68 |
open-banking-gateway_AccountInformationRequestCommon_fintech_calls_list_accounts_for_anton_brueckner | // Note that anton.brueckner is typically used for REDIRECT (real REDIRECT that is returned by bank, and not REDIRECT approach in table)
public SELF fintech_calls_list_accounts_for_anton_brueckner(String bankProfileId) {
return fintech_calls_list_accounts_for_anton_brueckner(bankProfileId, false);
} | 3.68 |
hbase_BalancerClusterState_getOrComputeRackLocalities | /**
* Retrieves and lazily initializes a field storing the locality of every region/server
* combination
*/
public float[][] getOrComputeRackLocalities() {
if (rackLocalities == null || regionsToMostLocalEntities == null) {
computeCachedLocalities();
}
return rackLocalities;
} | 3.68 |
flink_RocksDBOperationUtils_sanityCheckArenaBlockSize | /**
* Logs a warning if the arena block size is too high causing RocksDB to flush constantly.
* Essentially, the condition <a
* href="https://github.com/dataArtisans/frocksdb/blob/49bc897d5d768026f1eb816d960c1f2383396ef4/include/rocksdb/write_buffer_manager.h#L47">
* here</a> will always be true.
*
* @param writeBufferSize the size of write buffer (bytes)
* @param arenaBlockSizeConfigured the manually configured arena block size, zero or less means
* not configured
* @param writeBufferManagerCapacity the size of the write buffer manager (bytes)
* @return true if sanity check passes, false otherwise
*/
static boolean sanityCheckArenaBlockSize(
long writeBufferSize, long arenaBlockSizeConfigured, long writeBufferManagerCapacity)
throws IllegalStateException {
long defaultArenaBlockSize =
RocksDBMemoryControllerUtils.calculateRocksDBDefaultArenaBlockSize(writeBufferSize);
long arenaBlockSize =
arenaBlockSizeConfigured <= 0 ? defaultArenaBlockSize : arenaBlockSizeConfigured;
long mutableLimit =
RocksDBMemoryControllerUtils.calculateRocksDBMutableLimit(
writeBufferManagerCapacity);
if (RocksDBMemoryControllerUtils.validateArenaBlockSize(arenaBlockSize, mutableLimit)) {
return true;
} else {
LOG.warn(
"RocksDBStateBackend performance will be poor because of the current Flink memory configuration! "
+ "RocksDB will flush memtable constantly, causing high IO and CPU. "
+ "Typically the easiest fix is to increase task manager managed memory size. "
+ "If running locally, see the parameter taskmanager.memory.managed.size. "
+ "Details: arenaBlockSize {} > mutableLimit {} (writeBufferSize = {}, arenaBlockSizeConfigured = {},"
+ " defaultArenaBlockSize = {}, writeBufferManagerCapacity = {})",
arenaBlockSize,
mutableLimit,
writeBufferSize,
arenaBlockSizeConfigured,
defaultArenaBlockSize,
writeBufferManagerCapacity);
return false;
}
} | 3.68 |
morf_SchemaUtils_namesOfColumns | /**
* Turn a list of columns into a list of the columns' names.
*
* @param columns The columns
* @return The columns' names.
*/
public static List<String> namesOfColumns(List<Column> columns) {
return Lists.transform(columns, new Function<Column, String>() {
@Override
public String apply(Column column) {
return column.getName();
}
});
} | 3.68 |
flink_CheckpointProperties_discardOnJobFinished | /**
* Returns whether the checkpoint should be discarded when the owning job reaches the {@link
* JobStatus#FINISHED} state.
*
* @return <code>true</code> if the checkpoint should be discarded when the owning job reaches
* the {@link JobStatus#FINISHED} state; <code>false</code> otherwise.
* @see CompletedCheckpointStore
*/
boolean discardOnJobFinished() {
return discardFinished;
} | 3.68 |
rocketmq-connect_Worker_checkStoppedTasks | /**
* check stopped tasks
*/
private void checkStoppedTasks() {
for (Runnable runnable : stoppedTasks) {
WorkerTask workerTask = (WorkerTask) runnable;
Future future = taskToFutureMap.get(runnable);
try {
if (null != future) {
future.get(workerConfig.getMaxStartTimeoutMills(), TimeUnit.MILLISECONDS);
} else {
log.error("[BUG] stopped Tasks reference not found in taskFutureMap");
}
} catch (ExecutionException e) {
Throwable t = e.getCause();
log.info("[BUG] Stopped Tasks should not throw any exception");
t.printStackTrace();
} catch (CancellationException e) {
log.info("[BUG] Stopped Tasks throws PrintStackTrace");
e.printStackTrace();
} catch (TimeoutException e) {
log.info("[BUG] Stopped Tasks should not throw any exception");
e.printStackTrace();
} catch (InterruptedException e) {
log.info("[BUG] Stopped Tasks should not throw any exception");
e.printStackTrace();
} finally {
// remove committer offset
sourceTaskOffsetCommitter.ifPresent(commiter -> commiter.remove(workerTask.id()));
workerTask.cleanup();
future.cancel(true);
taskToFutureMap.remove(runnable);
stoppedTasks.remove(runnable);
cleanedStoppedTasks.add(runnable);
}
}
} | 3.68 |
hudi_AvroSchemaUtils_isAtomicSchemasCompatibleEvolution | /**
* Establishes whether {@code newReaderSchema} is compatible with {@code prevWriterSchema}, as
* defined by Avro's {@link AvroSchemaCompatibility}.
* {@code newReaderSchema} is considered compatible to {@code prevWriterSchema}, iff data written using {@code prevWriterSchema}
* could be read by {@code newReaderSchema}
* @param newReaderSchema new reader schema instance.
* @param prevWriterSchema prev writer schema instance.
* @return true if it is compatible, false otherwise.
*/
private static boolean isAtomicSchemasCompatibleEvolution(Schema newReaderSchema, Schema prevWriterSchema) {
// NOTE: Checking for compatibility of atomic types, we should ignore their
// corresponding fully-qualified names (as irrelevant)
return isSchemaCompatible(prevWriterSchema, newReaderSchema, false, true);
} | 3.68 |
dubbo_AbstractDynamicConfiguration_getTimeout | /**
* Get the timeout from {@link URL the specified connection URL}
*
* @param url {@link URL the specified connection URL}
* @return non-null
* @since 2.7.8
*/
protected static long getTimeout(URL url) {
return getParameter(url, TIMEOUT_PARAM_NAME, -1L);
} | 3.68 |
flink_ProgramOptionsUtils_isPythonEntryPoint | /**
* @return True if the commandline contains "-py" or "-pym" options or comes from PyFlink shell,
* false otherwise.
*/
public static boolean isPythonEntryPoint(CommandLine line) {
return line.hasOption(PY_OPTION.getOpt())
|| line.hasOption(PYMODULE_OPTION.getOpt())
|| "org.apache.flink.client.python.PythonGatewayServer"
.equals(line.getOptionValue(CLASS_OPTION.getOpt()));
} | 3.68 |
framework_VScrollTable_setAlign | /**
* Set alignment of the text in the cell.
*
* @param c
* The alignment which can be ALIGN_CENTER, ALIGN_LEFT,
* ALIGN_RIGHT
*/
public void setAlign(char c) {
if (align != c) {
switch (c) {
case ALIGN_CENTER:
captionContainer.getStyle().setTextAlign(TextAlign.CENTER);
break;
case ALIGN_RIGHT:
captionContainer.getStyle().setTextAlign(TextAlign.RIGHT);
break;
default:
captionContainer.getStyle().setTextAlign(TextAlign.LEFT);
break;
}
}
align = c;
} | 3.68 |
framework_TreeConnector_updateActionMap | /**
* Registers actions for the root and also for individual nodes.
*
* @param uidl the UIDL containing the action definitions
*/
private void updateActionMap(UIDL uidl) {
for (final Object child : uidl) {
final UIDL action = (UIDL) child;
final String key = action.getStringAttribute("key");
final String caption = action
.getStringAttribute(TreeConstants.ATTRIBUTE_ACTION_CAPTION);
String iconUrl = null;
if (action.hasAttribute(TreeConstants.ATTRIBUTE_ACTION_ICON)) {
iconUrl = getConnection()
.translateVaadinUri(action.getStringAttribute(
TreeConstants.ATTRIBUTE_ACTION_ICON));
}
getWidget().registerAction(key, caption, iconUrl);
}
} | 3.68 |
hudi_HoodieSparkQuickstart_pointInTimeQuery | /**
* Lets look at how to query data as of a specific time.
* The specific time can be represented by pointing endTime to a specific commit time
* and beginTime to “000” (denoting earliest possible commit time).
*/
public static void pointInTimeQuery(SparkSession spark, String tablePath, String tableName) {
List<String> commits =
spark.sql("select distinct(_hoodie_commit_time) as commitTime from hudi_ro_table order by commitTime")
.toJavaRDD()
.map((Function<Row, String>) row -> row.getString(0))
.take(50);
String beginTime = "000"; // Represents all commits > this time.
String endTime = commits.get(commits.size() - 1); // commit time we are interested in
//incrementally query data
Dataset<Row> incViewDF = spark.read().format("hudi")
.option("hoodie.datasource.query.type", "incremental")
.option("hoodie.datasource.read.begin.instanttime", beginTime)
.option("hoodie.datasource.read.end.instanttime", endTime)
.load(tablePath);
incViewDF.createOrReplaceTempView("hudi_incr_table");
spark.sql("select `_hoodie_commit_time`, fare, begin_lon, begin_lat, ts from hudi_incr_table where fare > 20.0")
.show();
} | 3.68 |
hbase_BucketCache_disableCache | /**
* Used to shut down the cache -or- turn it off in the case of something broken.
*/
private void disableCache() {
if (!cacheEnabled) return;
LOG.info("Disabling cache");
cacheEnabled = false;
ioEngine.shutdown();
this.scheduleThreadPool.shutdown();
for (int i = 0; i < writerThreads.length; ++i)
writerThreads[i].interrupt();
this.ramCache.clear();
if (!ioEngine.isPersistent() || persistencePath == null) {
// If persistent ioengine and a path, we will serialize out the backingMap.
this.backingMap.clear();
this.fullyCachedFiles.clear();
this.regionCachedSizeMap.clear();
}
} | 3.68 |
hbase_RpcExecutor_getQueues | /** Returns the list of request queues */
protected List<BlockingQueue<CallRunner>> getQueues() {
return queues;
} | 3.68 |
morf_SchemaValidator_isSQLReservedWord | /**
* Method to establish if a given string is an SQL Reserved Word
*
* @param word the string to check for being an SQL Reserved Word
* @return true if it is an SQL Reserved Word, otherwise false.
*/
boolean isSQLReservedWord(String word){
return sqlReservedWords.get().contains(word.toUpperCase());
} | 3.68 |
flink_SkipListUtils_getKeyDataOffset | /**
* Returns the offset of key data in the key space.
*
* @param level level of the key.
*/
public static int getKeyDataOffset(int level) {
return SkipListUtils.getKeyMetaLen(level);
} | 3.68 |
querydsl_MathExpressions_tan | /**
* Create a {@code tan(num)} expression
*
* <p>Returns the tangent of an angle of num radians.</p>
*
* @param num numeric expression
* @return tan(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> tan(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.TAN, num);
} | 3.68 |
framework_GridKeyPressEvent_getCharCode | /**
* Gets the char code for this event.
*
* @return the char code
*/
public char getCharCode() {
return (char) getUnicodeCharCode();
} | 3.68 |
hbase_BlockCache_getFullyCachedFiles | /**
* Returns an Optional containing the map of files that have been fully cached (all of their
* blocks are present in the cache). This method may not be overridden by all implementing
* classes. In such cases, the returned Optional will be empty.
* @return empty optional if this method is not supported, otherwise the returned optional
* contains a map of all files that have been fully cached.
*/
default Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
return Optional.empty();
} | 3.68 |
hbase_ClientMetaTableAccessor_getRegionLocation | /** Returns the HRegionLocation from meta for the given region */
public static CompletableFuture<Optional<HRegionLocation>>
getRegionLocation(AsyncTable<?> metaTable, byte[] regionName) {
CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
try {
RegionInfo parsedRegionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName(regionName);
addListener(metaTable.get(new Get(CatalogFamilyFormat.getMetaKeyForRegion(parsedRegionInfo))
.addFamily(HConstants.CATALOG_FAMILY)), (r, err) -> {
if (err != null) {
future.completeExceptionally(err);
return;
}
future.complete(getRegionLocations(r)
.map(locations -> locations.getRegionLocation(parsedRegionInfo.getReplicaId())));
});
} catch (IOException parseEx) {
LOG.warn("Failed to parse the passed region name: " + Bytes.toStringBinary(regionName));
future.completeExceptionally(parseEx);
}
return future;
} | 3.68 |
AreaShop_AreaShop_getFileManager | /**
* Method to get the FileManager (loads/save regions and can be used to get regions).
* @return The fileManager
*/
public FileManager getFileManager() {
return fileManager;
} | 3.68 |
morf_SqlServerDialect_alterTableChangeColumnStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#alterTableChangeColumnStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Column, org.alfasoftware.morf.metadata.Column)
*/
@Override
public Collection<String> alterTableChangeColumnStatements(Table table, final Column oldColumn, Column newColumn) {
List<String> statements = new ArrayList<>();
// If we are removing the autonumber then we must completely rebuild the table
// without the autonumber (identity) property before we do anything else
// PLEASE NOTE - THIS DOES NOT COPY VIEWS OR INDEXES -- See WEB-23759
if (oldColumn.isAutoNumbered() && !newColumn.isAutoNumbered()) {
// Create clone of table
Table clone = table(table.getName() + "Clone")
.columns(table.columns().toArray(new Column[table.columns().size()]));
Collection<String> cloneTableStatements = tableDeploymentStatements(clone);
statements.addAll(tableDeploymentStatements(clone));
// Meta data switch of the data from the original table to the cloned table
statements.add("ALTER TABLE " + schemaNamePrefix() + table.getName() + " SWITCH TO " + schemaNamePrefix() + clone.getName());
// Drop original table
statements.add("DROP TABLE " + schemaNamePrefix() + table.getName());
// Rename clone to make it look like the original table
statements.add(String.format("EXECUTE sp_rename '%s%s', '%s%s'",
schemaNamePrefix(),
clone.getName(),
schemaNamePrefix(),
table.getName()
));
if (containsPrimaryKeyConstraint(cloneTableStatements, clone.getName())) {
statements.add(String.format("EXECUTE sp_rename '%s%s_PK', '%s%s_PK', 'OBJECT'",
schemaNamePrefix(),
clone.getName(),
schemaNamePrefix(),
table.getName()
));
}
}
// build the old version of the table
Table oldTable = oldTableForChangeColumn(table, oldColumn, newColumn);
// If we are dropping or changing a column, drop indexes containing that column
for (Index index : oldTable.indexes()) {
for (String column : index.columnNames()) {
if (column.equalsIgnoreCase(oldColumn.getName())) {
statements.addAll(indexDropStatements(oldTable, index));
}
}
}
// Drop any defaults for the old column
if (StringUtils.isNotBlank(oldColumn.getDefaultValue()))
statements.add(dropDefaultForColumn(table, oldColumn));
// -- Rename the column if we need to
//
if (!oldColumn.getName().equals(newColumn.getName())) {
statements.add(String.format("EXEC sp_rename '%s%s.%s', '%s', 'COLUMN'",
schemaNamePrefix(),
table.getName(),
oldColumn.getName(),
newColumn.getName()
));
}
// Drop and re-create the primary key if either new or old columns are part of the PK
boolean recreatePrimaryKey = oldColumn.isPrimaryKey() || newColumn.isPrimaryKey();
// only drop if there actually was a PK though...
if (recreatePrimaryKey && !primaryKeysForTable(oldTable).isEmpty()) {
statements.add(dropPrimaryKey(table));
}
statements.add(new StringBuilder()
.append("ALTER TABLE ")
.append(schemaNamePrefix())
.append(table.getName())
.append(" ALTER COLUMN ")
.append(newColumn.getName())
.append(' ')
.append(sqlRepresentationOfColumnType(table, newColumn, true))
.toString());
// Create the indexes we dropped previously
for (Index index : table.indexes()) {
for (String column : index.columnNames()) {
if (column.equalsIgnoreCase(newColumn.getName())) {
statements.addAll(addIndexStatements(table, index));
}
}
}
List<Column> primaryKeyColumns = primaryKeysForTable(table);
// Recreate the primary key if necessary
if (recreatePrimaryKey && !primaryKeyColumns.isEmpty()) {
statements.add(new StringBuilder()
.append("ALTER TABLE ").append(schemaNamePrefix()).append(table.getName()).append(" ADD ")
.append(buildPrimaryKeyConstraint(table.getName(), namesOfColumns(primaryKeyColumns)))
.toString()
);
}
return statements;
} | 3.68 |