name | code_snippet | score
---|---|---|
flink_ConfigOptions_booleanType | /** Defines that the value of the option should be of {@link Boolean} type. */
public TypedConfigOptionBuilder<Boolean> booleanType() {
return new TypedConfigOptionBuilder<>(key, Boolean.class);
} | 3.68 |
hudi_HoodieMetaSyncOperations_tableExists | /**
* Check if table exists in metastore.
*/
default boolean tableExists(String tableName) {
return false;
} | 3.68 |
morf_SqlScriptExecutor_execute | /**
* @param sqlStatement The SQL statement to execute
* @param parameterMetadata The metadata of the parameters being supplied.
* @param parameterData The values of the parameters.
* @return The number of rows updated/affected by this statement
* @see #execute(String, Connection, Iterable, DataValueLookup)
*/
public int execute(final String sqlStatement, final Iterable<SqlParameter> parameterMetadata, final DataValueLookup parameterData) {
final Holder<Integer> holder = new Holder<>();
doWork(new Work() {
@Override
public void execute(Connection connection) throws SQLException {
holder.set(SqlScriptExecutor.this.execute(sqlStatement, connection, parameterMetadata, parameterData));
}
});
return holder.get();
} | 3.68 |
flink_ConfigurationUtils_createConfiguration | /**
* Creates a new {@link Configuration} from the given {@link Properties}.
*
* @param properties to convert into a {@link Configuration}
* @return {@link Configuration} which has been populated by the values of the given {@link
* Properties}
*/
@Nonnull
public static Configuration createConfiguration(Properties properties) {
final Configuration configuration = new Configuration();
final Set<String> propertyNames = properties.stringPropertyNames();
for (String propertyName : propertyNames) {
configuration.setString(propertyName, properties.getProperty(propertyName));
}
return configuration;
} | 3.68 |
zxing_MatrixUtil_maybeEmbedPositionAdjustmentPatterns | // Embed position adjustment patterns if need be.
private static void maybeEmbedPositionAdjustmentPatterns(Version version, ByteMatrix matrix) {
if (version.getVersionNumber() < 2) { // The patterns appear if version >= 2
return;
}
int index = version.getVersionNumber() - 1;
int[] coordinates = POSITION_ADJUSTMENT_PATTERN_COORDINATE_TABLE[index];
for (int y : coordinates) {
if (y >= 0) {
for (int x : coordinates) {
if (x >= 0 && isEmpty(matrix.get(x, y))) {
// If the cell is unset, we embed the position adjustment pattern here.
// -2 is necessary since the x/y coordinates point to the center of the pattern, not the
// left top corner.
embedPositionAdjustmentPattern(x - 2, y - 2, matrix);
}
}
}
}
} | 3.68 |
flink_SqlCreateTable_getColumnSqlString | /**
* Returns the projection format of the DDL columns (including computed columns), i.e. the
* following DDL:
*
* <pre>
* create table tbl1(
* col1 int,
* col2 varchar,
* col3 as to_timestamp(col2)
* ) with (
* 'connector' = 'csv'
* )
* </pre>
*
* <p>is equivalent to the query "col1, col2, to_timestamp(col2) as col3"; note that the
* operands of a computed column are reversed (the expression comes first, followed by AS and the column name).
*/
public String getColumnSqlString() {
SqlPrettyWriter writer =
new SqlPrettyWriter(
SqlPrettyWriter.config()
.withDialect(AnsiSqlDialect.DEFAULT)
.withAlwaysUseParentheses(true)
.withSelectListItemsOnSeparateLines(false)
.withIndentation(0));
writer.startList("", "");
for (SqlNode column : columnList) {
writer.sep(",");
SqlTableColumn tableColumn = (SqlTableColumn) column;
if (tableColumn instanceof SqlComputedColumn) {
SqlComputedColumn computedColumn = (SqlComputedColumn) tableColumn;
computedColumn.getExpr().unparse(writer, 0, 0);
writer.keyword("AS");
}
tableColumn.getName().unparse(writer, 0, 0);
}
return writer.toString();
} | 3.68 |
hadoop_CyclicIteration_remove | /** Not supported */
@Override
public void remove() {
throw new UnsupportedOperationException("Not supported");
} | 3.68 |
druid_CalciteMySqlNodeVisitor_convertToSingleValuesIfNeed | /**
* If there are multiple VALUES clauses and all values in them are literals,
* converts the values clauses to a single values clause.
*
* @param valuesClauseList the VALUES clauses of the INSERT statement
* @return a single-element list when the clauses can be collapsed, otherwise the original list
*/
public static List<SQLInsertStatement.ValuesClause> convertToSingleValuesIfNeed(List<SQLInsertStatement.ValuesClause> valuesClauseList) {
if (valuesClauseList.size() <= 1) {
return valuesClauseList;
}
// If they are all literals
for (SQLInsertStatement.ValuesClause clause : valuesClauseList) {
for (SQLExpr expr : clause.getValues()) {
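// Only a '?' placeholder is acceptable here; any other expression means the
// clauses cannot be collapsed, so the original list is returned unchanged.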
if (expr instanceof SQLVariantRefExpr) {
if (((SQLVariantRefExpr) expr).getName().equals("?")) {
continue;
}
}
return valuesClauseList;
}
}
// Return only the first values clause.
return Arrays.asList(valuesClauseList.get(0));
} | 3.68 |
hbase_Result_copyFrom | /**
* Copy another Result into this one. Needed for the old Mapred framework
* @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT (which is supposed
* to be immutable).
*/
public void copyFrom(Result other) {
checkReadonly();
this.row = null;
this.familyMap = null;
this.cells = other.cells;
} | 3.68 |
hadoop_HdfsFileStatus_ecPolicy | /**
* Set the erasure coding policy for this entity (default = null).
* @param ecPolicy Erasure coding policy
* @return This Builder instance
*/
public Builder ecPolicy(ErasureCodingPolicy ecPolicy) {
this.ecPolicy = ecPolicy;
return this;
} | 3.68 |
hudi_QuickstartUtils_generateUpdates | /**
* Generates new updates, randomly distributed across the keys above. There can be duplicates within the returned
* list
*
* @param n Number of updates (including dups)
* @return list of hoodie record updates
*/
public List<HoodieRecord> generateUpdates(Integer n) {
if (numExistingKeys == 0) {
throw new HoodieException("Data must have been written before performing the update operation");
}
String randomString = generateRandomString();
return IntStream.range(0, n).boxed().map(x -> {
try {
return generateUpdateRecord(existingKeys.get(rand.nextInt(numExistingKeys)), randomString);
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}).collect(Collectors.toList());
} | 3.68 |
hbase_TagUtil_asList | /**
* Creates a list of tags from the given byte array, which is expected to be in the tag format.
* @param b The byte array
* @param offset The offset in the array where the tag bytes begin
* @param length Total length of all tag bytes
* @return List of tags
*/
public static List<Tag> asList(byte[] b, int offset, int length) {
List<Tag> tags = new ArrayList<>();
int pos = offset;
while (pos < offset + length) {
int tagLen = Bytes.readAsInt(b, pos, Tag.TAG_LENGTH_SIZE);
tags.add(new ArrayBackedTag(b, pos, tagLen + Tag.TAG_LENGTH_SIZE));
pos += Tag.TAG_LENGTH_SIZE + tagLen;
}
return tags;
} | 3.68 |
hadoop_TypedBytesOutput_writeMap | /**
* Writes a map as a typed bytes sequence.
*
* @param map the map to be written
* @throws IOException
*/
@SuppressWarnings("unchecked")
public void writeMap(Map map) throws IOException {
writeMapHeader(map.size());
Set<Entry> entries = map.entrySet();
for (Entry entry : entries) {
write(entry.getKey());
write(entry.getValue());
}
} | 3.68 |
rocketmq-connect_HudiSinkTask_start | /**
* Remember to always close the CqlSession according to
* https://docs.datastax.com/en/developer/java-driver/4.5/manual/core/
* @param props
*/
@Override
public void start(KeyValue props) {
try {
ConfigUtil.load(props, this.hudiConnectConfig);
log.info("init data source success");
} catch (Exception e) {
log.error("Cannot start Hudi Sink Task because of configuration error{}", e);
}
try {
updater = new Updater(hudiConnectConfig);
updater.start();
} catch (Throwable e) {
log.error("fail to start updater{}", e);
}
} | 3.68 |
flink_HybridShuffleConfiguration_getRegionGroupSizeInBytes | /** Segment size of hybrid spilled file data index. */
public int getRegionGroupSizeInBytes() {
return regionGroupSizeInBytes;
} | 3.68 |
hbase_SequenceIdAccounting_resetHighest | /**
* Reset the accounting of highest sequenceid by regionname.
* @return Return the previous accounting Map of regions to the last sequence id written into
* each.
*/
Map<byte[], Long> resetHighest() {
Map<byte[], Long> old = this.highestSequenceIds;
this.highestSequenceIds = new HashMap<>();
return old;
} | 3.68 |
pulsar_BrokerService_updateConfigurationAndRegisterListeners | /**
* Updates the dynamic ServiceConfiguration with the values present in the zk-configuration-map, and registers
* listeners on dynamic ServiceConfiguration fields to take appropriate action when the zk-configuration-map changes.
*/
private void updateConfigurationAndRegisterListeners() {
// (1) Dynamic-config value validation: add a validator if the updated value requires a strict check before being applied
// validate that the configured load-manager classname is present on the classpath
addDynamicConfigValidator("loadManagerClassName", (className) -> {
try {
Class.forName(className);
} catch (ClassNotFoundException | NoClassDefFoundError e) {
log.warn("Configured load-manager class {} not found {}", className, e.getMessage());
return false;
}
return true;
});
// (2) Listener Registration
// add listener on "maxConcurrentLookupRequest" value change
registerConfigurationListener("maxConcurrentLookupRequest",
(maxConcurrentLookupRequest) -> lookupRequestSemaphore.set(
new Semaphore((int) maxConcurrentLookupRequest, false)));
// add listener on "maxConcurrentTopicLoadRequest" value change
registerConfigurationListener("maxConcurrentTopicLoadRequest",
(maxConcurrentTopicLoadRequest) -> topicLoadRequestSemaphore.set(
new Semaphore((int) maxConcurrentTopicLoadRequest, false)));
registerConfigurationListener("loadManagerClassName", className -> {
pulsar.getExecutor().execute(() -> {
LoadManager newLoadManager = null;
try {
newLoadManager = LoadManager.create(pulsar);
log.info("Created load manager: {}", className);
pulsar.getLoadManager().get().stop();
newLoadManager.start();
} catch (Exception ex) {
log.warn("Failed to change load manager", ex);
try {
if (newLoadManager != null) {
newLoadManager.stop();
newLoadManager = null;
}
} catch (PulsarServerException e) {
log.warn("Failed to close created load manager", e);
}
}
if (newLoadManager != null) {
pulsar.getLoadManager().set(newLoadManager);
}
});
});
// add listener to notify broker managedLedgerCacheSizeMB dynamic config
registerConfigurationListener("managedLedgerCacheSizeMB", (managedLedgerCacheSizeMB) -> {
managedLedgerFactory.getEntryCacheManager()
.updateCacheSizeAndThreshold(((int) managedLedgerCacheSizeMB) * 1024L * 1024L);
});
// add listener to notify broker managedLedgerCacheEvictionWatermark dynamic config
registerConfigurationListener(
"managedLedgerCacheEvictionWatermark", (cacheEvictionWatermark) -> {
managedLedgerFactory.getEntryCacheManager()
.updateCacheEvictionWatermark((double) cacheEvictionWatermark);
});
// add listener to notify broker managedLedgerCacheEvictionTimeThresholdMillis dynamic config
registerConfigurationListener(
"managedLedgerCacheEvictionTimeThresholdMillis", (cacheEvictionTimeThresholdMills) -> {
managedLedgerFactory.updateCacheEvictionTimeThreshold(TimeUnit.MILLISECONDS
.toNanos((long) cacheEvictionTimeThresholdMills));
});
// add listener to update message-dispatch-rate in msg for topic
registerConfigurationListener("dispatchThrottlingRatePerTopicInMsg", (dispatchRatePerTopicInMsg) -> {
updateTopicMessageDispatchRate();
});
// add listener to update message-dispatch-rate in byte for topic
registerConfigurationListener("dispatchThrottlingRatePerTopicInByte", (dispatchRatePerTopicInByte) -> {
updateTopicMessageDispatchRate();
});
// add listener to update managed-ledger config to skipNonRecoverableLedgers
registerConfigurationListener("autoSkipNonRecoverableData", (skipNonRecoverableLedger) -> {
updateManagedLedgerConfig();
});
// add listener to update message-dispatch-rate in msg for subscription
registerConfigurationListener("dispatchThrottlingRatePerSubscriptionInMsg", (dispatchRatePerTopicInMsg) -> {
updateSubscriptionMessageDispatchRate();
});
// add listener to update message-dispatch-rate in byte for subscription
registerConfigurationListener("dispatchThrottlingRatePerSubscriptionInByte", (dispatchRatePerTopicInByte) -> {
updateSubscriptionMessageDispatchRate();
});
// add listener to update message-dispatch-rate in msg for replicator
registerConfigurationListener("dispatchThrottlingRatePerReplicatorInMsg",
(dispatchRatePerTopicInMsg) -> {
updateReplicatorMessageDispatchRate();
});
// add listener to update message-dispatch-rate in byte for replicator
registerConfigurationListener("dispatchThrottlingRatePerReplicatorInByte",
(dispatchRatePerTopicInByte) -> {
updateReplicatorMessageDispatchRate();
});
// add listener to notify broker publish-rate monitoring
registerConfigurationListener("brokerPublisherThrottlingTickTimeMillis",
(publisherThrottlingTickTimeMillis) -> {
setupBrokerPublishRateLimiterMonitor();
});
// add listener to update topic publish-rate dynamic config
registerConfigurationListener("maxPublishRatePerTopicInMessages",
maxPublishRatePerTopicInMessages -> updateMaxPublishRatePerTopicInMessages()
);
registerConfigurationListener("maxPublishRatePerTopicInBytes",
maxPublishRatePerTopicInMessages -> updateMaxPublishRatePerTopicInMessages()
);
// add listener to update subscribe-rate dynamic config
registerConfigurationListener("subscribeThrottlingRatePerConsumer",
subscribeThrottlingRatePerConsumer -> updateSubscribeRate());
registerConfigurationListener("subscribeRatePeriodPerConsumerInSecond",
subscribeRatePeriodPerConsumerInSecond -> updateSubscribeRate());
// add listener to notify broker publish-rate dynamic config
registerConfigurationListener("brokerPublisherThrottlingMaxMessageRate",
(brokerPublisherThrottlingMaxMessageRate) ->
updateBrokerPublisherThrottlingMaxRate());
registerConfigurationListener("brokerPublisherThrottlingMaxByteRate",
(brokerPublisherThrottlingMaxByteRate) ->
updateBrokerPublisherThrottlingMaxRate());
// add listener to notify broker dispatch-rate dynamic config
registerConfigurationListener("dispatchThrottlingRateInMsg",
(dispatchThrottlingRateInMsg) ->
updateBrokerDispatchThrottlingMaxRate());
registerConfigurationListener("dispatchThrottlingRateInByte",
(dispatchThrottlingRateInByte) ->
updateBrokerDispatchThrottlingMaxRate());
// add listener to notify topic publish-rate monitoring
if (!preciseTopicPublishRateLimitingEnable) {
registerConfigurationListener("topicPublisherThrottlingTickTimeMillis",
(publisherThrottlingTickTimeMillis) -> {
setupTopicPublishRateLimiterMonitor();
});
}
// add listener to notify topic subscriptionTypesEnabled changed.
registerConfigurationListener("subscriptionTypesEnabled", this::updateBrokerSubscriptionTypesEnabled);
// add listener to notify partitioned topic defaultNumPartitions changed
registerConfigurationListener("defaultNumPartitions", defaultNumPartitions -> {
this.updateDefaultNumPartitions((int) defaultNumPartitions);
});
// add listener to notify partitioned topic maxNumPartitionsPerPartitionedTopic changed
registerConfigurationListener("maxNumPartitionsPerPartitionedTopic", maxNumPartitions -> {
this.updateMaxNumPartitionsPerPartitionedTopic((int) maxNumPartitions);
});
// add listener to notify web service httpRequestsFailOnUnknownPropertiesEnabled changed.
registerConfigurationListener("httpRequestsFailOnUnknownPropertiesEnabled", enabled -> {
pulsar.getWebService().updateHttpRequestsFailOnUnknownPropertiesEnabled((boolean) enabled);
});
// add more listeners here
// (3) create dynamic-config if not exist.
createDynamicConfigPathIfNotExist();
// (4) update ServiceConfiguration value by reading zk-configuration-map and trigger corresponding listeners.
handleDynamicConfigurationUpdates();
} | 3.68 |
hadoop_IteratorSelector_setPartition | /**
* Set partition for this iterator selector.
* @param p partition
*/
public void setPartition(String p) {
this.partition = p;
} | 3.68 |
pulsar_KeySharedPolicy_setAllowOutOfOrderDelivery | /**
* If enabled, it will relax the ordering requirement, allowing the broker to send out-of-order messages in case of
* failures. This will make it faster for new consumers to join without being stalled by an existing slow consumer.
*
* <p>In this case, a single consumer will still receive all the keys, but they may be coming in different orders.
*
* @param allowOutOfOrderDelivery
* whether to allow for out of order delivery
* @return KeySharedPolicy instance
*/
public KeySharedPolicy setAllowOutOfOrderDelivery(boolean allowOutOfOrderDelivery) {
this.allowOutOfOrderDelivery = allowOutOfOrderDelivery;
return this;
} | 3.68 |
hbase_RegionInfo_hasEncodedName | /**
* Does region name contain its encoded name?
* @param regionName region name
* @return boolean indicating if this is a new format region name which contains its encoded name.
*/
@InterfaceAudience.Private
static boolean hasEncodedName(final byte[] regionName) {
// check if region name ends in ENC_SEPARATOR
return (regionName.length >= 1)
&& (regionName[regionName.length - 1] == RegionInfo.ENC_SEPARATOR);
} | 3.68 |
hudi_ImmutableTriple_getMiddle | /**
* {@inheritDoc}
*/
@Override
public M getMiddle() {
return middle;
} | 3.68 |
Activiti_DelegateInvocation_getInvocationResult | /**
* @return the result of the invocation (can be null if the invocation does not return a result)
*/
public Object getInvocationResult() {
return invocationResult;
} | 3.68 |
flink_ResourceManager_setFailUnfulfillableRequest | /**
* Sets whether the {@link SlotManager} should fail unfulfillable slot requests.
*
* @param failUnfulfillableRequest whether to fail unfulfillable requests
*/
protected void setFailUnfulfillableRequest(boolean failUnfulfillableRequest) {
slotManager.setFailUnfulfillableRequest(failUnfulfillableRequest);
} | 3.68 |
hbase_HFileReaderImpl_indexSize | /**
* @return the total heap size of data and meta block indexes in bytes. Does not take into account
* non-root blocks of a multilevel data index.
*/
@Override
public long indexSize() {
return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0)
+ ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() : 0);
} | 3.68 |
flink_DynamicSourceUtils_isSourceChangeEventsDuplicate | /** Returns true if the table source produces duplicate change events. */
public static boolean isSourceChangeEventsDuplicate(
ResolvedSchema resolvedSchema,
DynamicTableSource tableSource,
TableConfig tableConfig) {
if (!(tableSource instanceof ScanTableSource)) {
return false;
}
ChangelogMode mode = ((ScanTableSource) tableSource).getChangelogMode();
boolean isCDCSource =
!mode.containsOnly(RowKind.INSERT) && !isUpsertSource(resolvedSchema, tableSource);
boolean changeEventsDuplicate =
tableConfig.get(ExecutionConfigOptions.TABLE_EXEC_SOURCE_CDC_EVENTS_DUPLICATE);
boolean hasPrimaryKey = resolvedSchema.getPrimaryKey().isPresent();
return isCDCSource && changeEventsDuplicate && hasPrimaryKey;
} | 3.68 |
hbase_MasterRpcServices_execProcedureWithRet | /**
* Triggers a synchronous attempt to run a distributed procedure and sets return data in response.
* {@inheritDoc}
*/
@Override
public ExecProcedureResponse execProcedureWithRet(RpcController controller,
ExecProcedureRequest request) throws ServiceException {
rpcPreCheck("execProcedureWithRet");
try {
ProcedureDescription desc = request.getProcedure();
MasterProcedureManager mpm =
server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature());
if (mpm == null) {
throw new ServiceException("The procedure is not registered: " + desc.getSignature());
}
LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature());
byte[] data = mpm.execProcedureWithRet(desc);
ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder();
// set return data if available
if (data != null) {
builder.setReturnData(UnsafeByteOperations.unsafeWrap(data));
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.68 |
framework_VTabsheet_getTooltipInfo | /**
* Returns the tab caption's tooltip info if it has been configured.
*
* @return the tooltip info, or {@code null} if no tooltip configuration
* found
*/
public TooltipInfo getTooltipInfo() {
return tabCaption.getTooltipInfo();
} | 3.68 |
framework_StringToEnumConverter_enumToString | /**
* Converts the given enum to a human readable string using the given
* locale.
* <p>
* Compatible with {@link #stringToEnum(String, Class, Locale)}
*
* @param value
* The enum value to convert
* @param locale
* The locale to use for conversion. If null, the JVM default
* locale will be used
* @return A human readable string based on the enum
* @throws ConversionException
* if the conversion fails
*/
public static String enumToString(Enum<?> value, Locale locale) {
if (locale == null) {
locale = Locale.getDefault();
}
String enumString = value.toString();
if (enumString.equals(value.name())) {
// FOO -> Foo
// FOO_BAR -> Foo bar
// _FOO -> _foo
String result = enumString.substring(0, 1).toUpperCase(locale);
result += enumString.substring(1).toLowerCase(locale).replace('_',
' ');
return result;
} else {
return enumString;
}
} | 3.68 |
flink_RowTimeIntervalJoin_getMaxOutputDelay | /**
* Get the maximum interval between receiving a row and emitting it (as part of a joined
* result). This is the time interval by which watermarks need to be held back.
*
* @return the maximum delay for the outputs
*/
public long getMaxOutputDelay() {
return Math.max(leftRelativeSize, rightRelativeSize) + allowedLateness;
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_getMethodAdaptiveValue | /**
* Get the value of the adaptive annotation or, if empty, return the split simple name
*/
private String[] getMethodAdaptiveValue(Adaptive adaptiveAnnotation) {
String[] value = adaptiveAnnotation.value();
// value is not set, use the value generated from class name as the key
if (value.length == 0) {
String splitName = StringUtils.camelToSplitName(type.getSimpleName(), ".");
value = new String[] {splitName};
}
return value;
} | 3.68 |
flink_UnsortedGrouping_withPartitioner | /**
* Uses a custom partitioner for the grouping.
*
* @param partitioner The custom partitioner.
* @return The grouping object itself, to allow for method chaining.
*/
public UnsortedGrouping<T> withPartitioner(Partitioner<?> partitioner) {
Preconditions.checkNotNull(partitioner);
getKeys().validateCustomPartitioner(partitioner, null);
this.customPartitioner = partitioner;
return this;
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_getPath | /**
* Returns the i<sup>th</sup> Path.
*/
@Override
public Path getPath(int i) {
return inputSplitShim.getPath(i);
} | 3.68 |
querydsl_SQLTemplates_serializeUpdate | /**
* Template method for UPDATE serialization.
*
* @param metadata the query metadata
* @param entity the entity to update
* @param updates the path-to-expression update mappings
* @param context the serializer to write to
*/
public void serializeUpdate(QueryMetadata metadata, RelationalPath<?> entity,
Map<Path<?>, Expression<?>> updates, SQLSerializer context) {
context.serializeForUpdate(metadata, entity, updates);
// limit
if (metadata.getModifiers().isRestricting()) {
serializeModifiers(metadata, context);
}
if (!metadata.getFlags().isEmpty()) {
context.serialize(Position.END, metadata.getFlags());
}
} | 3.68 |
hbase_ProcedureExecutor_nextProcId | // ==========================================================================
// Procedure IDs helpers
// ==========================================================================
private long nextProcId() {
long procId = lastProcId.incrementAndGet();
if (procId < 0) {
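// The id counter wrapped around into the negative range; reset the shared
// counter back to 0 and advance past any ids still registered in the map.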
while (!lastProcId.compareAndSet(procId, 0)) {
procId = lastProcId.get();
if (procId >= 0) {
break;
}
}
while (procedures.containsKey(procId)) {
procId = lastProcId.incrementAndGet();
}
}
assert procId >= 0 : "Invalid procId " + procId;
return procId;
} | 3.68 |
hbase_TableMapReduceUtil_resetCacheConfig | /**
* Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on direct
* memory will likely cause the map tasks to OOM when opening the region. This is done here
* instead of in TableSnapshotRegionRecordReader in case an advanced user wants to override this
* behavior in their job.
*/
public static void resetCacheConfig(Configuration conf) {
conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f);
conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
} | 3.68 |
hbase_MultiVersionConcurrencyControl_completeAndWait | /**
* Complete a {@link WriteEntry} that was created by {@link #begin()} then wait until the read
* point catches up to our write. At the end of this call, the global read point is at least as
* large as the write point of the passed in WriteEntry. Thus, the write is visible to MVCC
* readers.
*/
public void completeAndWait(WriteEntry e) {
if (!complete(e)) {
waitForRead(e);
}
} | 3.68 |
hbase_VersionModel_setOSVersion | /**
* @param version the OS version string
*/
public void setOSVersion(String version) {
this.osVersion = version;
} | 3.68 |
framework_BindingValidationStatus_getResult | /**
* Gets the validation result if status is either {@link Status#OK} or
* {@link Status#ERROR} or an empty optional if status is
* {@link Status#UNRESOLVED}.
*
* @return the validation result
*/
public Optional<ValidationResult> getResult() {
if (result == null) {
return Optional.empty();
}
return Optional.of(result.isError()
? ValidationResult.error(result.getMessage().orElse(""))
: ValidationResult.ok());
} | 3.68 |
hbase_MobFileName_getStartKeyFromName | /**
* Gets the startKey from a MobFileName.
* @param fileName file name.
* @return the start key portion of the file name
*/
public static String getStartKeyFromName(final String fileName) {
return fileName.substring(0, STARTKEY_END_INDEX);
} | 3.68 |
framework_VSlider_getNavigationDownKey | /**
* Get the key that decreases the vertical slider. By default it is the down
* arrow key but by overriding this you can change the key to whatever you
* want.
*
* @return The keycode of the key
*/
protected int getNavigationDownKey() {
return KeyCodes.KEY_DOWN;
} | 3.68 |
framework_Tree_expandItemsRecursively | /**
* Expands the items recursively
*
* Expands all the children recursively starting from an item. Operation
* succeeds only if all expandable items are expanded.
*
* @param startItemId
* ID of the initial item
* @return True if the expand operation succeeded
*/
public boolean expandItemsRecursively(Object startItemId) {
boolean result = true;
// Initial stack
final Stack<Object> todo = new Stack<Object>();
todo.add(startItemId);
// Expands recursively
while (!todo.isEmpty()) {
final Object id = todo.pop();
if (areChildrenAllowed(id) && !expandItem(id, false)) {
result = false;
}
if (hasChildren(id)) {
todo.addAll(getChildren(id));
}
}
markAsDirty();
return result;
} | 3.68 |
hbase_ByteBufferListOutputStream_releaseResources | /**
* Release the resources it uses (the ByteBuffers), which are obtained from the pool. Call this only
* when all the data has been fully used. It must be called at the end of usage, otherwise we will leak
* ByteBuffers from the pool.
*/
public void releaseResources() {
try {
close();
} catch (IOException e) {
LOG.debug(e.toString(), e);
}
// Return back all the BBs to pool
for (ByteBuff buf : this.allBufs) {
buf.release();
}
this.allBufs = null;
this.curBuf = null;
} | 3.68 |
hadoop_ApplicationServiceRecordProcessor_createSRVInfo | /**
* Create an application SRV record descriptor.
*
* @param serviceRecord the service record.
* @throws Exception if there is an issue during descriptor creation.
*/
protected void createSRVInfo(ServiceRecord serviceRecord) throws Exception {
List<Endpoint> endpoints = serviceRecord.external;
List<RecordDescriptor> recordDescriptors = new ArrayList<>();
SRVApplicationRecordDescriptor srvInfo;
for (Endpoint endpoint : endpoints) {
srvInfo = new SRVApplicationRecordDescriptor(
serviceRecord, endpoint);
recordDescriptors.add(srvInfo);
}
registerRecordDescriptor(Type.SRV, recordDescriptors);
} | 3.68 |
AreaShop_RegionGroup_getLowerCaseName | /**
* Get the lowercase name of the group (used for getting the config etc).
* @return The name of the group in lowercase
*/
public String getLowerCaseName() {
return getName().toLowerCase();
} | 3.68 |
hbase_Result_containsEmptyColumn | /**
* Checks if the specified column contains an empty value (a zero-length byte array).
* @param family family name
* @param foffset family offset
* @param flength family length
* @param qualifier column qualifier
* @param qoffset qualifier offset
* @param qlength qualifier length
* @return whether or not a latest value exists and is empty
*/
public boolean containsEmptyColumn(byte[] family, int foffset, int flength, byte[] qualifier,
int qoffset, int qlength) {
Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength);
return (kv != null) && (kv.getValueLength() == 0);
} | 3.68 |
hbase_VersionInfoUtil_getVersionComponents | /**
* Returns the version components. Examples: "1.4.3" returns [1, 4, 3], "4.5.6-SNAPSHOT" returns
* [4, 5, 6, "SNAPSHOT"].
* @return the components of the version string
*/
private static String[] getVersionComponents(final HBaseProtos.VersionInfo versionInfo) {
return versionInfo.getVersion().split("[\\.-]");
} | 3.68 |
dubbo_MetadataService_toSortedStrings | /**
* Converts the specified {@link Stream} of {@link URL URLs} to the {@link URL#toFullString() strings} representing
* the {@link URL URLs}
*
* @param stream {@link Stream} of {@link URL}
* @return the non-null read-only {@link SortedSet sorted set} of {@link URL#toFullString() strings} representing the URLs
* @see URL#toFullString()
*/
static SortedSet<String> toSortedStrings(Stream<URL> stream) {
return unmodifiableSortedSet(stream.map(URL::toFullString).collect(TreeSet::new, Set::add, Set::addAll));
} | 3.68 |
hudi_FlinkClusteringConfig_toFlinkConfig | /**
* Transforms a {@code FlinkClusteringConfig.config} into {@code Configuration}.
* The latter is more suitable for the table APIs. It reads all the properties
* in the properties file (set by `--props` option) and cmd line options
* (set by `--hoodie-conf` option).
*/
public static Configuration toFlinkConfig(FlinkClusteringConfig config) {
Map<String, String> propsMap = new HashMap<String, String>((Map) getProps(config));
org.apache.flink.configuration.Configuration conf = fromMap(propsMap);
conf.setString(FlinkOptions.PATH, config.path);
conf.setInteger(FlinkOptions.ARCHIVE_MAX_COMMITS, config.archiveMaxCommits);
conf.setInteger(FlinkOptions.ARCHIVE_MIN_COMMITS, config.archiveMinCommits);
conf.setString(FlinkOptions.CLEAN_POLICY, config.cleanPolicy);
conf.setInteger(FlinkOptions.CLEAN_RETAIN_COMMITS, config.cleanRetainCommits);
conf.setInteger(FlinkOptions.CLEAN_RETAIN_HOURS, config.cleanRetainHours);
conf.setInteger(FlinkOptions.CLEAN_RETAIN_FILE_VERSIONS, config.cleanRetainFileVersions);
conf.setInteger(FlinkOptions.CLUSTERING_DELTA_COMMITS, config.clusteringDeltaCommits);
conf.setInteger(FlinkOptions.CLUSTERING_TASKS, config.clusteringTasks);
conf.setString(FlinkOptions.CLUSTERING_PLAN_STRATEGY_CLASS, config.planStrategyClass);
conf.setString(FlinkOptions.CLUSTERING_PLAN_PARTITION_FILTER_MODE_NAME, config.planPartitionFilterMode);
conf.setLong(FlinkOptions.CLUSTERING_PLAN_STRATEGY_TARGET_FILE_MAX_BYTES, config.targetFileMaxBytes);
conf.setLong(FlinkOptions.CLUSTERING_PLAN_STRATEGY_SMALL_FILE_LIMIT, config.smallFileLimit);
conf.setInteger(FlinkOptions.CLUSTERING_PLAN_STRATEGY_SKIP_PARTITIONS_FROM_LATEST, config.skipFromLatestPartitions);
conf.setString(FlinkOptions.CLUSTERING_PLAN_STRATEGY_CLUSTER_BEGIN_PARTITION, config.clusterBeginPartition);
conf.setString(FlinkOptions.CLUSTERING_PLAN_STRATEGY_CLUSTER_END_PARTITION, config.clusterEndPartition);
conf.setString(FlinkOptions.CLUSTERING_PLAN_STRATEGY_PARTITION_REGEX_PATTERN, config.partitionRegexPattern);
conf.setString(FlinkOptions.CLUSTERING_PLAN_STRATEGY_PARTITION_SELECTED, config.partitionSelected);
conf.setString(FlinkOptions.CLUSTERING_SORT_COLUMNS, config.sortColumns);
conf.setInteger(FlinkOptions.WRITE_SORT_MEMORY, config.sortMemory);
conf.setInteger(FlinkOptions.CLUSTERING_MAX_NUM_GROUPS, config.maxNumGroups);
conf.setInteger(FlinkOptions.CLUSTERING_TARGET_PARTITIONS, config.targetPartitions);
conf.setBoolean(FlinkOptions.CLEAN_ASYNC_ENABLED, config.cleanAsyncEnable);
// use synchronous clustering always
conf.setBoolean(FlinkOptions.CLUSTERING_ASYNC_ENABLED, false);
conf.setBoolean(FlinkOptions.CLUSTERING_SCHEDULE_ENABLED, config.schedule);
// bulk insert conf
HoodieTableConfig tableConfig = createMetaClient(conf).getTableConfig();
conf.setBoolean(FlinkOptions.URL_ENCODE_PARTITIONING, Boolean.parseBoolean(tableConfig.getUrlEncodePartitioning()));
conf.setBoolean(FlinkOptions.HIVE_STYLE_PARTITIONING, Boolean.parseBoolean(tableConfig.getHiveStylePartitioningEnable()));
return conf;
} | 3.68 |
hadoop_CacheStats_getCacheUsed | /**
* Get the approximate amount of cache space used.
*/
public long getCacheUsed() {
return usedBytesCount.get();
} | 3.68 |
framework_VAbstractCalendarPanel_setAssistiveLabelNextYear | /**
* Set assistive label for the next year element.
*
* @param label
* the label to set
* @since 8.4
*/
public void setAssistiveLabelNextYear(String label) {
nextYearAssistiveLabel = label;
} | 3.68 |
morf_DatabaseSchemaManager_invalidateCache | /**
* Invalidate the cache of database tables. Use when the schema has changed underneath this schema manager.
*/
public final void invalidateCache() {
if (log.isDebugEnabled()) {
StackTraceElement stack = new Throwable().getStackTrace()[1];
log.debug("Cache invalidated at " + stack.getClassName() + "." + stack.getMethodName() + ":" + stack.getLineNumber());
}
clearCache();
} | 3.68 |
hbase_KeyValue_checkParameters | /**
* Checks the parameters passed to a constructor.
* @param row row key
* @param rlength row length
* @param family family name
* @param flength family length
* @param qlength qualifier length
* @param vlength value length
* @throws IllegalArgumentException an illegal value was passed
*/
static void checkParameters(final byte[] row, final int rlength, final byte[] family, int flength,
int qlength, int vlength) throws IllegalArgumentException {
if (rlength > Short.MAX_VALUE) {
throw new IllegalArgumentException("Row > " + Short.MAX_VALUE);
}
if (row == null) {
throw new IllegalArgumentException("Row is null");
}
// Family length
flength = family == null ? 0 : flength;
if (flength > Byte.MAX_VALUE) {
throw new IllegalArgumentException("Family > " + Byte.MAX_VALUE);
}
// Qualifier length
if (qlength > Integer.MAX_VALUE - rlength - flength) {
throw new IllegalArgumentException("Qualifier > " + Integer.MAX_VALUE);
}
// Key length
long longKeyLength = getKeyDataStructureSize(rlength, flength, qlength);
if (longKeyLength > Integer.MAX_VALUE) {
throw new IllegalArgumentException("keylength " + longKeyLength + " > " + Integer.MAX_VALUE);
}
// Value length
if (vlength > HConstants.MAXIMUM_VALUE_LENGTH) { // FindBugs INT_VACUOUS_COMPARISON
throw new IllegalArgumentException(
"Value length " + vlength + " > " + HConstants.MAXIMUM_VALUE_LENGTH);
}
} | 3.68 |
framework_HierarchyMapper_fetchItems | /**
* Gets a stream of children for the given item in the form of a flattened
* hierarchy from the back-end and filters the wanted results from the list.
*
* @param parent
* the parent item for the fetch
* @param range
* the requested item range
* @return the stream of items
*/
public Stream<T> fetchItems(T parent, Range range) {
return getHierarchy(parent, false).skip(range.getStart())
.limit(range.length());
} | 3.68 |
graphhopper_Distributions_logNormalDistribution | /**
* Use this function instead of Math.log(normalDistribution(sigma, x)) to avoid an
* arithmetic underflow for very small probabilities.
*/
public static double logNormalDistribution(double sigma, double x) {
return Math.log(1.0 / (sqrt(2.0 * PI) * sigma)) + (-0.5 * pow(x / sigma, 2));
} | 3.68 |
hadoop_StorageStatistics_getScheme | /**
* @return the associated file system scheme if this is scheme specific,
* else return null.
*/
public String getScheme() {
return null;
} | 3.68 |
framework_Escalator_getRowTop | /**
* <em>Calculates</em> the correct top position of a row at a logical
* index, regardless if there is one there or not.
* <p>
* A correct result requires that both {@link #getDefaultRowHeight()} is
* consistent, and the placement and height of all spacers above the
* given logical index are consistent.
*
* @param logicalIndex
* the logical index of the row for which to calculate the
* top position
* @return the position at which to place a row in {@code logicalIndex}
* @see #getRowTop(TableRowElement)
*/
private double getRowTop(int logicalIndex) {
double top = spacerContainer
.getSpacerHeightsSumUntilIndex(logicalIndex);
return top + (logicalIndex * getDefaultRowHeight());
} | 3.68 |
streampipes_OpcUaUtil_retrieveDataTypesFromServer | /**
* Connects to each node individually and updates the data type in accordance with the data from the server.
*
* @param opcNodes List of opcNodes where the data type is not determined appropriately
*/
public static void retrieveDataTypesFromServer(OpcUaClient client, List<OpcNode> opcNodes) throws AdapterException {
for (OpcNode opcNode : opcNodes) {
try {
UInteger dataTypeId =
(UInteger) client.getAddressSpace().getVariableNode(opcNode.getNodeId()).getDataType()
.getIdentifier();
OpcUaTypes.getType(dataTypeId);
opcNode.setType(OpcUaTypes.getType(dataTypeId));
} catch (UaException e) {
throw new AdapterException("Could not guess schema for opc node! " + e.getMessage());
}
}
} | 3.68 |
framework_ContainerOrderedWrapper_lastItemId | /*
* Gets the last item stored in the ordered container Don't add a JavaDoc
* comment here, we use the default documentation from implemented
* interface.
*/
@Override
public Object lastItemId() {
if (ordered) {
return ((Container.Ordered) container).lastItemId();
}
return last;
} | 3.68 |
hadoop_Check_gt0 | /**
* Verifies a long is greater than zero.
*
* @param value long value.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the long is zero or less.
*/
public static long gt0(long value, String name) {
if (value <= 0) {
throw new IllegalArgumentException(
MessageFormat.format("parameter [{0}] = [{1}] must be greater than zero", name, value));
}
return value;
} | 3.68 |
flink_ZooKeeperUtils_createLeaderRetrievalDriverFactory | /**
* Creates a {@link LeaderRetrievalDriverFactory} implemented by ZooKeeper.
*
* @param client The {@link CuratorFramework} ZooKeeper client to use
* @param path The path for the leader zNode
* @param configuration configuration for further config options
* @return {@link LeaderRetrievalDriverFactory} instance.
*/
public static ZooKeeperLeaderRetrievalDriverFactory createLeaderRetrievalDriverFactory(
final CuratorFramework client, final String path, final Configuration configuration) {
final ZooKeeperLeaderRetrievalDriver.LeaderInformationClearancePolicy
leaderInformationClearancePolicy;
if (configuration.get(HighAvailabilityOptions.ZOOKEEPER_TOLERATE_SUSPENDED_CONNECTIONS)) {
leaderInformationClearancePolicy =
ZooKeeperLeaderRetrievalDriver.LeaderInformationClearancePolicy
.ON_LOST_CONNECTION;
} else {
leaderInformationClearancePolicy =
ZooKeeperLeaderRetrievalDriver.LeaderInformationClearancePolicy
.ON_SUSPENDED_CONNECTION;
}
return new ZooKeeperLeaderRetrievalDriverFactory(
client, path, leaderInformationClearancePolicy);
} | 3.68 |
framework_Slider_getMax | /**
* Gets the maximum slider value. The default value is 100.0.
*
* @return the largest value the slider can have
*/
public double getMax() {
return getState(false).maxValue;
} | 3.68 |
morf_XmlDataSetProducer_getUpperCaseName | /**
* @see org.alfasoftware.morf.metadata.Column#getUpperCaseName()
*/
@Override
public String getUpperCaseName() {
return upperCaseColumnName.get();
} | 3.68 |
hbase_MobUtils_getQualifiedMobRootDir | /**
* Gets the qualified root dir of the mob files.
* @param conf The current configuration.
* @return The qualified root dir.
*/
public static Path getQualifiedMobRootDir(Configuration conf) throws IOException {
Path hbaseDir = new Path(conf.get(HConstants.HBASE_DIR));
Path mobRootDir = new Path(hbaseDir, MobConstants.MOB_DIR_NAME);
FileSystem fs = mobRootDir.getFileSystem(conf);
return mobRootDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
} | 3.68 |
hbase_ReplicationSourceShipper_shipEdits | /**
* Do the shipping logic
*/
private void shipEdits(WALEntryBatch entryBatch) {
List<Entry> entries = entryBatch.getWalEntries();
int sleepMultiplier = 0;
if (entries.isEmpty()) {
updateLogPosition(entryBatch);
return;
}
int currentSize = (int) entryBatch.getHeapSize();
source.getSourceMetrics()
.setTimeStampNextToReplicate(entries.get(entries.size() - 1).getKey().getWriteTime());
while (isActive()) {
try {
try {
source.tryThrottle(currentSize);
} catch (InterruptedException e) {
LOG.debug("Interrupted while sleeping for throttling control");
Thread.currentThread().interrupt();
// current thread might be interrupted to terminate
// directly go back to while() for confirm this
continue;
}
// create replicateContext here, so the entries can be GC'd upon return from this call
// stack
ReplicationEndpoint.ReplicateContext replicateContext =
new ReplicationEndpoint.ReplicateContext();
replicateContext.setEntries(entries).setSize(currentSize);
replicateContext.setWalGroupId(walGroupId);
replicateContext.setTimeout(getAdaptiveTimeout(this.shipEditsTimeout, sleepMultiplier));
long startTimeNs = System.nanoTime();
// send the edits to the endpoint. Will block until the edits are shipped and acknowledged
boolean replicated = source.getReplicationEndpoint().replicate(replicateContext);
long endTimeNs = System.nanoTime();
if (!replicated) {
continue;
} else {
sleepMultiplier = Math.max(sleepMultiplier - 1, 0);
}
// Clean up hfile references
for (Entry entry : entries) {
cleanUpHFileRefs(entry.getEdit());
LOG.trace("shipped entry {}: ", entry);
}
// Log and clean up WAL logs
updateLogPosition(entryBatch);
// offsets totalBufferUsed by deducting shipped batchSize (excludes bulk load size)
// this sizeExcludeBulkLoad has to use same calculation that when calling
// acquireBufferQuota() in ReplicationSourceWALReader because they maintain
// same variable: totalBufferUsed
source.postShipEdits(entries, entryBatch.getUsedBufferSize());
// FIXME check relationship between wal group and overall
source.getSourceMetrics().shipBatch(entryBatch.getNbOperations(), currentSize,
entryBatch.getNbHFiles());
source.getSourceMetrics().setAgeOfLastShippedOp(
entries.get(entries.size() - 1).getKey().getWriteTime(), walGroupId);
source.getSourceMetrics().updateTableLevelMetrics(entryBatch.getWalEntriesWithSize());
if (LOG.isTraceEnabled()) {
LOG.debug("Replicated {} entries or {} operations in {} ms", entries.size(),
entryBatch.getNbOperations(), (endTimeNs - startTimeNs) / 1000000);
}
break;
} catch (Exception ex) {
source.getSourceMetrics().incrementFailedBatches();
LOG.warn("{} threw unknown exception:",
source.getReplicationEndpoint().getClass().getName(), ex);
if (
sleepForRetries("ReplicationEndpoint threw exception", sleepForRetries, sleepMultiplier,
maxRetriesMultiplier)
) {
sleepMultiplier++;
}
}
}
} | 3.68 |
hadoop_FederationStateStoreUtils_filterHomeSubCluster | /**
* Filters a HomeSubCluster based on the filter SubCluster.
*
* @param filterSubCluster filter query conditions
* @param homeSubCluster homeSubCluster
* @return true if the home sub-cluster matches the filter conditions, false otherwise.
*/
public static boolean filterHomeSubCluster(SubClusterId filterSubCluster,
SubClusterId homeSubCluster) {
// If the filter condition is empty,
// it means that homeSubCluster needs to be added
if (filterSubCluster == null) {
return true;
}
// If the filter condition filterSubCluster is not empty,
// and filterSubCluster is equal to homeSubCluster, it needs to be added
if (filterSubCluster.equals(homeSubCluster)) {
return true;
}
return false;
} | 3.68 |
framework_AbstractDateFieldElement_getISOValue | /**
* Gets the value of the date field as a ISO8601 compatible string
* (yyyy-MM-dd or yyyy-MM-dd'T'HH:mm:ss depending on whether the element
* supports time).
*
* @return the date in ISO-8601 format
* @since 8.1.0
*/
protected String getISOValue() {
return (String) getCommandExecutor()
.executeScript("return arguments[0].getISOValue();", this);
} | 3.68 |
morf_H2Dialect_getSqlForYYYYMMDDToDate | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForYYYYMMDDToDate(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForYYYYMMDDToDate(Function function) {
AliasedField field = function.getArguments().get(0);
return "CAST(SUBSTRING(" + getSqlFrom(field) + ", 1, 4)||'-'||SUBSTRING(" + getSqlFrom(field) + ", 5, 2)||'-'||SUBSTRING(" + getSqlFrom(field) + ", 7, 2) AS DATE)";
} | 3.68 |
incubator-hugegraph-toolchain_JDBCVendor_buildGteClauseInCombined | /**
* For databases which support selecting by WHERE (a, b, c) >= (va, vb, vc)
*/
public String buildGteClauseInCombined(Line nextStartRow) {
E.checkNotNull(nextStartRow, "nextStartRow");
StringBuilder builder = new StringBuilder();
String[] names = nextStartRow.names();
Object[] values = nextStartRow.values();
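// Emit the column-name tuple first and then the value tuple, producing a
// clause of the form (a, b, c) >= (va, vb, vc).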
builder.append("(");
for (int i = 0, n = names.length; i < n; i++) {
builder.append(names[i]);
if (i != n - 1) {
builder.append(", ");
}
}
builder.append(") >= (");
for (int i = 0, n = values.length; i < n; i++) {
Object value = values[i];
builder.append(this.escapeIfNeeded(value));
if (i != n - 1) {
builder.append(", ");
}
}
builder.append(")");
return builder.toString();
} | 3.68 |
flink_CoGroupOperator_sortSecondGroup | /**
* Sorts Pojo or {@link org.apache.flink.api.java.tuple.Tuple} elements within a
* group in the second input on the specified field in the specified {@link Order}.
*
* <p>Groups can be sorted by multiple fields by chaining {@link
* #sortSecondGroup(String, Order)} calls.
*
* @param fieldExpression The expression to the field on which the group is to be
* sorted.
* @param order The Order in which the specified Tuple field is sorted.
* @return A SortedGrouping with specified order of group element.
* @see Order
*/
public CoGroupOperatorWithoutFunction sortSecondGroup(
String fieldExpression, Order order) {
ExpressionKeys<I2> ek = new ExpressionKeys<>(fieldExpression, input2.getType());
int[] groupOrderKeys = ek.computeLogicalKeyPositions();
for (int key : groupOrderKeys) {
this.groupSortKeyOrderSecond.add(new ImmutablePair<>(key, order));
}
return this;
} | 3.68 |
hadoop_AuditReplayThread_getException | /**
* Get the Exception that caused this thread to stop running, if any, else
* null. Should not be called until this thread has already completed (i.e.,
* after {@link #join()} has been called).
*
* @return The exception which was thrown, if any.
*/
Exception getException() {
return exception;
} | 3.68 |
hbase_BufferedMutator_disableWriteBufferPeriodicFlush | /**
* Disable periodic flushing of the write buffer.
*/
default void disableWriteBufferPeriodicFlush() {
setWriteBufferPeriodicFlush(0, MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS);
} | 3.68 |
hudi_StreamerUtil_getTimeGeneratorConfig | /**
* Returns the time generator config built from the given configuration.
*/
public static HoodieTimeGeneratorConfig getTimeGeneratorConfig(Configuration conf) {
TypedProperties properties = flinkConf2TypedProperties(conf);
// Set the lock configuration, which is needed by the TimeGenerator.
Option<HoodieLockConfig> lockConfig = getLockConfig(conf);
if (lockConfig.isPresent()) {
properties.putAll(lockConfig.get().getProps());
}
return HoodieTimeGeneratorConfig.newBuilder()
.withPath(conf.getString(FlinkOptions.PATH))
.fromProperties(properties)
.build();
} | 3.68 |
hadoop_FileStatusAcceptor_accept | /**
* Accept all prefixes except the one for the base path, "self".
* @param keyPath qualified path to the entry
* @param prefix common prefix in listing.
* @return true if the entry is accepted (i.e. a status entry
* should be generated).
*/
@Override
public boolean accept(Path keyPath, String prefix) {
return !keyPath.equals(qualifiedPath);
} | 3.68 |
hbase_AnnotationReadingPriorityFunction_getPriority | /**
* Returns a 'priority' based on the request type.
* <p/>
* Currently the returned priority is used for queue selection.
* <p/>
* See the {@code SimpleRpcScheduler} as example. It maintains a queue per 'priority type':
* <ul>
* <li>HIGH_QOS (meta requests)</li>
* <li>REPLICATION_QOS (replication requests)</li>
* <li>NORMAL_QOS (user requests).</li>
* </ul>
*/
@Override
public int getPriority(RequestHeader header, Message param, User user) {
int priorityByAnnotation = getAnnotatedPriority(header);
if (priorityByAnnotation >= 0) {
return priorityByAnnotation;
}
if (param == null) {
return HConstants.NORMAL_QOS;
}
return getBasePriority(header, param);
} | 3.68 |
hbase_HMaster_getMasterActiveTime | /** Returns timestamp in millis when HMaster became the active master. */
public long getMasterActiveTime() {
return masterActiveTime;
} | 3.68 |
pulsar_ProducerInterceptor_onPartitionsChange | /**
* This method is called when partitions of the topic (partitioned-topic) changes.
*
* @param topicName topic name
* @param partitions new updated partitions
*/
default void onPartitionsChange(String topicName, int partitions) {
} | 3.68 |
framework_EventCellReference_isFooter | /**
* Is the cell reference for a cell in the footer of the Grid.
*
* @since 7.5
* @return <code>true</code> if referenced cell is in the footer,
* <code>false</code> if not
*/
public boolean isFooter() {
return section == Section.FOOTER;
} | 3.68 |
morf_Function_least | /**
* Helper method to create an instance of the "least" SQL function.
*
* @param fields the fields to evaluate.
* @return an instance of the "least" function.
*/
public static Function least(Iterable<? extends AliasedField> fields) {
return new Function(FunctionType.LEAST, fields);
} | 3.68 |
hibernate-validator_ClassVisitor_visitTypeAsClass | /**
* Doesn't perform any checks at the moment but calls the visit methods on its own elements.
*
* @param element a class element to check
* @param aVoid
*/
@Override
public Void visitTypeAsClass(TypeElement element, Void aVoid) {
visitAllMyElements( element );
return null;
} | 3.68 |
hadoop_CommitContext_abortSingleCommit | /**
* See {@link CommitOperations#abortSingleCommit(SinglePendingCommit)}.
* @param commit pending commit to abort
* @throws FileNotFoundException if the abort ID is unknown
* @throws IOException on any failure
*/
public void abortSingleCommit(final SinglePendingCommit commit)
throws IOException {
commitOperations.abortSingleCommit(commit);
} | 3.68 |
querydsl_JTSCurveExpression_isClosed | /**
* Returns 1 (TRUE) if this Curve is closed [StartPoint ( ) = EndPoint ( )].
*
* @return closed
*/
public BooleanExpression isClosed() {
if (closed == null) {
closed = Expressions.booleanOperation(SpatialOps.IS_CLOSED, mixin);
}
return closed;
} | 3.68 |
dubbo_DubboBeanUtils_registerPlaceholderConfigurerBeanIfNotExists | /**
* Register a placeholder configurer beans if not exists.
* Call this method in BeanDefinitionRegistryPostProcessor,
* in order to enable the registered BeanFactoryPostProcessor bean to be loaded and executed.
*
* @param beanFactory
* @param registry
* @see DubboInfraBeanRegisterPostProcessor
* @see org.springframework.context.support.PostProcessorRegistrationDelegate#invokeBeanFactoryPostProcessors(org.springframework.beans.factory.config.ConfigurableListableBeanFactory, java.util.List)
*/
static void registerPlaceholderConfigurerBeanIfNotExists(
ConfigurableListableBeanFactory beanFactory, BeanDefinitionRegistry registry) {
// Auto register a PropertyPlaceholderConfigurer bean to resolve placeholders with Spring Environment
// PropertySources
// when loading dubbo xml config with @ImportResource
if (!checkBeanExists(beanFactory, PropertySourcesPlaceholderConfigurer.class)) {
Map<String, Object> propertySourcesPlaceholderPropertyValues = new HashMap<>();
propertySourcesPlaceholderPropertyValues.put("ignoreUnresolvablePlaceholders", true);
registerBeanDefinition(
registry,
PropertySourcesPlaceholderConfigurer.class.getName(),
PropertySourcesPlaceholderConfigurer.class,
propertySourcesPlaceholderPropertyValues);
}
} | 3.68 |
hibernate-validator_AnnotationTypeMemberCheck_checkGroupsAttribute | /**
* Checks that the given type element
* <p/>
* <ul>
* <li>has a method with name "groups",</li>
* <li>the return type of this method is {@code Class<?>[]},</li>
* <li>the default value of this method is {@code {}}.</li>
* </ul>
*
* @param element The element of interest.
*
* @return A possibly non-empty set of constraint check errors, never null.
*/
private Set<ConstraintCheckIssue> checkGroupsAttribute(TypeElement element) {
ExecutableElement groupsMethod = getMethod( element, "groups" );
if ( groupsMethod == null ) {
return CollectionHelper.asSet(
ConstraintCheckIssue.error( element, null, "CONSTRAINT_TYPE_MUST_DECLARE_GROUPS_MEMBER" )
);
}
DeclaredType type = getComponentTypeOfArrayReturnType( groupsMethod );
if ( type == null ) {
return CollectionHelper.asSet(
ConstraintCheckIssue.error( groupsMethod, null, "RETURN_TYPE_MUST_BE_CLASS_ARRAY" )
);
}
boolean typeHasNameClass = type.asElement().getSimpleName().contentEquals( "Class" );
boolean typeHasExactlyOneTypeArgument = type.getTypeArguments().size() == 1;
boolean typeArgumentIsUnboundWildcard = validateWildcardBounds( type.getTypeArguments().get( 0 ), null, null );
if ( !( typeHasNameClass && typeHasExactlyOneTypeArgument && typeArgumentIsUnboundWildcard ) ) {
return CollectionHelper.asSet(
ConstraintCheckIssue.error( groupsMethod, null, "RETURN_TYPE_MUST_BE_CLASS_ARRAY" )
);
}
if ( !isEmptyArray( groupsMethod.getDefaultValue() ) ) {
return CollectionHelper.asSet(
ConstraintCheckIssue.error( groupsMethod, null, "DEFAULT_VALUE_MUST_BE_EMPTY_ARRAY" )
);
}
return Collections.emptySet();
} | 3.68 |
shardingsphere-elasticjob_JobAPIFactory_createShardingStatisticsAPI | /**
* Create sharding statistics API.
*
* @param connectString registry center connect string
* @param namespace registry center namespace
* @param digest registry center digest
* @return job sharding statistics API
*/
public static ShardingStatisticsAPI createShardingStatisticsAPI(final String connectString, final String namespace, final String digest) {
return new ShardingStatisticsAPIImpl(RegistryCenterFactory.createCoordinatorRegistryCenter(connectString, namespace, digest));
} | 3.68 |
flink_CopyOnWriteStateMap_putEntry | /** Helper method that is the basis for operations that add mappings. */
private StateMapEntry<K, N, S> putEntry(K key, N namespace) {
final int hash = computeHashForOperationAndDoIncrementalRehash(key, namespace);
final StateMapEntry<K, N, S>[] tab = selectActiveTable(hash);
int index = hash & (tab.length - 1);
for (StateMapEntry<K, N, S> e = tab[index]; e != null; e = e.next) {
if (e.hash == hash && key.equals(e.key) && namespace.equals(e.namespace)) {
// copy-on-write check for entry
if (e.entryVersion < highestRequiredSnapshotVersion) {
e = handleChainedEntryCopyOnWrite(tab, index, e);
}
return e;
}
}
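// No existing entry for this key/namespace: record the structural modification,
// grow the table if the size exceeds the threshold, and add a new entry.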
++modCount;
if (size() > threshold) {
doubleCapacity();
}
return addNewStateMapEntry(tab, key, namespace, hash);
} | 3.68 |
open-banking-gateway_FintechRegistrar_registerFintech | /**
* Register Fintech in the OBG database.
* @param fintechId Fintech ID to register
* @param finTechPassword the Fintech's KeyStore password
* @return Newly created FinTech
*/
@Transactional
public Fintech registerFintech(String fintechId, Supplier<char[]> finTechPassword) {
Fintech fintech = fintechRepository.save(Fintech.builder().globalId(fintechId).build());
fintechSecureStorage.registerFintech(fintech, finTechPassword);
for (int i = 0; i < fintechOnlyKeyPairConfig.getPairCount(); ++i) {
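// Generate a fresh key pair, store the private key in the fintech's secure
// storage (protected by the fintech password), and persist the public key.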
UUID id = UUID.randomUUID();
KeyPair pair = fintechOnlyEncryptionServiceProvider.generateKeyPair();
fintechSecureStorage.fintechOnlyPrvKeyToPrivate(id, new PubAndPrivKey(pair.getPublic(), pair.getPrivate()), fintech, finTechPassword);
FintechPubKey pubKey = FintechPubKey.builder()
.prvKey(entityManager.find(FintechPrvKey.class, id))
.build();
pubKey.setKey(pair.getPublic());
pubKeyRepository.save(pubKey);
}
return fintech;
} | 3.68 |
framework_FilesystemContainer_getChildren | /*
* Gets the ID's of all Items who are children of the specified Item. Don't
* add a JavaDoc comment here, we use the default documentation from
* implemented interface.
*/
@Override
public Collection<File> getChildren(Object itemId) {
if (!(itemId instanceof File)) {
return Collections.unmodifiableCollection(new LinkedList<File>());
}
File[] f;
if (filter != null) {
f = ((File) itemId).listFiles(filter);
} else {
f = ((File) itemId).listFiles();
}
if (f == null) {
return Collections.unmodifiableCollection(new LinkedList<File>());
}
final List<File> l = Arrays.asList(f);
Collections.sort(l);
return Collections.unmodifiableCollection(l);
} | 3.68 |
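The same list-and-sort approach in a standalone JDK form, with a hypothetical directory path; note that Arrays.asList returns a fixed-size view, which is fine here because sorting only reorders elements in place.

import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ListChildrenSketch {
    public static void main(String[] args) {
        File dir = new File("/tmp");              // hypothetical directory
        File[] children = dir.listFiles();        // null if dir is not a directory or cannot be read
        if (children == null) {
            System.out.println("no children");
            return;
        }
        List<File> sorted = Arrays.asList(children);
        Collections.sort(sorted);                 // File is Comparable by pathname
        for (File child : sorted) {
            System.out.println(child.getName());
        }
    }
}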
hadoop_PageBlobFormatHelpers_toShort | /**
* Retrieves a short from the given two bytes.
*/
public static short toShort(byte firstByte, byte secondByte) {
return ByteBuffer.wrap(new byte[] { firstByte, secondByte })
.getShort();
} | 3.68 |
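A quick worked example of the conversion above; ByteBuffer defaults to big-endian byte order, so the first byte becomes the high-order byte.

import java.nio.ByteBuffer;

public class ToShortSketch {
    public static void main(String[] args) {
        // 0x01 followed by 0x02 yields 0x0102 = 258
        short value = ByteBuffer.wrap(new byte[] { 0x01, 0x02 }).getShort();
        System.out.println(value); // 258
    }
}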
morf_MySqlDialect_alterTableDropColumnStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#alterTableDropColumnStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Column)
*/
@Override
public Collection<String> alterTableDropColumnStatements(Table table, Column column) {
List<String> result = new ArrayList<>();
StringBuilder statement = new StringBuilder().append("ALTER TABLE `").append(table.getName()).append("` ")
.append("DROP").append(' ');
statement.append('`').append(column.getName()).append('`');
result.add(statement.toString());
return result;
} | 3.68 |
hbase_ScannerModel_setMaxVersions | /**
* @param maxVersions maximum number of versions to return
*/
public void setMaxVersions(int maxVersions) {
this.maxVersions = maxVersions;
} | 3.68 |
hudi_InternalSchemaChangeApplier_applyColumnTypeChange | /**
* Update col type for hudi table.
*
 * @param colName name of the column to be changed; to change a column inside a nested field, specify its full name
 * @param newType the new type to apply to the column
*/
public InternalSchema applyColumnTypeChange(String colName, Type newType) {
TableChanges.ColumnUpdateChange updateChange = TableChanges.ColumnUpdateChange.get(latestSchema);
updateChange.updateColumnType(colName, newType);
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, updateChange);
} | 3.68 |
hbase_SaslServerAuthenticationProviders_getSimpleProvider | /**
* Extracts the SIMPLE authentication provider.
*/
public SaslServerAuthenticationProvider getSimpleProvider() {
Optional<SaslServerAuthenticationProvider> opt = providers.values().stream()
.filter((p) -> p instanceof SimpleSaslServerAuthenticationProvider).findFirst();
if (!opt.isPresent()) {
throw new RuntimeException("SIMPLE authentication provider not available when it should be");
}
return opt.get();
} | 3.68 |
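The lookup pattern above (scan registered providers for the first instance of a given type and fail loudly if it is missing) in a self-contained form; the Provider types here are illustrative stand-ins, not HBase classes.

import java.util.List;
import java.util.Optional;

public class FirstInstanceSketch {
    interface Provider {}
    static class SimpleProvider implements Provider {}
    static class TokenProvider implements Provider {}

    public static void main(String[] args) {
        List<Provider> providers = List.of(new TokenProvider(), new SimpleProvider());
        Optional<Provider> simple = providers.stream()
                .filter(p -> p instanceof SimpleProvider)
                .findFirst();
        Provider provider = simple.orElseThrow(
                () -> new RuntimeException("SIMPLE provider not available when it should be"));
        System.out.println(provider.getClass().getSimpleName()); // SimpleProvider
    }
}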
hadoop_HsCountersPage_postHead | /*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.TwoColumnLayout#postHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void postHead(Page.HTML<__> html) {
html.
style("#counters, .dt-counters { table-layout: fixed }",
"#counters th { overflow: hidden; vertical-align: middle }",
"#counters .dataTables_wrapper { min-height: 1em }",
"#counters .group { width: 15em }",
"#counters .name { width: 30em }");
} | 3.68 |
morf_DatabaseMetaDataProvider_loadAllViewNames | /**
* Creates a map of all view names,
* indexed by their case-agnostic names.
*
* @return Map of real view names.
*/
protected Map<AName, RealName> loadAllViewNames() {
final ImmutableMap.Builder<AName, RealName> viewNameMappings = ImmutableMap.builder();
try {
final DatabaseMetaData databaseMetaData = connection.getMetaData();
try (ResultSet viewResultSet = databaseMetaData.getTables(null, schemaName, null, tableTypesForViews())) {
while (viewResultSet.next()) {
RealName viewName = readViewName(viewResultSet);
        if (isSystemView(viewName) || isIgnoredView(viewName)) {
          if (log.isDebugEnabled()) {
            log.debug("Skipped system/ignored view [" + viewName + "]");
}
} else {
if (log.isDebugEnabled()) {
log.debug("Found view [" + viewName + "]");
}
viewNameMappings.put(viewName, viewName);
}
}
return viewNameMappings.build();
}
} catch (SQLException e) {
throw new RuntimeSqlException("Error reading metadata for views", e);
}
} | 3.68 |
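A stripped-down JDBC sketch of the same metadata walk; the in-memory H2 URL is a placeholder, and filtering with the "VIEW" table type is an assumption about what tableTypesForViews() resolves to.

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ListViewsSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection connection = DriverManager.getConnection("jdbc:h2:mem:demo")) { // placeholder URL
            DatabaseMetaData metaData = connection.getMetaData();
            try (ResultSet views = metaData.getTables(null, null, null, new String[] { "VIEW" })) {
                while (views.next()) {
                    System.out.println(views.getString("TABLE_NAME"));
                }
            }
        }
    }
}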
hbase_ParseFilter_convertByteArrayToBoolean | /**
* Converts a boolean expressed in a byte array to an actual boolean
* <p>
 * This doesn't use Bytes.toBoolean because Bytes.toBoolean(byte []) assumes that 1 stands for
* true and 0 for false. Here, the byte array representing "true" and "false" is parsed
* <p>
* @param booleanAsByteArray the boolean value expressed as a byte array
* @return the boolean value
*/
public static boolean convertByteArrayToBoolean(byte[] booleanAsByteArray) {
if (booleanAsByteArray == null) {
throw new IllegalArgumentException("convertByteArrayToBoolean called with a null array");
}
if (
booleanAsByteArray.length == 4
&& (booleanAsByteArray[0] == 't' || booleanAsByteArray[0] == 'T')
&& (booleanAsByteArray[1] == 'r' || booleanAsByteArray[1] == 'R')
&& (booleanAsByteArray[2] == 'u' || booleanAsByteArray[2] == 'U')
&& (booleanAsByteArray[3] == 'e' || booleanAsByteArray[3] == 'E')
) {
return true;
} else if (
booleanAsByteArray.length == 5
&& (booleanAsByteArray[0] == 'f' || booleanAsByteArray[0] == 'F')
&& (booleanAsByteArray[1] == 'a' || booleanAsByteArray[1] == 'A')
&& (booleanAsByteArray[2] == 'l' || booleanAsByteArray[2] == 'L')
&& (booleanAsByteArray[3] == 's' || booleanAsByteArray[3] == 'S')
&& (booleanAsByteArray[4] == 'e' || booleanAsByteArray[4] == 'E')
) {
return false;
} else {
throw new IllegalArgumentException("Incorrect Boolean Expression");
}
} | 3.68 |
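A quick usage check against the parsing rules above; only case-insensitive "true" (length 4) and "false" (length 5) are accepted, everything else throws IllegalArgumentException.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.filter.ParseFilter;

public class BooleanParseSketch {
    public static void main(String[] args) {
        byte[] t = "TRUE".getBytes(StandardCharsets.UTF_8);
        byte[] f = "false".getBytes(StandardCharsets.UTF_8);
        System.out.println(ParseFilter.convertByteArrayToBoolean(t)); // true
        System.out.println(ParseFilter.convertByteArrayToBoolean(f)); // false
        // ParseFilter.convertByteArrayToBoolean("yes".getBytes(StandardCharsets.UTF_8))
        // would throw IllegalArgumentException("Incorrect Boolean Expression")
    }
}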
hadoop_ClientThrottlingIntercept_errorReceivingResponse | /**
* Called when a network error occurs before the HTTP status and response
* headers are received. Client-side throttling uses this to collect metrics.
*
* @param event The connection, operation, and request state.
*/
public static void errorReceivingResponse(ErrorReceivingResponseEvent event) {
updateMetrics((HttpURLConnection) event.getConnectionObject(),
event.getRequestResult());
} | 3.68 |
querydsl_SurfaceExpression_centroid | /**
* The mathematical centroid for this Surface as a Point. The result is not guaranteed to
* be on this Surface.
*
* @return centroid
*/
public PointExpression<Point> centroid() {
if (centroid == null) {
centroid = GeometryExpressions.pointOperation(SpatialOps.CENTROID, mixin);
}
return centroid;
} | 3.68 |
framework_VAbstractTextualDate_setISODate | /**
* Sets the value of the date field as a locale independent ISO date
* (yyyy-MM-dd'T'HH:mm:ss or yyyy-MM-dd depending on whether this is a date
* field or a date and time field).
*
* @param isoDate
* the date to set in ISO8601 format, or null to clear the date
* value
* @since 8.1
*/
public void setISODate(String isoDate) {
Date date = null;
if (isoDate != null) {
date = getIsoFormatter().parse(isoDate);
}
setDate(date);
updateBufferedResolutions();
sendBufferedValues();
} | 3.68 |
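The two ISO patterns named in the javadoc, parsed with plain JDK classes purely as an illustration; the widget itself relies on its GWT getIsoFormatter(), not SimpleDateFormat, and the dates below are made up.

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

public class IsoParseSketch {
    public static void main(String[] args) throws ParseException {
        SimpleDateFormat dateOnly = new SimpleDateFormat("yyyy-MM-dd");
        SimpleDateFormat dateAndTime = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
        Date d1 = dateOnly.parse("2021-06-15");             // date field resolution
        Date d2 = dateAndTime.parse("2021-06-15T13:45:30"); // date-and-time field resolution
        System.out.println(d1);
        System.out.println(d2);
    }
}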
hbase_HRegion_rewriteCellTags | /**
* Possibly rewrite incoming cell tags.
*/
private void rewriteCellTags(Map<byte[], List<Cell>> familyMap, final Mutation m) {
// Check if we have any work to do and early out otherwise
// Update these checks as more logic is added here
if (m.getTTL() == Long.MAX_VALUE) {
return;
}
// From this point we know we have some work to do
for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
List<Cell> cells = e.getValue();
assert cells instanceof RandomAccess;
int listSize = cells.size();
for (int i = 0; i < listSize; i++) {
Cell cell = cells.get(i);
List<Tag> newTags = TagUtil.carryForwardTags(null, cell);
newTags = TagUtil.carryForwardTTLTag(newTags, m.getTTL());
// Rewrite the cell with the updated set of tags
cells.set(i, PrivateCellUtil.createCell(cell, newTags));
}
}
} | 3.68 |
morf_AbstractSqlDialectTest_testAddStringColumn | /**
* Test adding a string column.
*/
@Test
public void testAddStringColumn() {
testAlterTableColumn(AlterationType.ADD, column("stringField_new", DataType.STRING, 6).nullable(), expectedAlterTableAddStringColumnStatement());
} | 3.68 |