name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hbase_KeyValue_getQualifierArray | /**
* Returns the backing array of the entire KeyValue (all KeyValue fields are in a single array)
*/
@Override
public byte[] getQualifierArray() {
return bytes;
} | 3.68 |
framework_AbstractInMemoryContainer_doFilterContainer | /**
* Filters the data in the container and updates internal data structures.
* This method should reset any internal data structures and then repopulate
* them so {@link #getItemIds()} and other methods only return the filtered
* items.
*
* @param hasFilters
 *            true if filters have been set for the container, false
 *            otherwise
* @return true if the item set has changed as a result of the filtering
*/
protected boolean doFilterContainer(boolean hasFilters) {
if (!hasFilters) {
boolean changed = getAllItemIds().size() != getVisibleItemIds()
.size();
setFilteredItemIds(null);
return changed;
}
// Reset filtered list
List<ITEMIDTYPE> originalFilteredItemIds = getFilteredItemIds();
boolean wasUnfiltered = false;
if (originalFilteredItemIds == null) {
originalFilteredItemIds = Collections.emptyList();
wasUnfiltered = true;
}
setFilteredItemIds(new ListSet<ITEMIDTYPE>());
// Filter
boolean equal = true;
Iterator<ITEMIDTYPE> origIt = originalFilteredItemIds.iterator();
for (final ITEMIDTYPE id : getAllItemIds()) {
if (passesFilters(id)) {
// filtered list comes from the full list, can use ==
equal = equal && origIt.hasNext() && origIt.next() == id;
getFilteredItemIds().add(id);
}
}
return (wasUnfiltered && !getAllItemIds().isEmpty()) || !equal
|| origIt.hasNext();
} | 3.68 |
Activiti_ProcessEngines_unregister | /**
* Unregisters the given process engine.
*/
public static void unregister(ProcessEngine processEngine) {
processEngines.remove(processEngine.getName());
} | 3.68 |
framework_DateCell_recalculateTimeBarPosition | // date methods are not deprecated in GWT
@SuppressWarnings("deprecation")
private void recalculateTimeBarPosition() {
int h = today.getHours();
int m = today.getMinutes();
if (h >= firstHour && h <= lastHour) {
int pixelTop = weekgrid.getPixelTopFor(m + 60 * h);
todaybar.getStyle().clearDisplay();
todaybar.getStyle().setTop(pixelTop, Unit.PX);
} else {
todaybar.getStyle().setDisplay(Display.NONE);
}
} | 3.68 |
morf_SqlDialect_addIndexStatements | /**
* Generates the SQL to add an index to an existing table.
*
* @param table The existing table.
* @param index The new index being added.
* @return A collection of SQL statements.
*/
public Collection<String> addIndexStatements(Table table, Index index) {
return indexDeploymentStatements(table, index);
} | 3.68 |
flink_AbstractStreamOperatorV2_initializeState | /**
* Stream operators with state which can be restored need to override this hook method.
*
* @param context context that allows to register different states.
*/
@Override
public void initializeState(StateInitializationContext context) throws Exception {} | 3.68 |
hadoop_RollingFileSystemSink_getRollInterval | /**
* Extract the roll interval from the configuration and return it in
* milliseconds.
*
* @return the roll interval in millis
*/
@VisibleForTesting
protected long getRollInterval() {
String rollInterval =
properties.getString(ROLL_INTERVAL_KEY, DEFAULT_ROLL_INTERVAL);
Pattern pattern = Pattern.compile("^\\s*(\\d+)\\s*([A-Za-z]*)\\s*$");
Matcher match = pattern.matcher(rollInterval);
long millis;
if (match.matches()) {
String flushUnit = match.group(2);
int rollIntervalInt;
try {
rollIntervalInt = Integer.parseInt(match.group(1));
} catch (NumberFormatException ex) {
throw new MetricsException("Unrecognized flush interval: "
+ rollInterval + ". Must be a number followed by an optional "
+ "unit. The unit must be one of: minute, hour, day", ex);
}
if ("".equals(flushUnit)) {
millis = TimeUnit.HOURS.toMillis(rollIntervalInt);
} else {
switch (flushUnit.toLowerCase()) {
case "m":
case "min":
case "minute":
case "minutes":
millis = TimeUnit.MINUTES.toMillis(rollIntervalInt);
break;
case "h":
case "hr":
case "hour":
case "hours":
millis = TimeUnit.HOURS.toMillis(rollIntervalInt);
break;
case "d":
case "day":
case "days":
millis = TimeUnit.DAYS.toMillis(rollIntervalInt);
break;
default:
throw new MetricsException("Unrecognized unit for flush interval: "
+ flushUnit + ". Must be one of: minute, hour, day");
}
}
} else {
throw new MetricsException("Unrecognized flush interval: "
+ rollInterval + ". Must be a number followed by an optional unit."
+ " The unit must be one of: minute, hour, day");
}
if (millis < 60000) {
throw new MetricsException("The flush interval property must be "
+ "at least 1 minute. Value was " + rollInterval);
}
return millis;
} | 3.68 |
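The parser above accepts a number followed by an optional unit and defaults to hours when the unit is omitted. A minimal, self-contained sketch of that rule (plain JDK code, not the Hadoop class itself; the demo class name is made up):

```java
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RollIntervalParseDemo {
    // Same pattern as above: a number, then an optional alphabetic unit.
    private static final Pattern INTERVAL = Pattern.compile("^\\s*(\\d+)\\s*([A-Za-z]*)\\s*$");

    static long toMillis(String rollInterval) {
        Matcher m = INTERVAL.matcher(rollInterval);
        if (!m.matches()) {
            throw new IllegalArgumentException("Unrecognized flush interval: " + rollInterval);
        }
        long n = Long.parseLong(m.group(1));
        switch (m.group(2).toLowerCase()) {
            case "": // no unit defaults to hours
            case "h": case "hr": case "hour": case "hours":
                return TimeUnit.HOURS.toMillis(n);
            case "m": case "min": case "minute": case "minutes":
                return TimeUnit.MINUTES.toMillis(n);
            case "d": case "day": case "days":
                return TimeUnit.DAYS.toMillis(n);
            default:
                throw new IllegalArgumentException("Unrecognized unit: " + m.group(2));
        }
    }

    public static void main(String[] args) {
        System.out.println(toMillis("30 min")); // 1800000
        System.out.println(toMillis("2"));      // 7200000 (hours by default)
        System.out.println(toMillis("1 day"));  // 86400000
    }
}
```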
flink_PlannerContext_getSqlToRelConverterConfig | /**
* Returns the {@link SqlToRelConverter} config.
*
 * <p>`expand` is set to false, and each sub-query becomes a
* [[org.apache.calcite.rex.RexSubQuery]].
*/
private SqlToRelConverter.Config getSqlToRelConverterConfig() {
return JavaScalaConversionUtil.<SqlToRelConverter.Config>toJava(
getCalciteConfig().getSqlToRelConverterConfig())
.orElseGet(
() -> {
SqlToRelConverter.Config config =
SqlToRelConverter.config()
.withTrimUnusedFields(false)
.withHintStrategyTable(
FlinkHintStrategies.createHintStrategyTable())
.withInSubQueryThreshold(Integer.MAX_VALUE)
.withExpand(false)
.withRelBuilderFactory(
FlinkRelFactories.FLINK_REL_BUILDER());
// disable project merge in sql2rel phase, let it done by the optimizer
boolean mergeProjectsDuringSqlToRel =
context.getTableConfig()
.getConfiguration()
.getBoolean(
OptimizerConfigOptions
.TABLE_OPTIMIZER_SQL2REL_PROJECT_MERGE_ENABLED);
if (!mergeProjectsDuringSqlToRel) {
config = config.addRelBuilderConfigTransform(c -> c.withBloat(-1));
}
return config;
});
} | 3.68 |
morf_SqlDialect_buildSQLToStopTracing | /**
 * @return SQL required to stop tracing, or null if tracing is not
 *         supported.
*/
public List<String> buildSQLToStopTracing() {
return null;
} | 3.68 |
hadoop_AggregateAppResourceUsage_getMemorySeconds | /**
* @return the memorySeconds
*/
public long getMemorySeconds() {
return RMServerUtils.getOrDefault(resourceSecondsMap,
ResourceInformation.MEMORY_MB.getName(), 0L);
} | 3.68 |
hudi_HoodieBaseFileGroupRecordBuffer_doProcessNextDataRecord | /**
* Merge two log data records if needed.
*
 * @param record                     the incoming data record (from an older commit)
 * @param metadata                   the metadata of the incoming record
 * @param existingRecordMetadataPair the existing record and its metadata, or null if there is none
 * @return the merged record and its metadata, or an empty Option if the existing record can be kept as is
 * @throws IOException if merging the records fails
*/
protected Option<Pair<T, Map<String, Object>>> doProcessNextDataRecord(T record,
Map<String, Object> metadata,
Pair<Option<T>, Map<String, Object>> existingRecordMetadataPair) throws IOException {
if (existingRecordMetadataPair != null) {
// Merge and store the combined record
// Note that the incoming `record` is from an older commit, so it should be put as
// the `older` in the merge API
Option<Pair<HoodieRecord, Schema>> combinedRecordAndSchemaOpt = enablePartialMerging
? recordMerger.partialMerge(
readerContext.constructHoodieRecord(Option.of(record), metadata),
(Schema) metadata.get(INTERNAL_META_SCHEMA),
readerContext.constructHoodieRecord(
existingRecordMetadataPair.getLeft(), existingRecordMetadataPair.getRight()),
(Schema) existingRecordMetadataPair.getRight().get(INTERNAL_META_SCHEMA),
readerSchema,
payloadProps)
: recordMerger.merge(
readerContext.constructHoodieRecord(Option.of(record), metadata),
(Schema) metadata.get(INTERNAL_META_SCHEMA),
readerContext.constructHoodieRecord(
existingRecordMetadataPair.getLeft(), existingRecordMetadataPair.getRight()),
(Schema) existingRecordMetadataPair.getRight().get(INTERNAL_META_SCHEMA),
payloadProps);
if (!combinedRecordAndSchemaOpt.isPresent()) {
return Option.empty();
}
Pair<HoodieRecord, Schema> combinedRecordAndSchema = combinedRecordAndSchemaOpt.get();
HoodieRecord<T> combinedRecord = combinedRecordAndSchema.getLeft();
// If pre-combine returns existing record, no need to update it
if (combinedRecord.getData() != existingRecordMetadataPair.getLeft().get()) {
return Option.of(Pair.of(
combinedRecord.getData(),
enablePartialMerging
? readerContext.updateSchemaAndResetOrderingValInMetadata(metadata, combinedRecordAndSchema.getRight())
: metadata));
}
return Option.empty();
} else {
// Put the record as is
// NOTE: Record have to be cloned here to make sure if it holds low-level engine-specific
// payload pointing into a shared, mutable (underlying) buffer we get a clean copy of
// it since these records will be put into records(Map).
return Option.of(Pair.of(record, metadata));
}
} | 3.68 |
flink_SuperstepBarrier_waitForOtherWorkers | /** Wait on the barrier. */
public void waitForOtherWorkers() throws InterruptedException {
latch.await();
} | 3.68 |
hbase_SimpleRegionNormalizer_computeMergeNormalizationPlans | /**
 * Computes the merge plans that should be executed for this table to converge the average region
 * size towards the target average, or the region count towards the target count.
*/
private List<NormalizationPlan> computeMergeNormalizationPlans(final NormalizeContext ctx) {
final NormalizerConfiguration configuration = normalizerConfiguration;
if (ctx.getTableRegions().size() < configuration.getMergeMinRegionCount(ctx)) {
LOG.debug(
"Table {} has {} regions, required min number of regions for normalizer to run"
+ " is {}, not computing merge plans.",
ctx.getTableName(), ctx.getTableRegions().size(), configuration.getMergeMinRegionCount());
return Collections.emptyList();
}
final long avgRegionSizeMb = (long) ctx.getAverageRegionSizeMb();
if (avgRegionSizeMb < configuration.getMergeMinRegionSizeMb(ctx)) {
return Collections.emptyList();
}
LOG.debug("Computing normalization plan for table {}. average region size: {} MB, number of"
+ " regions: {}.", ctx.getTableName(), avgRegionSizeMb, ctx.getTableRegions().size());
// this nested loop walks the table's region chain once, looking for contiguous sequences of
// regions that meet the criteria for merge. The outer loop tracks the starting point of the
// next sequence, the inner loop looks for the end of that sequence. A single sequence becomes
// an instance of MergeNormalizationPlan.
final List<NormalizationPlan> plans = new LinkedList<>();
final List<NormalizationTarget> rangeMembers = new LinkedList<>();
long sumRangeMembersSizeMb;
int current = 0;
for (int rangeStart = 0; rangeStart < ctx.getTableRegions().size() - 1
&& current < ctx.getTableRegions().size();) {
// walk the region chain looking for contiguous sequences of regions that can be merged.
rangeMembers.clear();
sumRangeMembersSizeMb = 0;
for (current = rangeStart; current < ctx.getTableRegions().size(); current++) {
final RegionInfo regionInfo = ctx.getTableRegions().get(current);
final long regionSizeMb = getRegionSizeMB(regionInfo);
if (skipForMerge(configuration, ctx, regionInfo)) {
// this region cannot participate in a range. resume the outer loop.
rangeStart = Math.max(current, rangeStart + 1);
break;
}
if (
// when there are no range members, seed the range with whatever we have. this way we're
// prepared in case the next region is 0-size.
rangeMembers.isEmpty()
// when there is only one region and the size is 0, seed the range with whatever we
// have.
|| (rangeMembers.size() == 1 && sumRangeMembersSizeMb == 0)
// add an empty region to the current range only if it doesn't exceed max merge request
// region count
|| (regionSizeMb == 0 && rangeMembers.size() < getMergeRequestMaxNumberOfRegionsCount())
// add region if current range region size is less than avg region size of table
// and current range doesn't exceed max merge request region count
|| ((regionSizeMb + sumRangeMembersSizeMb <= avgRegionSizeMb)
&& (rangeMembers.size() < getMergeRequestMaxNumberOfRegionsCount()))
) {
// add the current region to the range when there's capacity remaining.
rangeMembers.add(new NormalizationTarget(regionInfo, regionSizeMb));
sumRangeMembersSizeMb += regionSizeMb;
continue;
}
// we have accumulated enough regions to fill a range. resume the outer loop.
rangeStart = Math.max(current, rangeStart + 1);
break;
}
if (rangeMembers.size() > 1) {
plans.add(new MergeNormalizationPlan.Builder().setTargets(rangeMembers).build());
}
}
return plans;
} | 3.68 |
cron-utils_FieldQuestionMarkDefinitionBuilder_supportsQuestionMark | /**
 * Registers that the field supports the question mark (?) special char.
 *
 * @return this FieldQuestionMarkDefinitionBuilder instance
*/
public FieldQuestionMarkDefinitionBuilder supportsQuestionMark() {
constraints.addQuestionMarkSupport();
return this;
} | 3.68 |
hadoop_DynoInfraUtils_waitForAndGetNameNodeProperties | /**
* Get the set of properties representing information about the launched
* NameNode. This method will wait for the information to be available until
* it is interrupted, or {@code shouldExit} returns true. It polls for a file
* present at {@code nameNodeInfoPath} once a second and uses that file to
* load the NameNode information.
*
* @param shouldExit Should return true iff this should stop waiting.
* @param conf The configuration.
* @param nameNodeInfoPath The path at which to expect the NameNode
* information file to be present.
* @param log Where to log information.
* @return Absent if this exited prematurely (i.e. due to {@code shouldExit}),
* else returns a set of properties representing information about the
* launched NameNode.
*/
static Optional<Properties> waitForAndGetNameNodeProperties(
Supplier<Boolean> shouldExit, Configuration conf, Path nameNodeInfoPath,
Logger log) throws IOException, InterruptedException {
while (!shouldExit.get()) {
try (FSDataInputStream nnInfoInputStream = nameNodeInfoPath
.getFileSystem(conf).open(nameNodeInfoPath)) {
Properties nameNodeProperties = new Properties();
nameNodeProperties.load(nnInfoInputStream);
return Optional.of(nameNodeProperties);
} catch (FileNotFoundException fnfe) {
log.debug("NameNode host information not yet available");
Thread.sleep(1000);
} catch (IOException ioe) {
log.warn("Unable to fetch NameNode host information; retrying", ioe);
Thread.sleep(1000);
}
}
return Optional.empty();
} | 3.68 |
hbase_PermissionStorage_writePermissionsAsBytes | /**
 * Writes a set of permissions (user: table permission) as
 * {@link org.apache.hadoop.io.Writable} instances and returns the resulting byte array.
*/
public static byte[] writePermissionsAsBytes(ListMultimap<String, UserPermission> perms,
Configuration conf) {
return ProtobufUtil
.prependPBMagic(AccessControlUtil.toUserTablePermissions(perms).toByteArray());
} | 3.68 |
hibernate-validator_ParametersMethodOverrideCheck_getEnclosingTypeElementQualifiedNames | /**
* Provides a formatted string containing qualified names of enclosing types of provided methods.
*
* @param methods a collection of methods to convert to string of qualified names of enclosing types
* @return string of qualified names of enclosing types
*/
private String getEnclosingTypeElementQualifiedNames(Set<ExecutableElement> methods) {
List<String> enclosingTypeElementQualifiedNames = CollectionHelper.newArrayList();
for ( ExecutableElement method : methods ) {
enclosingTypeElementQualifiedNames.add( getEnclosingTypeElementQualifiedName( method ) );
}
Collections.sort( enclosingTypeElementQualifiedNames );
return StringHelper.join( enclosingTypeElementQualifiedNames, ", " );
} | 3.68 |
hadoop_CommonAuditContext_noteEntryPoint | /**
* Add the entry point as a context entry with the key
* {@link AuditConstants#PARAM_COMMAND}
* if it has not already been recorded.
* This is called via ToolRunner but may be used at any
* other entry point.
* @param tool object loaded/being launched.
*/
public static void noteEntryPoint(Object tool) {
if (tool != null && !GLOBAL_CONTEXT_MAP.containsKey(PARAM_COMMAND)) {
String classname = tool.getClass().toString();
int lastDot = classname.lastIndexOf('.');
int l = classname.length();
if (lastDot > 0 && lastDot < (l - 1)) {
String name = classname.substring(lastDot + 1, l);
setGlobalContextEntry(PARAM_COMMAND, name);
}
}
} | 3.68 |
flink_InternalWindowProcessFunction_close | /**
* The tear-down method of the function. It is called after the last call to the main working
* methods.
*/
public void close() throws Exception {} | 3.68 |
framework_Table_setColumnHeaderMode | /**
* Setter for property columnHeaderMode.
*
* @param columnHeaderMode
 *            the new value of property columnHeaderMode.
*/
public void setColumnHeaderMode(ColumnHeaderMode columnHeaderMode) {
if (columnHeaderMode == null) {
throw new IllegalArgumentException(
"Column header mode can not be null");
}
if (columnHeaderMode != this.columnHeaderMode) {
this.columnHeaderMode = columnHeaderMode;
markAsDirty();
}
} | 3.68 |
morf_SqlDialect_renameTableStatements | /**
* Creates SQL to rename a table.
*
* @param from - table to rename
* @param to - table with new name
* @return SQL statements required to change a table name.
*/
public Collection<String> renameTableStatements(Table from, Table to) {
return ImmutableList.of("ALTER TABLE " + schemaNamePrefix(from) + from.getName() + " RENAME TO " + to.getName());
} | 3.68 |
flink_FailureEnricherUtils_labelFailure | /**
* Enriches a Throwable by returning the merged label output of a Set of FailureEnrichers.
*
* @param cause the Throwable to label
* @param context the context of the Throwable
* @param mainThreadExecutor the executor to complete the enricher labeling on
* @param failureEnrichers a collection of FailureEnrichers to enrich the context with
* @return a CompletableFuture that will complete with a map of labels
*/
public static CompletableFuture<Map<String, String>> labelFailure(
final Throwable cause,
final Context context,
final Executor mainThreadExecutor,
final Collection<FailureEnricher> failureEnrichers) {
// list of CompletableFutures to enrich failure with labels from each enricher
final Collection<CompletableFuture<Map<String, String>>> enrichFutures = new ArrayList<>();
for (final FailureEnricher enricher : failureEnrichers) {
enrichFutures.add(
enricher.processFailure(cause, context)
.thenApply(
enricherLabels -> {
final Map<String, String> validLabels = new HashMap<>();
enricherLabels.forEach(
(k, v) -> {
if (!enricher.getOutputKeys().contains(k)) {
LOG.warn(
"Ignoring label with key {} from enricher {}"
+ " violating contract, keys allowed {}.",
k,
enricher.getClass(),
enricher.getOutputKeys());
} else {
validLabels.put(k, v);
}
});
return validLabels;
})
.exceptionally(
t -> {
LOG.warn(
"Enricher {} threw an exception.",
enricher.getClass(),
t);
return Collections.emptyMap();
}));
}
// combine all CompletableFutures into a single CompletableFuture containing a Map of labels
return FutureUtils.combineAll(enrichFutures)
.thenApplyAsync(
labelsToMerge -> {
final Map<String, String> mergedLabels = new HashMap<>();
for (Map<String, String> labels : labelsToMerge) {
labels.forEach(
(k, v) ->
// merge label with existing, throwing an exception
// if there is a key conflict
mergedLabels.merge(
k,
v,
(first, second) -> {
throw new FlinkRuntimeException(
String.format(
MERGE_EXCEPTION_MSG,
k));
}));
}
return mergedLabels;
},
mainThreadExecutor);
} | 3.68 |
Activiti_SpringAsyncExecutor_setTaskExecutor | /**
 * Required Spring-injected {@link TaskExecutor} implementation that will be used to execute runnable jobs.
 *
 * @param taskExecutor the task executor that runnable jobs are delegated to
*/
public void setTaskExecutor(TaskExecutor taskExecutor) {
this.taskExecutor = taskExecutor;
} | 3.68 |
hadoop_ContentCounts_getDirectoryCount | // Get the number of directories.
public long getDirectoryCount() {
return contents.get(Content.DIRECTORY);
} | 3.68 |
streampipes_JdbcClient_connectWithSSL | /**
 * WIP: connects to the database over SSL.
 *
 * @param host the database host
 * @param port the database port
 * @param databaseName the name of the database to connect to
 * @throws SpRuntimeException if no connection with the server can be established
*/
private void connectWithSSL(String host, int port, String databaseName) throws SpRuntimeException {
String url =
"jdbc:" + this.dbDescription.getEngine().getUrlName() + "://" + host + ":" + port + "/" + databaseName
+ "?user="
+ this.dbDescription.getUsername() + "&password=" + this.dbDescription.getPassword()
+ "&ssl=true&sslfactory=" + this.dbDescription.getSslFactory() + "&sslmode=require";
try {
connection = DriverManager.getConnection(url);
ensureDatabaseExists(databaseName);
ensureTableExists(url, "");
} catch (SQLException e) {
throw new SpRuntimeException("Could not establish a connection with the server: " + e.getMessage());
}
} | 3.68 |
hbase_BalanceAction_undoAction | /**
* Returns an Action which would undo this action
*/
BalanceAction undoAction() {
return this;
} | 3.68 |
framework_Color_HSVtoRGB | /**
* <p>
* Converts HSV's hue, saturation and value into an RGB value.
* <p>
* The <code>saturation</code> and <code>value</code> components should be
* floating-point values within the range [0.0-1.0].
* <p>
*
* @param hue
* the hue of the color
* @param saturation
* the saturation of the color
* @param value
* the value of the color
* @return the RGB value of corresponding color
*/
public static int HSVtoRGB(float hue, float saturation, float value) {
int red = 0;
int green = 0;
int blue = 0;
if (saturation == 0) {
red = green = blue = (int) (value * 255.0f + 0.5f);
} else {
float h = (hue - (float) Math.floor(hue)) * 6.0f;
float f = h - (float) Math.floor(h);
float p = value * (1.0f - saturation);
float q = value * (1.0f - saturation * f);
float t = value * (1.0f - (saturation * (1.0f - f)));
switch ((int) h) {
case 0:
red = (int) (value * 255.0f + 0.5f);
green = (int) (t * 255.0f + 0.5f);
blue = (int) (p * 255.0f + 0.5f);
break;
case 1:
red = (int) (q * 255.0f + 0.5f);
green = (int) (value * 255.0f + 0.5f);
blue = (int) (p * 255.0f + 0.5f);
break;
case 2:
red = (int) (p * 255.0f + 0.5f);
green = (int) (value * 255.0f + 0.5f);
blue = (int) (t * 255.0f + 0.5f);
break;
case 3:
red = (int) (p * 255.0f + 0.5f);
green = (int) (q * 255.0f + 0.5f);
blue = (int) (value * 255.0f + 0.5f);
break;
case 4:
red = (int) (t * 255.0f + 0.5f);
green = (int) (p * 255.0f + 0.5f);
blue = (int) (value * 255.0f + 0.5f);
break;
case 5:
red = (int) (value * 255.0f + 0.5f);
green = (int) (p * 255.0f + 0.5f);
blue = (int) (q * 255.0f + 0.5f);
break;
}
}
return 0xff000000 | (red << 16) | (green << 8) | (blue << 0);
} | 3.68 |
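A brief usage sketch of the conversion above. The fully qualified class name is an assumption (Vaadin's shared color-picker Color class); the expected outputs follow directly from the formula:

```java
public class HsvToRgbDemo {
    public static void main(String[] args) {
        // Hue 0 with full saturation and value hits case 0 of the switch: pure red.
        int red = com.vaadin.shared.ui.colorpicker.Color.HSVtoRGB(0f, 1f, 1f);
        // Zero saturation collapses all channels to value * 255, i.e. a grey.
        int grey = com.vaadin.shared.ui.colorpicker.Color.HSVtoRGB(0.5f, 0f, 0.5f);
        System.out.printf("red=%08X grey=%08X%n", red, grey); // red=FFFF0000 grey=FF808080
    }
}
```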
framework_VCustomLayout_updateCaption | /**
* Update caption for the given child connector.
*
* @param childConnector
* the child connector whose caption should be updated
*/
public void updateCaption(ComponentConnector childConnector) {
Widget widget = childConnector.getWidget();
if (!widget.isAttached()) {
// Widget has not been added because the location was not found
return;
}
VCaptionWrapper wrapper = childWidgetToCaptionWrapper.get(widget);
if (VCaption.isNeeded(childConnector)) {
if (wrapper == null) {
// Add a wrapper between the layout and the child widget
final String loc = getLocation(widget);
super.remove(widget);
wrapper = new VCaptionWrapper(childConnector, client);
super.add(wrapper, locationToElement.get(loc));
childWidgetToCaptionWrapper.put(widget, wrapper);
}
wrapper.updateCaption();
} else {
if (wrapper != null) {
// Remove the wrapper and add the widget directly to the layout
final String loc = getLocation(widget);
super.remove(wrapper);
super.add(widget, locationToElement.get(loc));
childWidgetToCaptionWrapper.remove(widget);
}
}
} | 3.68 |
hbase_FSTableDescriptors_formatTableInfoSequenceId | /**
* @param number Number to use as suffix.
   * @return Returns zero-prefixed decimal version of passed number (uses the absolute value if the
   *         number is negative).
*/
private static String formatTableInfoSequenceId(final int number) {
byte[] b = new byte[WIDTH_OF_SEQUENCE_ID];
int d = Math.abs(number);
for (int i = b.length - 1; i >= 0; i--) {
b[i] = (byte) ((d % 10) + '0');
d /= 10;
}
return Bytes.toString(b);
} | 3.68 |
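A standalone sketch of the zero-padding behaviour above; the width used here is an assumption standing in for WIDTH_OF_SEQUENCE_ID, which is defined elsewhere in FSTableDescriptors:

```java
import java.nio.charset.StandardCharsets;

public class SequenceIdFormatDemo {
    private static final int WIDTH = 10; // assumption; stands in for WIDTH_OF_SEQUENCE_ID

    static String format(int number) {
        byte[] b = new byte[WIDTH];
        int d = Math.abs(number);
        for (int i = b.length - 1; i >= 0; i--) {
            b[i] = (byte) ((d % 10) + '0');
            d /= 10;
        }
        return new String(b, StandardCharsets.US_ASCII);
    }

    public static void main(String[] args) {
        System.out.println(format(42)); // 0000000042
        System.out.println(format(-7)); // 0000000007 (absolute value is used)
    }
}
```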
dubbo_ConverterUtil_getConverter | /**
* Get the Converter instance from {@link ExtensionLoader} with the specified source and target type
*
* @param sourceType the source type
* @param targetType the target type
 * @return the first {@link Converter} that accepts the given source and target types, or null if none matches
* @see ExtensionLoader#getSupportedExtensionInstances()
*/
public Converter<?, ?> getConverter(Class<?> sourceType, Class<?> targetType) {
ConcurrentMap<Class<?>, List<Converter>> toTargetMap =
ConcurrentHashMapUtils.computeIfAbsent(converterCache, sourceType, (k) -> new ConcurrentHashMap<>());
List<Converter> converters = ConcurrentHashMapUtils.computeIfAbsent(
toTargetMap,
targetType,
(k) -> frameworkModel.getExtensionLoader(Converter.class).getSupportedExtensionInstances().stream()
.filter(converter -> converter.accept(sourceType, targetType))
.collect(Collectors.toList()));
return converters.size() > 0 ? converters.get(0) : null;
} | 3.68 |
flink_ExecutionEnvironment_fromParallelCollection | // private helper for passing different call location names
private <X> DataSource<X> fromParallelCollection(
SplittableIterator<X> iterator, TypeInformation<X> type, String callLocationName) {
return new DataSource<>(
this, new ParallelIteratorInputFormat<>(iterator), type, callLocationName);
} | 3.68 |
hbase_ZKUtil_multiOrSequential | /**
 * Use ZooKeeper's multi-update functionality.
 * <p>
 * If all of the following are true:
 * <ul>
 * <li>runSequentialOnMultiFailure is true</li>
 * <li>on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*)</li>
 * </ul>
 * then we retry the operations one-by-one (sequentially).
 * <p>
 * Note *: an example is receiving a NodeExistsException from a "create" call. Without multi, a
 * user could call "createAndFailSilent" to ensure that a node exists if they don't care who
 * actually created the node (i.e. the NodeExistsException from ZooKeeper is caught). This will
 * cause all operations in the multi to fail, however, because the NodeExistsException that
 * zk.create throws will fail the multi transaction. In this case, if the previous conditions
 * hold, the commands are run sequentially, which should result in the correct final state, but
 * means that the operations will not run atomically.
 * @throws KeeperException if a ZooKeeper operation fails
 */
public static void multiOrSequential(ZKWatcher zkw, List<ZKUtilOp> ops,
boolean runSequentialOnMultiFailure) throws KeeperException {
if (ops == null) {
return;
}
if (useMultiWarn) { // Only check and warn at first use
if (zkw.getConfiguration().get("hbase.zookeeper.useMulti") != null) {
LOG.warn("hbase.zookeeper.useMulti is deprecated. Default to true always.");
}
useMultiWarn = false;
}
List<Op> zkOps = new LinkedList<>();
for (ZKUtilOp op : ops) {
zkOps.add(toZooKeeperOp(zkw, op));
}
try {
zkw.getRecoverableZooKeeper().multi(zkOps);
} catch (KeeperException ke) {
switch (ke.code()) {
case NODEEXISTS:
case NONODE:
case BADVERSION:
case NOAUTH:
case NOTEMPTY:
// if we get an exception that could be solved by running sequentially
// (and the client asked us to), then break out and run sequentially
if (runSequentialOnMultiFailure) {
LOG.info(
"multi exception: {}; running operations sequentially "
+ "(runSequentialOnMultiFailure=true); {}",
ke.toString(), ops.stream().map(o -> o.toString()).collect(Collectors.joining(",")));
processSequentially(zkw, ops);
break;
}
default:
throw ke;
}
} catch (InterruptedException ie) {
zkw.interruptedException(ie);
}
} | 3.68 |
flink_HiveParserTypeCheckCtx_getAllowStatefulFunctions | /** @return whether to allow stateful UDF invocations */
public boolean getAllowStatefulFunctions() {
return allowStatefulFunctions;
} | 3.68 |
hudi_CompactionAdminClient_getCompactionPlan | /**
* Construction Compaction Plan from compaction instant.
*/
private static HoodieCompactionPlan getCompactionPlan(HoodieTableMetaClient metaClient, String compactionInstant)
throws IOException {
return TimelineMetadataUtils.deserializeCompactionPlan(
metaClient.getActiveTimeline().readCompactionPlanAsBytes(
HoodieTimeline.getCompactionRequestedInstant(compactionInstant)).get());
} | 3.68 |
AreaShop_RegionGroup_getManualMembers | /**
* Get all manually added members of the group.
 * @return A set with the names of all manually added members of the group (a defensive copy)
*/
public Set<String> getManualMembers() {
return new HashSet<>(regions);
} | 3.68 |
hbase_AbstractFSWAL_computeFilename | /**
* This is a convenience method that computes a new filename with a given file-number.
* @param filenum to use
*/
protected Path computeFilename(final long filenum) {
if (filenum < 0) {
throw new RuntimeException("WAL file number can't be < 0");
}
String child = walFilePrefix + WAL_FILE_NAME_DELIMITER + filenum + walFileSuffix;
return new Path(walDir, child);
} | 3.68 |
hbase_SpaceQuotaSnapshot_getPolicy | /**
 * Returns the violation policy, which may be empty. It is guaranteed to be present if
 * {@link #isInViolation()} is {@code true}, but may be empty otherwise.
*/
@Override
public Optional<SpaceViolationPolicy> getPolicy() {
return policy;
} | 3.68 |
rocketmq-connect_Sensor_record | /**
 * Records a value.
 *
 * @param value the value to record
*/
public void record(long value) {
recordInternal(value);
} | 3.68 |
framework_GridLayoutWithNonIntegerWidth_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 11775;
} | 3.68 |
hbase_RegionServerSpaceQuotaManager_buildFileArchiveRequest | /**
* Builds the protobuf message to inform the Master of files being archived.
* @param tn The table the files previously belonged to.
* @param archivedFiles The files and their size in bytes that were archived.
* @return The protobuf representation
*/
public RegionServerStatusProtos.FileArchiveNotificationRequest
buildFileArchiveRequest(TableName tn, Collection<Entry<String, Long>> archivedFiles) {
RegionServerStatusProtos.FileArchiveNotificationRequest.Builder builder =
RegionServerStatusProtos.FileArchiveNotificationRequest.newBuilder();
HBaseProtos.TableName protoTn = ProtobufUtil.toProtoTableName(tn);
for (Entry<String, Long> archivedFile : archivedFiles) {
RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize fws =
RegionServerStatusProtos.FileArchiveNotificationRequest.FileWithSize.newBuilder()
.setName(archivedFile.getKey()).setSize(archivedFile.getValue()).setTableName(protoTn)
.build();
builder.addArchivedFiles(fws);
}
final RegionServerStatusProtos.FileArchiveNotificationRequest request = builder.build();
if (LOG.isTraceEnabled()) {
LOG.trace("Reporting file archival to Master: " + TextFormat.shortDebugString(request));
}
return request;
} | 3.68 |
hbase_MasterRpcServices_isSnapshotDone | /**
* Checks if the specified snapshot is done.
* @return true if the snapshot is in file system ready to use, false if the snapshot is in the
* process of completing
* @throws ServiceException wrapping UnknownSnapshotException if invalid snapshot, or a wrapped
* HBaseSnapshotException with progress failure reason.
*/
@Override
public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
IsSnapshotDoneRequest request) throws ServiceException {
LOG.debug("Checking to see if snapshot from request:"
+ ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done");
try {
server.checkInitialized();
IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder();
boolean done = server.snapshotManager.isSnapshotDone(request.getSnapshot());
builder.setDone(done);
return builder.build();
} catch (ForeignException e) {
throw new ServiceException(e.getCause());
} catch (IOException e) {
throw new ServiceException(e);
}
} | 3.68 |
flink_CatalogManager_getCurrentCatalog | /**
* Gets the current catalog that will be used when resolving table path.
*
* @return the current catalog
* @see CatalogManager#qualifyIdentifier(UnresolvedIdentifier)
*/
public String getCurrentCatalog() {
return currentCatalogName;
} | 3.68 |
hbase_MetaTableMetrics_getTableNameFromOp | /**
* Get table name from Ops such as: get, put, delete.
   * @param op such as get, put or delete.
   * @return the table name encoded in the operation's row key, or null if it cannot be determined
   */
private String getTableNameFromOp(Row op) {
final String tableRowKey = Bytes.toString(op.getRow());
if (StringUtils.isEmpty(tableRowKey)) {
return null;
}
final String[] splits = tableRowKey.split(",");
return splits.length > 0 ? splits[0] : null;
} | 3.68 |
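The helper relies on hbase:meta row keys starting with the table name followed by a comma; a tiny illustration of that split, using a made-up row key:

```java
public class MetaRowKeyDemo {
    public static void main(String[] args) {
        // Hypothetical meta row key: table name, start key and region id separated by commas.
        String tableRowKey = "mytable,row-0001,1588230740";
        String[] splits = tableRowKey.split(",");
        System.out.println(splits.length > 0 ? splits[0] : null); // prints "mytable"
    }
}
```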
hadoop_StagingCommitter_getConflictResolutionMode | /**
* Returns the {@link ConflictResolution} mode for this commit.
*
* @param context the JobContext for this commit
* @param fsConf filesystem config
* @return the ConflictResolution mode
*/
public final ConflictResolution getConflictResolutionMode(
JobContext context,
Configuration fsConf) {
if (conflictResolution == null) {
this.conflictResolution = ConflictResolution.valueOf(
getConfictModeOption(context, fsConf, DEFAULT_CONFLICT_MODE));
}
return conflictResolution;
} | 3.68 |
streampipes_StatementUtils_addLabel | /**
 * Add a label to the input event according to the provided statements.
 *
 * @param inputEvent the event to enrich
 * @param labelName the name of the field the label is written to
 * @param value the value the statements are evaluated against
 * @param statements the labeling statements
 * @return the (possibly labeled) event
*/
public static Event addLabel(Event inputEvent, String labelName, double value, List<Statement> statements) {
String label = getLabel(value, statements);
if (label != null) {
inputEvent.addField(labelName, label);
} else {
LOG.info("No condition of statements was fulfilled, add a default case (*) to the statements");
}
return inputEvent;
} | 3.68 |
hadoop_HostSet_match | /**
* The function that checks whether there exists an entry foo in the set
* so that addr <= foo.
*/
boolean match(InetSocketAddress addr) {
int port = addr.getPort();
Collection<Integer> ports = addrs.get(addr.getAddress());
boolean exactMatch = ports.contains(port);
boolean genericMatch = ports.contains(0);
return exactMatch || genericMatch;
} | 3.68 |
hbase_ForeignExceptionDispatcher_addListener | /**
* Listen for failures to a given process. This method should only be used during initialization
* and not added to after exceptions are accepted.
* @param errorable listener for the errors. may be null.
*/
public synchronized void addListener(ForeignExceptionListener errorable) {
this.listeners.add(errorable);
} | 3.68 |
hbase_Interns_info | /**
* Get a metric info object
* @return an interned metric info object
*/
public static MetricsInfo info(String name, String description) {
Map<String, MetricsInfo> map = infoCache.getUnchecked(name);
MetricsInfo info = map.get(description);
if (info == null) {
info = new MetricsInfoImpl(name, description);
map.put(description, info);
}
return info;
} | 3.68 |
framework_Table_addHeaderClickListener | /**
* Adds a header click listener which handles the click events when the user
* clicks on a column header cell in the Table.
* <p>
* The listener will receive events which contain information about which
* column was clicked and some details about the mouse event.
* </p>
*
* @param listener
* The handler which should handle the header click events.
*/
public void addHeaderClickListener(HeaderClickListener listener) {
addListener(TableConstants.HEADER_CLICK_EVENT_ID,
HeaderClickEvent.class, listener,
HeaderClickEvent.HEADER_CLICK_METHOD);
} | 3.68 |
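A short usage sketch, assuming a populated com.vaadin.ui.Table named table; the event is assumed to expose the clicked property id via getPropertyId(), as described above:

```java
import com.vaadin.ui.Notification;
import com.vaadin.ui.Table;

public class HeaderClickDemo {
    // Hypothetical wiring; shows nothing more than registering the listener.
    static void wire(Table table) {
        table.addHeaderClickListener(event ->
                Notification.show("Clicked column: " + event.getPropertyId()));
    }
}
```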
hadoop_BaseService_getServiceConfig | /**
 * Returns the service configuration properties. Property
 * names are trimmed off from their prefix.
 * <p>
 * The service configuration properties are all properties
 * with names starting with <code>#SERVER#.#SERVICE#.</code>
* in the server configuration.
*
* @return the service configuration properties with names
* trimmed off from their <code>#SERVER#.#SERVICE#.</code>
* prefix.
*/
protected Configuration getServiceConfig() {
return serviceConfig;
} | 3.68 |
cron-utils_FieldConstraintsBuilder_withStrictRange | /**
* With strict range.
*
* @return same FieldConstraintsBuilder instance
*/
public FieldConstraintsBuilder withStrictRange() {
this.strictRange = true;
return this;
} | 3.68 |
framework_PointerEvent_getTiltY | /**
* Gets the angle between the X-Z plane and the plane containing both the
* transducer and the X axis. A positive tilt is towards the user.
*
* @return the tilt along the Y axis as degrees in the range of [-90, 90],
* or 0 if the device does not support tilt
*/
public final double getTiltY() {
return getTiltY(getNativeEvent());
} | 3.68 |
hadoop_DecayRpcSchedulerDetailedMetrics_getQueueName | /**
* @return Returns the rate name inside the metric.
* @param priority input priority.
*/
public String getQueueName(int priority) {
return "DecayRPCSchedulerPriority."+priority+".RpcQueueTime";
} | 3.68 |
hadoop_IOUtilsClient_cleanupWithLogger | /**
* Close the Closeable objects and <b>ignore</b> any {@link IOException} or
* null pointers. Must only be used for cleanup in exception handlers.
*
* @param log the log to record problems to at debug level. Can be null.
* @param closeables the objects to close
*/
public static void cleanupWithLogger(Logger log,
java.io.Closeable... closeables) {
for (java.io.Closeable c : closeables) {
if (c != null) {
try {
c.close();
} catch(Throwable e) {
if (log != null && log.isDebugEnabled()) {
log.debug("Exception in closing " + c, e);
}
}
}
}
} | 3.68 |
flink_HiveParserDDLSemanticAnalyzer_validatePartitionValues | /**
 * Certain partition values are used by hive, e.g. the default partition in dynamic
 * partitioning and the intermediate partition values used in the archiving process. Naturally,
 * prohibit the user from creating partitions with these reserved values. The check this
 * function performs is more restrictive than the actual limitation, but it's simpler. Should be
 * okay since the reserved names are fairly long and uncommon.
*/
private void validatePartitionValues(Map<String, String> partSpec) {
for (Map.Entry<String, String> e : partSpec.entrySet()) {
for (String s : reservedPartitionValues) {
String value = e.getValue();
if (value != null && value.contains(s)) {
throw new ValidationException(
ErrorMsg.RESERVED_PART_VAL.getMsg(
"(User value: "
+ e.getValue()
+ " Reserved substring: "
+ s
+ ")"));
}
}
}
} | 3.68 |
flink_TimestampedValue_getStreamRecord | /** Creates a {@link StreamRecord} from this TimestampedValue. */
public StreamRecord<T> getStreamRecord() {
StreamRecord<T> streamRecord = new StreamRecord<>(value);
if (hasTimestamp) {
streamRecord.setTimestamp(timestamp);
}
return streamRecord;
} | 3.68 |
hadoop_FederationRegistryClient_cleanAllApplications | /**
* For testing, delete all application records in registry.
*/
@VisibleForTesting
public synchronized void cleanAllApplications() {
try {
removeKeyRegistry(this.registry, this.user, getRegistryKey(null, null),
true, false);
} catch (YarnException e) {
LOG.warn("Unexpected exception from removeKeyRegistry", e);
}
} | 3.68 |
hbase_RegionPlacementMaintainer_getRegionAssignmentSnapshot | /** Returns the new RegionAssignmentSnapshot */
public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot() throws IOException {
    SnapshotOfRegionAssignmentFromMeta currentAssignmentSnapshot =
      new SnapshotOfRegionAssignmentFromMeta(ConnectionFactory.createConnection(conf));
    currentAssignmentSnapshot.initialize();
    return currentAssignmentSnapshot;
} | 3.68 |
morf_DatabaseMetaDataProvider_getActualDefaultValue | /**
* Reads the actual default value in the database.
*
* @param tableName Name of the table.
* @param column Column builder to set to.
* @param columnResultSet Result set to be read.
* @return The default value, usually as an expression.
* @throws SQLException Upon errors.
*/
@SuppressWarnings("unused")
protected String getActualDefaultValue(RealName tableName, ColumnBuilder column, ResultSet columnResultSet) throws SQLException {
final String actualDefaultValue = columnResultSet.getString(COLUMN_DEFAULT_EXPR);
// columns that never had DEFAULT
if (actualDefaultValue == null)
return "";
    final String trimmedActualDefaultValue = actualDefaultValue.trim();
    // columns that previously had DEFAULT and were set to DEFAULT NULL
    if ("NULL".equalsIgnoreCase(trimmedActualDefaultValue))
      return "";
    // other values returned with just a bit of trimming
    // - note that these are Oracle expressions, not actual default values
    // - simple decimals come back as decimals,
    // - strings come back wrapped in single quotes,
    // - functions come back as expressions,
    // - as specified in the last alter statement
    return trimmedActualDefaultValue;
} | 3.68 |
hbase_RWQueueRpcExecutor_calcNumReaders | /*
* Calculate the number of readers based on the "total count" and the read share. You'll get at
* least one reader.
*/
private static int calcNumReaders(final int count, final float readShare) {
return count - calcNumWriters(count, readShare);
} | 3.68 |
streampipes_BoilerpipeHTMLContentHandler_recycle | /**
* Recycles this instance.
*/
public void recycle() {
tokenBuffer.setLength(0);
textBuffer.setLength(0);
inBody = 0;
inAnchor = 0;
inIgnorableElement = 0;
sbLastWasWhitespace = false;
textElementIdx = 0;
textBlocks.clear();
lastStartTag = null;
lastEndTag = null;
lastEvent = null;
offsetBlocks = 0;
currentContainedTextElements.clear();
flush = false;
inAnchorText = false;
} | 3.68 |
framework_DateField_getCalendar | /**
 * Returns a new calendar instance used in Date conversions.
 *
 * Returns a new clone of the calendar object initialized using the
 * current date (if available).
 *
 * If no calendar is assigned, <code>Calendar.getInstance</code>
 * is used.
*
* @return the Calendar.
* @see #setCalendar(Calendar)
*/
private Calendar getCalendar() {
// Makes sure we have an calendar instance
if (calendar == null) {
calendar = Calendar.getInstance();
// Start by a zeroed calendar to avoid having values for lower
// resolution variables e.g. time when resolution is day
int min, field;
for (Resolution r : Resolution
.getResolutionsLowerThan(resolution)) {
field = r.getCalendarField();
min = calendar.getActualMinimum(field);
calendar.set(field, min);
}
calendar.set(Calendar.MILLISECOND, 0);
}
// Clone the instance
final Calendar newCal = (Calendar) calendar.clone();
final TimeZone currentTimeZone = getTimeZone();
if (currentTimeZone != null) {
newCal.setTimeZone(currentTimeZone);
}
final Date currentDate = getValue();
if (currentDate != null) {
newCal.setTime(currentDate);
}
return newCal;
} | 3.68 |
hadoop_WasbTokenRenewer_isManaged | /**
* Checks if passed token is managed.
* @param token the token being checked
* @return true if it is managed.
* @throws IOException thrown when evaluating if token is managed.
*/
@Override
public boolean isManaged(Token<?> token) throws IOException {
return true;
} | 3.68 |
hadoop_Result_combine | /**
* Returns the combination of this and another result.
 * @param other the other result to combine with.
 * @return the combined result.
*/
public Result combine(Result other) {
return new Result(this.isPass() && other.isPass(), this.isDescend()
&& other.isDescend());
} | 3.68 |
framework_Notification_getDelayMsec | /**
* Gets the delay before the notification disappears.
*
* @return the delay in milliseconds, {@value #DELAY_FOREVER} indicates the
* message has to be clicked.
*/
public int getDelayMsec() {
return getState(false).delay;
} | 3.68 |
druid_MySqlLexer_isIdentifierCharForVariable | /**
 * employee.code=:employee.code used to fail to parse.
 * Fix: variable names may contain the '.' character.
 * @param c the character to test
 * @return true if the character may appear in a variable name
 */
public static boolean isIdentifierCharForVariable(char c) {
if (c == '.') {
return true;
}
return isIdentifierChar(c);
} | 3.68 |
framework_AbstractDateField_setZoneId | /**
* Sets the {@link ZoneId}, which is used when {@code z} is included inside
* the {@link #setDateFormat(String)} .
*
* @param zoneId
* the zone id
* @since 8.2
*/
public void setZoneId(ZoneId zoneId) {
if (zoneId != this.zoneId
|| (zoneId != null && !zoneId.equals(this.zoneId))) {
updateTimeZoneJSON(zoneId, getLocale(), getStartYear(),
getEndYear());
}
this.zoneId = zoneId;
} | 3.68 |
hadoop_StorageStatisticsFromIOStatistics_getLongStatistics | /**
* Take a snapshot of the current counter values
* and return an iterator over them.
* @return all the counter statistics.
*/
@Override
public Iterator<LongStatistic> getLongStatistics() {
final Set<Map.Entry<String, Long>> counters = counters()
.entrySet();
final Set<LongStatistic> statisticSet = counters.stream().map(
this::toLongStatistic)
.collect(Collectors.toSet());
// add the gauges
gauges().entrySet().forEach(entry ->
statisticSet.add(toLongStatistic(entry)));
return statisticSet.iterator();
} | 3.68 |
hmily_Binder_bind | /**
* Bind object.
*
* @param <T> the type parameter
* @param name the name
* @param target the target
* @param env the env
* @param allowRecursiveBinding the allow recursive binding
* @return the object
*/
protected <T> Object bind(final PropertyName name, final BindData<T> target, final Env env, final boolean allowRecursiveBinding) {
return bindObject(name, target, env, allowRecursiveBinding);
} | 3.68 |
framework_Heartbeat_setInterval | /**
 * Changes the heartbeat interval at runtime and applies it.
*
* @param heartbeatInterval
* new interval in seconds.
*/
public void setInterval(int heartbeatInterval) {
getLogger().info(
"Setting hearbeat interval to " + heartbeatInterval + "sec.");
interval = heartbeatInterval;
schedule();
} | 3.68 |
hadoop_FindOptions_getMaxDepth | /**
* Returns the maximum depth for applying expressions.
*
* @return maximum depth
*/
public int getMaxDepth() {
return this.maxDepth;
} | 3.68 |
morf_SchemaAdapter_views | /**
* @see org.alfasoftware.morf.metadata.Schema#views()
*/
@Override
public Collection<View> views() {
return delegate.views();
} | 3.68 |
framework_DDEventHandleStrategy_handleEvent | /**
* Final phase of event handling.
*
* @param targetElement
* target element over which DnD event has happened
* @param event
* GWT event for active DnD operation
* @param mediator
* VDragAndDropManager data accessor
*/
public void handleEvent(Element targetElement, NativePreviewEvent event,
DDManagerMediator mediator) {
switch (event.getTypeInt()) {
case Event.ONMOUSEOVER:
handleMouseOver(targetElement, event, mediator);
break;
case Event.ONMOUSEOUT:
handleMouseOut(targetElement, event, mediator);
break;
case Event.ONMOUSEMOVE:
case Event.ONTOUCHMOVE:
handleMouseMove(targetElement, event, mediator);
break;
case Event.ONTOUCHEND:
handleTouchEnd(targetElement, event, mediator);
break;
case Event.ONMOUSEUP:
handleMouseUp(targetElement, event, mediator);
break;
default:
// NOP
break;
}
} | 3.68 |
hbase_TableIntegrityErrorHandlerImpl_handleDuplicateStartKeys | /**
* {@inheritDoc}
*/
@Override
public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException {
} | 3.68 |
flink_SharedResourceHolder_releaseInternal | /** Visible to unit tests. */
synchronized <T> T releaseInternal(final Resource<T> resource, final T instance) {
final Instance cached = instances.get(resource);
if (cached == null) {
throw new IllegalArgumentException("No cached instance found for " + resource);
}
Preconditions.checkArgument(instance == cached.payload, "Releasing the wrong instance");
Preconditions.checkState(cached.refcount > 0, "Refcount has already reached zero");
cached.refcount--;
if (cached.refcount == 0) {
Preconditions.checkState(cached.destroyTask == null, "Destroy task already scheduled");
// Schedule a delayed task to destroy the resource.
if (destroyer == null) {
destroyer = destroyerFactory.createScheduledExecutor();
}
cached.destroyTask =
destroyer.schedule(
new LogExceptionRunnable(
new Runnable() {
@Override
public void run() {
synchronized (SharedResourceHolder.this) {
// Refcount may have gone up since the task was
// scheduled. Re-check it.
if (cached.refcount == 0) {
try {
resource.close(instance);
} finally {
instances.remove(resource);
if (instances.isEmpty()) {
destroyer.shutdown();
destroyer = null;
}
}
}
}
}
}),
DESTROY_DELAY_SECONDS,
TimeUnit.SECONDS);
}
// Always returning null
return null;
} | 3.68 |
pulsar_SimpleLoadManagerImpl_doNamespaceBundleSplit | /**
* Detect and split hot namespace bundles.
*/
@Override
public void doNamespaceBundleSplit() throws Exception {
int maxBundleCount = pulsar.getConfiguration().getLoadBalancerNamespaceMaximumBundles();
long maxBundleTopics = pulsar.getConfiguration().getLoadBalancerNamespaceBundleMaxTopics();
long maxBundleSessions = pulsar.getConfiguration().getLoadBalancerNamespaceBundleMaxSessions();
long maxBundleMsgRate = pulsar.getConfiguration().getLoadBalancerNamespaceBundleMaxMsgRate();
long maxBundleBandwidth =
pulsar.getConfiguration().getLoadBalancerNamespaceBundleMaxBandwidthMbytes() * MBytes;
log.info(
"Running namespace bundle split with thresholds: topics {}, sessions {},"
+ " msgRate {}, bandwidth {}, maxBundles {}",
maxBundleTopics, maxBundleSessions, maxBundleMsgRate, maxBundleBandwidth, maxBundleCount);
if (this.lastLoadReport == null || this.lastLoadReport.getBundleStats() == null) {
return;
}
Map<String, NamespaceBundleStats> bundleStats = this.lastLoadReport.getBundleStats();
Set<String> bundlesToBeSplit = new HashSet<>();
for (Map.Entry<String, NamespaceBundleStats> statsEntry : bundleStats.entrySet()) {
String bundleName = statsEntry.getKey();
NamespaceBundleStats stats = statsEntry.getValue();
long totalSessions = stats.consumerCount + stats.producerCount;
double totalMsgRate = stats.msgRateIn + stats.msgRateOut;
double totalBandwidth = stats.msgThroughputIn + stats.msgThroughputOut;
boolean needSplit = false;
if (stats.topics > maxBundleTopics || (maxBundleSessions > 0
&& totalSessions > maxBundleSessions) || totalMsgRate > maxBundleMsgRate
|| totalBandwidth > maxBundleBandwidth) {
if (stats.topics <= 1) {
log.info("Unable to split hot namespace bundle {} since there is only one topic.", bundleName);
} else {
NamespaceName namespaceName = NamespaceName
.get(LoadManagerShared.getNamespaceNameFromBundleName(bundleName));
int numBundles = pulsar.getNamespaceService().getBundleCount(namespaceName);
if (numBundles >= maxBundleCount) {
log.info("Unable to split hot namespace bundle {} since the namespace has too many bundles.",
bundleName);
} else {
needSplit = true;
}
}
}
if (needSplit) {
if (this.getLoadBalancerAutoBundleSplitEnabled()) {
log.info(
"Will split hot namespace bundle {}, topics {}, producers+consumers {},"
+ " msgRate in+out {}, bandwidth in+out {}",
bundleName, stats.topics, totalSessions, totalMsgRate, totalBandwidth);
bundlesToBeSplit.add(bundleName);
} else {
log.info(
"DRY RUN - split hot namespace bundle {}, topics {}, producers+consumers {},"
+ " msgRate in+out {}, bandwidth in+out {}",
bundleName, stats.topics, totalSessions, totalMsgRate, totalBandwidth);
}
}
}
if (bundlesToBeSplit.size() > 0) {
for (String bundleName : bundlesToBeSplit) {
try {
pulsar.getAdminClient().namespaces().splitNamespaceBundle(
LoadManagerShared.getNamespaceNameFromBundleName(bundleName),
LoadManagerShared.getBundleRangeFromBundleName(bundleName),
pulsar.getConfiguration().isLoadBalancerAutoUnloadSplitBundlesEnabled(), null);
log.info("Successfully split namespace bundle {}", bundleName);
} catch (Exception e) {
log.error("Failed to split namespace bundle {}", bundleName, e);
}
}
this.setLoadReportForceUpdateFlag();
}
} | 3.68 |
flink_TypeTransformations_timeToSqlTypes | /**
* Returns a type transformation that transforms data type to a new data type whose conversion
* class is {@link java.sql.Timestamp}/{@link java.sql.Time}/{@link java.sql.Date} if the
* original data type is TIMESTAMP/TIME/DATE.
*/
public static TypeTransformation timeToSqlTypes() {
Map<LogicalTypeRoot, Class<?>> conversions = new HashMap<>();
conversions.put(LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE, Timestamp.class);
conversions.put(LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE, Time.class);
conversions.put(LogicalTypeRoot.DATE, Date.class);
return new DataTypeConversionClassTransformation(conversions);
} | 3.68 |
flink_LeaderInformationRegister_forComponentId | /**
* Returns the {@link LeaderInformation} that is stored or an empty {@code Optional} if no entry
* exists for the passed {@code componentId}.
*/
public Optional<LeaderInformation> forComponentId(String componentId) {
return Optional.ofNullable(leaderInformationPerComponentId.get(componentId));
} | 3.68 |
hbase_ZKProcedureUtil_isReachedPathNode | /**
* Is this in the procedure barrier reached znode path
*/
boolean isReachedPathNode(String path) {
return path.startsWith(this.reachedZnode) && !path.equals(reachedZnode)
&& isMemberNode(path, reachedZnode);
} | 3.68 |
framework_ConnectorMap_getConnectors | /**
     * @return a collection of all registered connectors
*
* @deprecated As of 7.0.1, use {@link #getConnectorsAsJsArray()} for
* improved performance.
*/
@Deprecated
public Collection<? extends ServerConnector> getConnectors() {
Collection<ComponentDetail> values = idToComponentDetail.values();
List<ServerConnector> arrayList = new ArrayList<>(values.size());
for (ComponentDetail componentDetail : values) {
arrayList.add(componentDetail.getConnector());
}
return arrayList;
} | 3.68 |
open-banking-gateway_EncryptionConfigurationConfig_encryptionConfig | /**
* Datasafe configuration, persisted in DB
* @param config Encryption configuration default values
* @return Current Datasafe encryption config
*/
@Bean
@SneakyThrows
@Transactional
public EncryptionConfig encryptionConfig(MutableEncryptionConfig config) {
long dbConfigCount = datasafeConfigRepository.count();
if (dbConfigCount == 1) {
return mapper.readValue(
datasafeConfigRepository.findAll().stream()
.findFirst()
.orElseThrow(() -> new IllegalStateException(INCORRECT_ENCRYPTION_CONFIG_RECORDS_AMOUNT_EXCEPTION))
.getConfig(),
MutableEncryptionConfig.class)
.toEncryptionConfig();
} else if (dbConfigCount == 0) {
storeEncryptionConfigInDb(config);
return config.toEncryptionConfig();
}
throw new IllegalStateException(INCORRECT_ENCRYPTION_CONFIG_RECORDS_AMOUNT_EXCEPTION);
} | 3.68 |
Activiti_TreeValueExpression_isLiteralText | /**
* @return <code>true</code> if this is a literal text expression
*/
@Override
public boolean isLiteralText() {
return node.isLiteralText();
} | 3.68 |
morf_OracleDialect_indexDropStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#indexDropStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Index)
*/
@Override
public Collection<String> indexDropStatements(Table table, Index indexToBeRemoved) {
StringBuilder statement = new StringBuilder();
statement.append("DROP INDEX ")
.append(schemaNamePrefix())
.append(indexToBeRemoved.getName());
return Arrays.asList(statement.toString());
} | 3.68 |
hudi_BaseHoodieWriteClient_archive | /**
   * Trigger archival for the table. This ensures that the number of commits does not keep
   * growing unbounded over time.
*/
public void archive() {
    // Create a Hoodie table which encapsulates the commits and files visible
HoodieTable table = createTable(config, hadoopConf);
archive(table);
} | 3.68 |
hadoop_FederationStateStoreFacade_updateStoredToken | /**
* The Router Supports Update RMDelegationTokenIdentifier{@link RMDelegationTokenIdentifier}.
*
* @param identifier delegation tokens from the RM
* @param renewDate renewDate
* @param tokenInfo tokenInfo.
* @throws YarnException if the call to the state store is unsuccessful.
* @throws IOException An IO Error occurred.
*/
public void updateStoredToken(RMDelegationTokenIdentifier identifier,
long renewDate, String tokenInfo) throws YarnException, IOException {
LOG.info("updating RMDelegation token with sequence number: {}.",
identifier.getSequenceNumber());
RouterStoreToken storeToken = RouterStoreToken.newInstance(identifier, renewDate, tokenInfo);
RouterRMTokenRequest request = RouterRMTokenRequest.newInstance(storeToken);
stateStore.updateStoredToken(request);
} | 3.68 |
hbase_MasterObserver_preModifyNamespace | /**
* Called prior to modifying a namespace's properties.
* @param ctx the environment to interact with the framework and master
* @param currentNsDescriptor current NamespaceDescriptor of the namespace
* @param newNsDescriptor after modify operation, namespace will have this descriptor
*/
default void preModifyNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor currentNsDescriptor, NamespaceDescriptor newNsDescriptor)
throws IOException {
} | 3.68 |
querydsl_MetaDataExporter_setExportViews | /**
* Set whether views should be exported
*
* @param exportViews
*/
public void setExportViews(boolean exportViews) {
this.exportViews = exportViews;
} | 3.68 |
flink_OptimizerNode_addOutgoingConnection | /**
* Adds a new outgoing connection to this node.
*
* @param connection The connection to add.
*/
public void addOutgoingConnection(DagConnection connection) {
if (this.outgoingConnections == null) {
this.outgoingConnections = new ArrayList<DagConnection>();
} else {
if (this.outgoingConnections.size() == 64) {
throw new CompilerException(
"Cannot currently handle nodes with more than 64 outputs.");
}
}
this.outgoingConnections.add(connection);
} | 3.68 |
hbase_SimpleRpcServerResponder_registerForWrite | /**
 * Add a connection to the list of connections that want to write.
*/
public void registerForWrite(SimpleServerRpcConnection c) {
if (writingCons.add(c)) {
writeSelector.wakeup();
}
} | 3.68 |
hbase_MasterObserver_preDecommissionRegionServers | /**
* Called before decommission region servers.
*/
default void preDecommissionRegionServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ServerName> servers, boolean offload) throws IOException {
} | 3.68 |
hbase_FirstKeyOnlyFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof FirstKeyOnlyFilter)) {
return false;
}
return true;
} | 3.68 |
rocketmq-connect_MemoryConfigManagementServiceImpl_resumeConnector | /**
* resume connector
*
     * @param connectorName the name of the connector to resume
*/
@Override
public void resumeConnector(String connectorName) {
if (!connectorKeyValueStore.containsKey(connectorName)) {
throw new ConnectException("Connector [" + connectorName + "] does not exist");
}
ConnectKeyValue config = connectorKeyValueStore.get(connectorName);
config.setEpoch(System.currentTimeMillis());
config.setTargetState(TargetState.STARTED);
connectorKeyValueStore.put(connectorName, config.nextGeneration());
triggerListener();
} | 3.68 |
flink_PythonShellParser_parseLocal | /**
     * Parses Python shell options and translates them into the options used by `java` to execute
     * a Flink job in a local mini cluster.
*
* @param args Python shell options.
* @return Options used in `java` run.
*/
static List<String> parseLocal(String[] args) {
String[] params = new String[args.length - 1];
System.arraycopy(args, 1, params, 0, params.length);
CommandLine commandLine = parse(LOCAL_OPTIONS, params);
if (commandLine.hasOption(OPTION_HELP.getOpt())) {
printLocalHelp();
System.exit(0);
}
List<String> options = new ArrayList<>();
options.add("local");
return options;
} | 3.68 |
hbase_MultithreadedTableMapper_getNumberOfThreads | /**
* The number of threads in the thread pool that will run the map function.
* @param job the job
* @return the number of threads
*/
public static int getNumberOfThreads(JobContext job) {
return job.getConfiguration().getInt(NUMBER_OF_THREADS, 10);
} | 3.68 |
rocketmq-connect_Worker_stopAndAwaitConnector | /**
* Stop a connector that belongs to this worker and await its termination.
*
* @param connName the name of the connector to be stopped.
*/
public void stopAndAwaitConnector(String connName) {
stopConnector(connName);
awaitStopConnectors(Collections.singletonList(connName));
} | 3.68 |
flink_ConfigurationParserUtils_loadCommonConfiguration | /**
* Generate configuration from only the config file and dynamic properties.
*
* @param args the commandline arguments
* @param cmdLineSyntax the syntax for this application
* @return generated configuration
* @throws FlinkParseException if the configuration cannot be generated
*/
public static Configuration loadCommonConfiguration(String[] args, String cmdLineSyntax)
throws FlinkParseException {
final CommandLineParser<ClusterConfiguration> commandLineParser =
new CommandLineParser<>(new ClusterConfigurationParserFactory());
final ClusterConfiguration clusterConfiguration;
try {
clusterConfiguration = commandLineParser.parse(args);
} catch (FlinkParseException e) {
LOG.error("Could not parse the command line options.", e);
commandLineParser.printHelp(cmdLineSyntax);
throw e;
}
final Configuration dynamicProperties =
ConfigurationUtils.createConfiguration(clusterConfiguration.getDynamicProperties());
return GlobalConfiguration.loadConfiguration(
clusterConfiguration.getConfigDir(), dynamicProperties);
} | 3.68 |
hudi_InternalBloomFilter_getVectorSize | /**
 * @return size of the bloom filter
*/
public int getVectorSize() {
return this.vectorSize;
} | 3.68 |
hbase_SnapshotReferenceUtil_verifySnapshot | /**
* Verify the validity of the snapshot.
* @param visitor user-specified store file visitor
*/
public static void verifySnapshot(final Configuration conf, final FileSystem fs,
final SnapshotManifest manifest, final StoreFileVisitor visitor) throws IOException {
concurrentVisitReferencedFiles(conf, fs, manifest, "VerifySnapshot", visitor);
} | 3.68 |
hadoop_FederationUtil_getAllConfiguredNS | /**
* Collect all configured nameservices.
*
* @param conf the configuration object.
* @return Set of name services in config.
* @throws IllegalArgumentException if monitored namenodes are not correctly configured.
*/
public static Set<String> getAllConfiguredNS(Configuration conf)
throws IllegalArgumentException {
// Get all name services configured
Collection<String> namenodes = conf.getTrimmedStringCollection(
DFS_ROUTER_MONITOR_NAMENODE);
    Set<String> nameservices = new HashSet<>();
for (String namenode : namenodes) {
String[] namenodeSplit = namenode.split("\\.");
String nsId;
if (namenodeSplit.length == 2) {
nsId = namenodeSplit[0];
} else if (namenodeSplit.length == 1) {
nsId = namenode;
} else {
String errorMsg = "Wrong name service specified : " + namenode;
throw new IllegalArgumentException(
errorMsg);
}
nameservices.add(nsId);
}
return nameservices;
} | 3.68 |
framework_Table_addListener | /**
* @deprecated As of 7.0, replaced by
* {@link #addColumnReorderListener(ColumnReorderListener)}
*/
@Deprecated
public void addListener(ColumnReorderListener listener) {
addColumnReorderListener(listener);
} | 3.68 |