name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_AllocateRequest_setSchedulingRequests | /**
* Set the list of Scheduling requests to inform the
* <code>ResourceManager</code> about the application's resource requirements
* (potentially including allocation tags and placement constraints).
* @param schedulingRequests list of {@link SchedulingRequest} to update
* the <code>ResourceManager</code> about the application's resource
* requirements.
*/
@Public
@Unstable
public void setSchedulingRequests(
List<SchedulingRequest> schedulingRequests) {
} | 3.68 |
hbase_WALUtil_writeRegionEventMarker | /**
* Write a region open marker indicating that the region is opened. This write is for internal use
* only. Not for external client consumption.
*/
public static WALKeyImpl writeRegionEventMarker(WAL wal,
NavigableMap<byte[], Integer> replicationScope, RegionInfo hri, RegionEventDescriptor r,
MultiVersionConcurrencyControl mvcc, RegionReplicationSink sink) throws IOException {
WALKeyImpl walKey = writeMarker(wal, replicationScope, hri,
WALEdit.createRegionEventWALEdit(hri, r), mvcc, null, sink);
if (LOG.isTraceEnabled()) {
LOG.trace("Appended region event marker " + TextFormat.shortDebugString(r));
}
return walKey;
} | 3.68 |
framework_AbstractComponentContainer_fireComponentDetachEvent | /**
* Fires the component detached event. This should be called by the
* removeComponent methods after the component has been removed from this
* container.
*
* @param component
* the component that has been removed from this container.
*/
protected void fireComponentDetachEvent(Component component) {
fireEvent(new ComponentDetachEvent(this, component));
} | 3.68 |
hudi_HoodieAvroUtils_sanitizeName | /**
* Sanitizes a name according to the Avro rules for names.
* Replaces characters other than the ones permitted by https://avro.apache.org/docs/current/spec.html#names with the given mask.
*
* @param name input name
* @param invalidCharMask replacement for invalid characters.
* @return sanitized name
*/
public static String sanitizeName(String name, String invalidCharMask) {
if (INVALID_AVRO_FIRST_CHAR_IN_NAMES_PATTERN.matcher(name.substring(0, 1)).matches()) {
name = INVALID_AVRO_FIRST_CHAR_IN_NAMES_PATTERN.matcher(name).replaceFirst(invalidCharMask);
}
return INVALID_AVRO_CHARS_IN_NAMES_PATTERN.matcher(name).replaceAll(invalidCharMask);
} | 3.68 |
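For illustration, a minimal self-contained sketch of the same sanitization step; the two pattern constants below are assumptions standing in for Hudi's internal definitions of the Avro name rules, not the library's actual fields.

```java
import java.util.regex.Pattern;

public class SanitizeNameSketch {
    // Assumed stand-ins for Hudi's internal patterns: Avro names must start with
    // [A-Za-z_] and contain only [A-Za-z0-9_].
    private static final Pattern INVALID_FIRST_CHAR = Pattern.compile("[^A-Za-z_]");
    private static final Pattern INVALID_CHARS = Pattern.compile("[^A-Za-z0-9_]");

    static String sanitizeName(String name, String invalidCharMask) {
        // Mask an invalid leading character first, then mask the remaining invalid characters.
        if (INVALID_FIRST_CHAR.matcher(name.substring(0, 1)).matches()) {
            name = INVALID_FIRST_CHAR.matcher(name).replaceFirst(invalidCharMask);
        }
        return INVALID_CHARS.matcher(name).replaceAll(invalidCharMask);
    }

    public static void main(String[] args) {
        // Under the assumed patterns: "1-col.name" -> "__col_name"
        System.out.println(sanitizeName("1-col.name", "_"));
    }
}
```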
dubbo_AbstractConfigManager_getConfig | /**
* Get config instance by id or by name
*
* @param cls Config type
* @param idOrName the id or name of the config
* @return an {@link Optional} holding the matching config, or an empty Optional if none is found
*/
public <T extends AbstractConfig> Optional<T> getConfig(Class<T> cls, String idOrName) {
T config = getConfigById(getTagName(cls), idOrName);
if (config == null) {
config = getConfigByName(cls, idOrName);
}
return ofNullable(config);
} | 3.68 |
hudi_RocksDBDAO_putInBatch | /**
* Helper to add put operation in batch.
*
* @param batch Batch Handle
* @param columnFamilyName Column Family
* @param key Key
* @param value Payload
* @param <K> Type of key
* @param <T> Type of payload
*/
public <K extends Serializable, T extends Serializable> void putInBatch(WriteBatch batch, String columnFamilyName,
K key, T value) {
try {
byte[] keyBytes = SerializationUtils.serialize(key);
byte[] payload = serializePayload(value);
batch.put(managedHandlesMap.get(columnFamilyName), keyBytes, payload);
} catch (Exception e) {
throw new HoodieException(e);
}
} | 3.68 |
flink_Executors_newDirectExecutorServiceWithNoOpShutdown | /**
* Creates a new {@link ExecutorService} that runs the passed tasks in the calling thread but
* doesn't implement proper shutdown behavior. Tasks can still be submitted even after {@link
* ExecutorService#shutdown()} is called.
*
* @see #newDirectExecutorService()
*/
public static ExecutorService newDirectExecutorServiceWithNoOpShutdown() {
return new DirectExecutorService(false);
} | 3.68 |
hbase_OrderedBytes_isFixedInt64 | /**
* Return true when the next encoded value in {@code src} uses fixed-width Int64 encoding, false
* otherwise.
*/
public static boolean isFixedInt64(PositionedByteRange src) {
return FIXED_INT64
== (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
} | 3.68 |
hbase_HBaseMetrics2HadoopMetricsAdapter_snapshotAllMetrics | /**
* Iterates over the metrics in the given MetricRegistry and adds them to the {@code builder}.
* @param metricRegistry the registry whose metrics are snapshotted
* @param builder A record builder
*/
public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) {
Map<String, Metric> metrics = metricRegistry.getMetrics();
for (Map.Entry<String, Metric> e : metrics.entrySet()) {
// Always capitalize the name
String name = StringUtils.capitalize(e.getKey());
Metric metric = e.getValue();
if (metric instanceof Gauge) {
addGauge(name, (Gauge<?>) metric, builder);
} else if (metric instanceof Counter) {
addCounter(name, (Counter) metric, builder);
} else if (metric instanceof Histogram) {
addHistogram(name, (Histogram) metric, builder);
} else if (metric instanceof Meter) {
addMeter(name, (Meter) metric, builder);
} else if (metric instanceof Timer) {
addTimer(name, (Timer) metric, builder);
} else {
LOG.info("Ignoring unknown Metric class " + metric.getClass().getName());
}
}
} | 3.68 |
framework_TableElementContextMenu_fillTable | // fill the table with some random data
private void fillTable(Table table) {
initProperties(table);
for (int i = 0; i < ROWS; i++) {
String[] line = new String[COLUMNS];
for (int j = 0; j < COLUMNS; j++) {
line[j] = "col=" + j + " row=" + i;
}
table.addItem(line, null);
}
} | 3.68 |
flink_PartitionTempFileManager_createPartitionDir | /** Generates a new file path under the partition directory built from the given partition segments. */
public Path createPartitionDir(String... partitions) {
Path parentPath = taskTmpDir;
for (String dir : partitions) {
parentPath = new Path(parentPath, dir);
}
return new Path(parentPath, newFileName());
} | 3.68 |
MagicPlugin_SpellResult_isSuccess | /**
* Determine if this result is a success or not,
* possibly counting no_target as a success.
*
* @param castOnNoTarget whether a result with no target should count as a success
* @return True if this cast was a success.
*/
public boolean isSuccess(boolean castOnNoTarget) {
if (this == SpellResult.NO_TARGET || this == SpellResult.NO_ACTION || this == SpellResult.STOP) {
return castOnNoTarget;
}
return isSuccess();
} | 3.68 |
hudi_BaseHoodieWriteClient_cluster | /**
* Ensures clustering instant is in expected state and performs clustering for the plan stored in metadata.
* @param clusteringInstant Clustering Instant Time
* @param shouldComplete whether the clustering instant should be completed inline
* @return Collection of Write Status
*/
public HoodieWriteMetadata<O> cluster(String clusteringInstant, boolean shouldComplete) {
HoodieTable table = createTable(config, context.getHadoopConf().get());
preWrite(clusteringInstant, WriteOperationType.CLUSTER, table.getMetaClient());
return tableServiceClient.cluster(clusteringInstant, shouldComplete);
} | 3.68 |
hadoop_FilePosition_absolute | /**
* Gets the current absolute position within this file.
*
* @return the current absolute position within this file.
*/
public long absolute() {
throwIfInvalidBuffer();
return bufferStartOffset + relative();
} | 3.68 |
framework_AbsoluteLayoutConnector_addDefaultPositionIfMissing | /**
* Adds default value of 0.0px for the given property if it's missing from
* the position string altogether. If the property value is already set no
* changes are needed.
*
* @param position
* original position styles
* @param property
* the property that needs to have a value
* @return updated position, or the original string if no updates were
* needed
*/
private String addDefaultPositionIfMissing(String position,
String property) {
if (!position.contains(property)) {
position = position + property + ":0.0px;";
}
return position;
} | 3.68 |
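A small hedged sketch of the defaulting behaviour above, using a local copy of the method rather than the connector itself:

```java
public class PositionDefaultSketch {
    // Local copy of the defaulting logic, for illustration only.
    static String addDefaultPositionIfMissing(String position, String property) {
        if (!position.contains(property)) {
            position = position + property + ":0.0px;";
        }
        return position;
    }

    public static void main(String[] args) {
        System.out.println(addDefaultPositionIfMissing("top:10.0px;", "left")); // top:10.0px;left:0.0px;
        System.out.println(addDefaultPositionIfMissing("left:5.0px;", "left")); // left:5.0px; (unchanged)
    }
}
```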
flink_Optimizer_compile | /**
* Translates the given program to an OptimizedPlan. The optimized plan describes for each
* operator which strategy to use (such as hash join versus sort-merge join), what data exchange
* method to use (local pipe forward, shuffle, broadcast), what exchange mode to use (pipelined,
* batch), where to cache intermediate results, etc.
*
* <p>The optimization happens in multiple phases:
*
* <ol>
* <li>Create optimizer dag implementation of the program.
* <p><tt>OptimizerNode</tt> representations of the PACTs, assign parallelism and compute
* size estimates.
* <li>Compute interesting properties and auxiliary structures.
* <li>Enumerate plan alternatives. This cannot be done in the same step as the interesting
* property computation (as opposed to the Database approaches), because we support plans
* that are not trees.
* </ol>
*
* @param program The program to be translated.
* @param postPasser The function to be used for post passing the optimizer's plan and setting
* the data type specific serialization routines.
* @return The optimized plan.
* @throws CompilerException Thrown, if the plan is invalid or the optimizer encountered an
* inconsistent situation during the compilation process.
*/
private OptimizedPlan compile(Plan program, OptimizerPostPass postPasser)
throws CompilerException {
if (program == null || postPasser == null) {
throw new NullPointerException();
}
if (LOG.isDebugEnabled()) {
LOG.debug("Beginning compilation of program '" + program.getJobName() + '\'');
}
final ExecutionMode defaultDataExchangeMode =
program.getExecutionConfig().getExecutionMode();
final int defaultParallelism =
program.getDefaultParallelism() > 0
? program.getDefaultParallelism()
: this.defaultParallelism;
// log the default settings
LOG.debug("Using a default parallelism of {}", defaultParallelism);
LOG.debug("Using default data exchange mode {}", defaultDataExchangeMode);
// the first step in the compilation is to create the optimizer plan representation
// this step does the following:
// 1) It creates an optimizer plan node for each operator
// 2) It connects them via channels
// 3) It looks for hints about local strategies and channel types and
// sets the types and strategies accordingly
// 4) It makes estimates about the data volume of the data sources and
// propagates those estimates through the plan
GraphCreatingVisitor graphCreator =
new GraphCreatingVisitor(defaultParallelism, defaultDataExchangeMode);
program.accept(graphCreator);
// if we have a plan with multiple data sinks, add logical optimizer nodes that have two
// data-sinks as children
// each until we have only a single root node. This allows to transparently deal with the
// nodes with
// multiple outputs
OptimizerNode rootNode;
if (graphCreator.getSinks().size() == 1) {
rootNode = graphCreator.getSinks().get(0);
} else if (graphCreator.getSinks().size() > 1) {
Iterator<DataSinkNode> iter = graphCreator.getSinks().iterator();
rootNode = iter.next();
while (iter.hasNext()) {
rootNode = new SinkJoiner(rootNode, iter.next());
}
} else {
throw new CompilerException("Bug: The optimizer plan representation has no sinks.");
}
// now that we have all nodes created and recorded which ones consume memory, tell the nodes
// their minimal
// guaranteed memory, for further cost estimations. We assume an equal distribution of
// memory among consumer tasks
rootNode.accept(new IdAndEstimatesVisitor(this.statistics));
// We need to enforce that union nodes always forward their output to their successor.
// Any partitioning must be either pushed before or done after the union, but not on the
// union's output.
UnionParallelismAndForwardEnforcer unionEnforcer = new UnionParallelismAndForwardEnforcer();
rootNode.accept(unionEnforcer);
// We are dealing with operator DAGs, rather than operator trees.
// That requires us to deviate at some points from the classical DB optimizer algorithms.
// This step builds auxiliary structures to help track branches and joins in the DAG
BranchesVisitor branchingVisitor = new BranchesVisitor();
rootNode.accept(branchingVisitor);
// Propagate the interesting properties top-down through the graph
InterestingPropertyVisitor propsVisitor =
new InterestingPropertyVisitor(this.costEstimator);
rootNode.accept(propsVisitor);
// perform a sanity check: the root may not have any unclosed branches
if (rootNode.getOpenBranches() != null && rootNode.getOpenBranches().size() > 0) {
throw new CompilerException(
"Bug: Logic for branching plans (non-tree plans) has an error, and does not "
+ "track the re-joining of branches correctly.");
}
// the final step is now to generate the actual plan alternatives
List<PlanNode> bestPlan = rootNode.getAlternativePlans(this.costEstimator);
if (bestPlan.size() != 1) {
throw new CompilerException("Error in compiler: more than one best plan was created!");
}
// check if the best plan's root is a data sink (single sink plan)
// if so, directly take it. if it is a sink joiner node, get its contained sinks
PlanNode bestPlanRoot = bestPlan.get(0);
List<SinkPlanNode> bestPlanSinks = new ArrayList<SinkPlanNode>(4);
if (bestPlanRoot instanceof SinkPlanNode) {
bestPlanSinks.add((SinkPlanNode) bestPlanRoot);
} else if (bestPlanRoot instanceof SinkJoinerPlanNode) {
((SinkJoinerPlanNode) bestPlanRoot).getDataSinks(bestPlanSinks);
}
// finalize the plan
OptimizedPlan plan =
new PlanFinalizer().createFinalPlan(bestPlanSinks, program.getJobName(), program);
plan.accept(new BinaryUnionReplacer());
plan.accept(new RangePartitionRewriter(plan));
// post pass the plan. this is the phase where the serialization and comparator code is set
postPasser.postPass(plan);
return plan;
} | 3.68 |
framework_Escalator_snapDeltas | /**
* Snap deltas of x and y to the major four axes (up, down, left, right)
* with a threshold of a number of degrees from those axes.
*
* @param deltaX
* the delta in the x axis
* @param deltaY
* the delta in the y axis
* @param thresholdRatio
* the threshold in ratio (0..1) between x and y for when to snap
* @return a two-element array: <code>[snappedX, snappedY]</code>
*/
private static double[] snapDeltas(final double deltaX, final double deltaY,
final double thresholdRatio) {
final double[] array = new double[2];
if (deltaX != 0 && deltaY != 0) {
final double aDeltaX = Math.abs(deltaX);
final double aDeltaY = Math.abs(deltaY);
final double yRatio = aDeltaY / aDeltaX;
final double xRatio = aDeltaX / aDeltaY;
array[0] = (xRatio < thresholdRatio) ? 0 : deltaX;
array[1] = (yRatio < thresholdRatio) ? 0 : deltaY;
} else {
array[0] = deltaX;
array[1] = deltaY;
}
return array;
} | 3.68 |
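To make the threshold behaviour concrete, a self-contained copy of the snapping logic with one sample drag; the class name is illustrative and not part of Escalator:

```java
public class SnapDeltasSketch {
    static double[] snapDeltas(double deltaX, double deltaY, double thresholdRatio) {
        final double[] array = new double[2];
        if (deltaX != 0 && deltaY != 0) {
            final double aDeltaX = Math.abs(deltaX);
            final double aDeltaY = Math.abs(deltaY);
            // An axis is zeroed out when its share of the movement is below the threshold.
            array[0] = (aDeltaX / aDeltaY < thresholdRatio) ? 0 : deltaX;
            array[1] = (aDeltaY / aDeltaX < thresholdRatio) ? 0 : deltaY;
        } else {
            array[0] = deltaX;
            array[1] = deltaY;
        }
        return array;
    }

    public static void main(String[] args) {
        // Mostly horizontal drag: yRatio = 5/100 = 0.05 < 0.2, so the y delta snaps to 0.
        double[] snapped = snapDeltas(100, 5, 0.2);
        System.out.println(snapped[0] + ", " + snapped[1]); // prints 100.0, 0.0
    }
}
```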
hbase_RequestConverter_buildEnableCatalogJanitorRequest | /**
* Creates a request for enabling/disabling the catalog janitor
* @return A {@link EnableCatalogJanitorRequest}
*/
public static EnableCatalogJanitorRequest buildEnableCatalogJanitorRequest(boolean enable) {
return EnableCatalogJanitorRequest.newBuilder().setEnable(enable).build();
} | 3.68 |
querydsl_MathExpressions_degrees | /**
* Create a {@code deg(num)} expression
*
* <p>Convert radians to degrees.</p>
*
* @param num numeric expression
* @return deg(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> degrees(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.DEG, num);
} | 3.68 |
querydsl_Expressions_anyOf | /**
* Get the union of the given Boolean expressions
*
* @param exprs predicates
* @return union of predicates
*/
public static BooleanExpression anyOf(BooleanExpression... exprs) {
BooleanExpression rv = null;
for (BooleanExpression b : exprs) {
rv = rv == null ? b : rv.or(b);
}
return rv;
} | 3.68 |
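As a hedged usage sketch (assuming Querydsl's Expressions factory is on the classpath; the boolean templates are placeholders for real predicates), anyOf folds its arguments into a left-to-right or() chain:

```java
import com.querydsl.core.types.dsl.BooleanExpression;
import com.querydsl.core.types.dsl.Expressions;

public class AnyOfSketch {
    public static void main(String[] args) {
        // Placeholder predicates; in real code these would come from generated query types.
        BooleanExpression a = Expressions.booleanTemplate("a = 1");
        BooleanExpression b = Expressions.booleanTemplate("b = 2");
        BooleanExpression c = Expressions.booleanTemplate("c = 3");

        // Equivalent to a.or(b).or(c); the exact toString rendering depends on Querydsl's serializer.
        BooleanExpression union = Expressions.anyOf(a, b, c);
        System.out.println(union);
    }
}
```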
flink_CommittableCollector_of | /**
* Creates a {@link CommittableCollector} based on the current runtime information. This method
* should be used to instantiate a collector for all Sink V2 implementations.
*
* @param context holding runtime information
* @param metricGroup storing the committable metrics
* @param <CommT> type of the committable
* @return {@link CommittableCollector}
*/
public static <CommT> CommittableCollector<CommT> of(
RuntimeContext context, SinkCommitterMetricGroup metricGroup) {
return new CommittableCollector<>(
context.getIndexOfThisSubtask(),
context.getNumberOfParallelSubtasks(),
metricGroup);
} | 3.68 |
framework_AbstractComponentConnector_isRealUpdate | /**
* Checks whether the update is 'real' or contains cached information.
*
* @param uidl
* the UIDL to check
* @return {@code true} if the UIDL does not have the "cached" attribute, {@code false}
* otherwise
*/
@Deprecated
public static boolean isRealUpdate(UIDL uidl) {
return !uidl.hasAttribute("cached");
} | 3.68 |
flink_FileSystem_loadHadoopFsFactory | /**
* Utility loader for the Hadoop file system factory. We treat the Hadoop FS factory in a
* special way, because we use it as a catch-all for file system schemes not supported directly
* in Flink.
*
* <p>This method does a set of eager checks for availability of certain classes, to be able to
* give better error messages.
*/
private static FileSystemFactory loadHadoopFsFactory() {
final ClassLoader cl = FileSystem.class.getClassLoader();
// first, see if the Flink runtime classes are available
final Class<? extends FileSystemFactory> factoryClass;
try {
factoryClass =
Class.forName("org.apache.flink.runtime.fs.hdfs.HadoopFsFactory", false, cl)
.asSubclass(FileSystemFactory.class);
} catch (ClassNotFoundException e) {
LOG.info(
"No Flink runtime dependency present. "
+ "The extended set of supported File Systems via Hadoop is not available.");
return new UnsupportedSchemeFactory(
"Flink runtime classes missing in classpath/dependencies.");
} catch (Exception | LinkageError e) {
LOG.warn("Flink's Hadoop file system factory could not be loaded", e);
return new UnsupportedSchemeFactory(
"Flink's Hadoop file system factory could not be loaded", e);
}
// check (for eager and better exception messages) if the Hadoop classes are available here
try {
Class.forName("org.apache.hadoop.conf.Configuration", false, cl);
Class.forName("org.apache.hadoop.fs.FileSystem", false, cl);
} catch (ClassNotFoundException e) {
LOG.info(
"Hadoop is not in the classpath/dependencies. "
+ "The extended set of supported File Systems via Hadoop is not available.");
return new UnsupportedSchemeFactory("Hadoop is not in the classpath/dependencies.");
}
// Create the factory.
try {
return factoryClass.newInstance();
} catch (Exception | LinkageError e) {
LOG.warn("Flink's Hadoop file system factory could not be created", e);
return new UnsupportedSchemeFactory(
"Flink's Hadoop file system factory could not be created", e);
}
} | 3.68 |
hbase_RawCellBuilderFactory_create | /** Returns the cell that is created */
public static RawCellBuilder create() {
return new KeyValueBuilder();
} | 3.68 |
hudi_SixToFiveDowngradeHandler_syncCompactionRequestedFileToAuxiliaryFolder | /**
* See HUDI-6040.
*/
private static void syncCompactionRequestedFileToAuxiliaryFolder(HoodieTable table) {
HoodieTableMetaClient metaClient = table.getMetaClient();
HoodieTimeline compactionTimeline = new HoodieActiveTimeline(metaClient, false).filterPendingCompactionTimeline()
.filter(instant -> instant.getState() == HoodieInstant.State.REQUESTED);
compactionTimeline.getInstantsAsStream().forEach(instant -> {
String fileName = instant.getFileName();
FileIOUtils.copy(metaClient.getFs(),
new Path(metaClient.getMetaPath(), fileName),
new Path(metaClient.getMetaAuxiliaryPath(), fileName));
});
} | 3.68 |
hbase_HBaseTestingUtility_shutdownMiniDFSCluster | /**
* Shuts down instance created by call to {@link #startMiniDFSCluster(int)} or does nothing.
*/
public void shutdownMiniDFSCluster() throws IOException {
if (this.dfsCluster != null) {
// The below throws an exception per dn, AsynchronousCloseException.
this.dfsCluster.shutdown();
dfsCluster = null;
// It is possible that the dfs cluster is set through setDFSCluster method, where we will not
// have a fixer
if (dfsClusterFixer != null) {
this.dfsClusterFixer.shutdown();
dfsClusterFixer = null;
}
dataTestDirOnTestFS = null;
CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
}
} | 3.68 |
hbase_Encryption_encryptWithSubjectKey | /**
* Encrypts a block of plaintext with the symmetric key resolved for the given subject
* @param out ciphertext
* @param in plaintext
* @param subject the subject whose key is resolved and used for encryption
* @param conf configuration
* @param cipher the encryption algorithm
* @param iv the initialization vector, can be null
*/
public static void encryptWithSubjectKey(OutputStream out, InputStream in, String subject,
Configuration conf, Cipher cipher, byte[] iv) throws IOException {
Key key = getSecretKeyForSubject(subject, conf);
if (key == null) {
throw new IOException("No key found for subject '" + subject + "'");
}
Encryptor e = cipher.getEncryptor();
e.setKey(key);
e.setIv(iv); // can be null
encrypt(out, in, e);
} | 3.68 |
flink_EmbeddedRocksDBStateBackend_setRocksDBOptions | /**
* Sets {@link org.rocksdb.Options} for the RocksDB instances. Because the options are not
* serializable and hold native code references, they must be specified through a factory.
*
* <p>The options created by the factory here are applied on top of the pre-defined options
* profile selected via {@link #setPredefinedOptions(PredefinedOptions)} and user-configured
* options from configuration set by {@link #configure(ReadableConfig, ClassLoader)} with keys
* in {@link RocksDBConfigurableOptions}.
*
* @param optionsFactory The options factory that lazily creates the RocksDB options.
*/
public void setRocksDBOptions(RocksDBOptionsFactory optionsFactory) {
this.rocksDbOptionsFactory = optionsFactory;
} | 3.68 |
AreaShop_InfoCommand_getTypeOrder | /**
* Get an integer to order by type, usable for Comparators.
* @param region The region to get the order for
* @return An integer for sorting by type
*/
private Integer getTypeOrder(GeneralRegion region) {
if(region.getType() == GeneralRegion.RegionType.RENT) {
if(region.getOwner() == null) {
return 1;
} else {
return 2;
}
} else {
if(region.getOwner() == null) {
return 3;
} else if(!((BuyRegion)region).isInResellingMode()) {
return 4;
} else {
return 5;
}
}
} | 3.68 |
hbase_ReplicationThrottler_addPushSize | /**
* Add current size to the current cycle's total push size
* @param size is the current size added to the current cycle's total push size
*/
public void addPushSize(final long size) {
if (this.enabled) {
this.cyclePushSize += size;
}
} | 3.68 |
hudi_BaseHoodieTableServiceClient_clean | /**
* Clean up any stale/old files/data lying around (either on file storage or index storage) based on the
* configurations and CleaningPolicy used. (typically files that no longer can be used by a running query can be
* cleaned). This API provides the flexibility to schedule clean instant asynchronously via
* {@link BaseHoodieTableServiceClient#scheduleTableService(String, Option, TableServiceType)} and disable inline scheduling
* of clean.
*
* @param cleanInstantTime instant time for clean.
* @param scheduleInline true if the clean needs to be scheduled inline, false otherwise.
* @return the clean metadata, or null if table services are disabled or cleaning is skipped.
*/
@Nullable
public HoodieCleanMetadata clean(String cleanInstantTime, boolean scheduleInline) throws HoodieIOException {
if (!tableServicesEnabled(config)) {
return null;
}
final Timer.Context timerContext = metrics.getCleanCtx();
CleanerUtils.rollbackFailedWrites(config.getFailedWritesCleanPolicy(),
HoodieTimeline.CLEAN_ACTION, () -> rollbackFailedWrites());
HoodieTable table = createTable(config, hadoopConf);
if (config.allowMultipleCleans() || !table.getActiveTimeline().getCleanerTimeline().filterInflightsAndRequested().firstInstant().isPresent()) {
LOG.info("Cleaner started");
// proceed only if multiple clean schedules are enabled or if there are no pending cleans.
if (scheduleInline) {
scheduleTableServiceInternal(cleanInstantTime, Option.empty(), TableServiceType.CLEAN);
table.getMetaClient().reloadActiveTimeline();
}
if (shouldDelegateToTableServiceManager(config, ActionType.clean)) {
LOG.warn("Cleaning is not yet supported with Table Service Manager.");
return null;
}
}
// Proceeds to execute any requested or inflight clean instances in the timeline
HoodieCleanMetadata metadata = table.clean(context, cleanInstantTime);
if (timerContext != null && metadata != null) {
long durationMs = metrics.getDurationInMs(timerContext.stop());
metrics.updateCleanMetrics(durationMs, metadata.getTotalFilesDeleted());
LOG.info("Cleaned " + metadata.getTotalFilesDeleted() + " files"
+ " Earliest Retained Instant :" + metadata.getEarliestCommitToRetain()
+ " cleanerElapsedMs" + durationMs);
}
return metadata;
} | 3.68 |
graphhopper_ShortestPathTree_setDistanceLimit | /**
* Distance limit in meters.
*/
public void setDistanceLimit(double limit) {
exploreType = DISTANCE;
this.limit = limit;
this.queueByZ = new PriorityQueue<>(1000, comparingDouble(l -> l.distance));
} | 3.68 |
hudi_BoundedInMemoryQueue_readNextRecord | /**
* Reader interface but never exposed to outside world as this is a single consumer queue. Reading is done through a
* singleton iterator for this queue.
*/
@Override
public Option<O> readNextRecord() {
if (this.isReadDone.get()) {
return Option.empty();
}
rateLimiter.release();
Option<O> newRecord = Option.empty();
while (expectMoreRecords()) {
try {
throwExceptionIfFailed();
newRecord = queue.poll(RECORD_POLL_INTERVAL_SEC, TimeUnit.SECONDS);
if (newRecord != null) {
break;
}
} catch (InterruptedException e) {
LOG.error("error reading records from queue", e);
throw new HoodieException(e);
}
}
// Check one more time here as it is possible producer erred out and closed immediately
throwExceptionIfFailed();
if (newRecord != null && newRecord.isPresent()) {
return newRecord;
} else {
// We are done reading all the records from internal iterator.
this.isReadDone.set(true);
return Option.empty();
}
} | 3.68 |
morf_Criterion_in | /**
* Helper method to create a new "IN" expression.
*
* <blockquote>
* <pre>
* SelectStatement stmt = select()
* .from(tableRef("Schedule"))
* .where(
* Criterion.in(field("chargeType"), 1, 2, 3)
* )
* </pre>
* </blockquote>
*
* <strong>Any null values among {@code values}
* that are compared to {@code field} can produce unexpected
* results.</strong>
*
* @param field the field to evaluate (the left-hand side of the expression)
* @param values the list of values (the right-hand side of the expression)
* @return a new Criterion object
*/
public static Criterion in(AliasedField field, Iterable<? extends Object> values) {
return new Criterion(Operator.IN, field, ImmutableList.copyOf(values));
} | 3.68 |
framework_VTabsheet_updateDynamicWidth | /**
* For internal use only. May be removed or replaced in the future.
*
* @see #isDynamicWidth()
*/
public void updateDynamicWidth() {
// Find width consumed by tabs
// spacer is a filler cell that covers the gap beside the tabs when
// the content is wider than the collective width of the tabs (also
// ensures there's room for the scroller element but that is usually
// hidden in dynamic width tab sheets), by default hidden by Valo
TableCellElement spacerCell = ((TableCellElement) tb.spacerTd.cast());
int spacerWidth = spacerCell.getOffsetWidth();
DivElement spacerContent = (DivElement) spacerCell
.getFirstChildElement();
int spacerMinWidth = spacerWidth - spacerContent.getOffsetWidth();
int tabsWidth = tb.getOffsetWidth() - spacerWidth + spacerMinWidth;
// Find content width
Style style = tabPanel.getElement().getStyle();
String overflow = style.getProperty("overflow");
style.setProperty("overflow", "hidden");
// set temporary width to match the tab widths in case the content
// component is relatively sized and previously calculated width is now
// too wide
style.setPropertyPx("width", tabsWidth);
boolean hasContent = tabPanel.getWidgetCount() > 0;
Style wrapperstyle = null;
int contentWidth = 0;
if (hasContent) {
wrapperstyle = getCurrentlyDisplayedWidget().getElement()
.getParentElement().getStyle();
wrapperstyle.setPropertyPx("width", tabsWidth);
// Get content width from actual widget
contentWidth = getCurrentlyDisplayedWidget().getOffsetWidth();
}
style.setProperty("overflow", overflow);
// Set widths to max(tabs,content)
if (tabsWidth < contentWidth) {
tabsWidth = contentWidth;
}
int outerWidth = tabsWidth + getContentAreaBorderWidth();
tabs.getStyle().setPropertyPx("width", outerWidth);
style.setPropertyPx("width", tabsWidth);
if (hasContent) {
wrapperstyle.setPropertyPx("width", tabsWidth);
}
contentNode.getStyle().setPropertyPx("width", tabsWidth);
super.setWidth(outerWidth + "px");
updateOpenTabSize();
} | 3.68 |
hbase_HFileArchiver_archiveStoreFiles | /**
* Remove the store files, either by archiving them or outright deletion
* @param conf {@link Configuration} to examine to determine the archive directory
* @param fs the filesystem where the store files live
* @param regionInfo {@link RegionInfo} of the region hosting the store files
* @param tableDir {@link Path} to where the table is being stored
* @param family the family hosting the store files
* @param compactedFiles files to be disposed of. No further reading of these files should be
* attempted; otherwise likely to cause an {@link IOException}
* @throws IOException if the files could not be correctly disposed.
*/
public static void archiveStoreFiles(Configuration conf, FileSystem fs, RegionInfo regionInfo,
Path tableDir, byte[] family, Collection<HStoreFile> compactedFiles) throws IOException {
Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
archive(fs, regionInfo, family, compactedFiles, storeArchiveDir);
} | 3.68 |
hbase_BlockingRpcConnection_run | /**
* Reads the call from the queue, write them on the socket.
*/
@Override
public void run() {
synchronized (BlockingRpcConnection.this) {
while (!closed) {
if (callsToWrite.isEmpty()) {
// We should use another monitor object here for better performance since the read
// thread also uses ConnectionImpl.this. But this makes the locking schema more
// complicated, can do it later as an optimization.
try {
BlockingRpcConnection.this.wait();
} catch (InterruptedException e) {
// Restore interrupt status
Thread.currentThread().interrupt();
}
// check if we need to quit, so continue the main loop instead of fallback.
continue;
}
Call call = callsToWrite.poll();
if (call.isDone()) {
continue;
}
try (Scope scope = call.span.makeCurrent()) {
writeRequest(call);
} catch (IOException e) {
// exception here means the call has not been added to the pendingCalls yet, so we need
// to fail it by our own.
LOG.debug("call write error for {}", call.toShortString());
call.setException(e);
closeConn(e);
}
}
}
} | 3.68 |
framework_VListSelect_getTabIndex | /**
* Gets the tab index.
*
* @return the tab index
*/
public int getTabIndex() {
return select.getTabIndex();
} | 3.68 |
framework_Profiler_initialize | /**
* Initializes the profiler. This should be done before calling any other
* function in this class. Failing to do so might cause undesired behavior.
* This method has no side effects if the initialization has already been
* done.
* <p>
* Please note that this method should be called even if the profiler is not
* enabled because it will then remove a logger function that might have
* been included in the HTML page and that would leak memory unless removed.
* </p>
*
* @since 7.0.2
*/
public static void initialize() {
if (hasHighPrecisionTime()) {
RELATIVE_TIME_SUPPLIER = new HighResolutionTimeSupplier();
} else {
RELATIVE_TIME_SUPPLIER = new DefaultRelativeTimeSupplier();
}
if (isEnabled()) {
ensureLogger();
} else {
ensureNoLogger();
}
} | 3.68 |
hbase_ZKUtil_listChildrenNoWatch | /**
* Lists the children of the specified znode without setting any watches. Sets no watches at all,
* this method is best effort. Returns an empty list if the node has no children. Returns null if
* the parent node itself does not exist.
* @param zkw zookeeper reference
* @param znode node to get children
* @return list of data of children of specified znode, empty if no children, null if parent does
* not exist
* @throws KeeperException if unexpected zookeeper exception
*/
public static List<String> listChildrenNoWatch(ZKWatcher zkw, String znode)
throws KeeperException {
List<String> children = null;
try {
// List the children without watching
children = zkw.getRecoverableZooKeeper().getChildren(znode, null);
} catch (KeeperException.NoNodeException nne) {
return null;
} catch (InterruptedException ie) {
zkw.interruptedException(ie);
}
return children;
} | 3.68 |
framework_ServerRpcQueue_isLegacyVariableChange | /**
* Checks if the given method invocation represents a Vaadin 6 variable
* change.
*
* @param invocation
* the invocation to check
* @return true if the method invocation is a legacy variable change, false
* otherwise
*/
public static boolean isLegacyVariableChange(MethodInvocation invocation) {
return ApplicationConstants.UPDATE_VARIABLE_METHOD
.equals(invocation.getInterfaceName())
&& ApplicationConstants.UPDATE_VARIABLE_METHOD
.equals(invocation.getMethodName());
} | 3.68 |
hbase_StoreFileScanner_seekBeforeAndSaveKeyToPreviousRow | /**
* Seeks before the seek target cell and saves the location to {@link #previousRow}. If there
* doesn't exist a KV in this file before the seek target cell, reposition the scanner at the
* beginning of the storefile (in preparation to a reseek at or after the seek key) and set the
* {@link #previousRow} to null. If {@link #previousRow} is ever non-null and then transitions to
* being null again via this method, that's because there doesn't exist a row before the seek
* target in the storefile (i.e. we're at the beginning of the storefile)
*/
private void seekBeforeAndSaveKeyToPreviousRow(Cell seekKey) throws IOException {
if (seekCount != null) {
seekCount.increment();
}
if (!hfs.seekBefore(seekKey)) {
// Since the above seek failed, we need to position ourselves back at the start of the
// block or else our reseek might fail. seekTo() cannot return false here as at least
// one seekBefore will have returned true by the time we get here
hfs.seekTo();
this.previousRow = null;
} else {
this.previousRow = hfs.getCell();
}
} | 3.68 |
hbase_ProcedureWALFormat_load | /**
* Load all the procedures in these ProcedureWALFiles, and rebuild the given {@code tracker} if
* needed, i.e, the {@code tracker} is a partial one.
* <p/>
* The method in the given {@code loader} will be called at the end after we load all the
* procedures and construct the hierarchy.
* <p/>
* And we will call the {@link ProcedureStoreTracker#resetModified()} method for the given
* {@code tracker} before returning, as it will be used to track the next proc wal file's modified
* procedures.
*/
public static void load(Iterator<ProcedureWALFile> logs, ProcedureStoreTracker tracker,
Loader loader) throws IOException {
ProcedureWALFormatReader reader = new ProcedureWALFormatReader(tracker, loader);
tracker.setKeepDeletes(true);
// Ignore the last log which is current active log.
while (logs.hasNext()) {
ProcedureWALFile log = logs.next();
log.open();
try {
reader.read(log);
} finally {
log.close();
}
}
reader.finish();
// The tracker is now updated with all the procedures read from the logs
if (tracker.isPartial()) {
tracker.setPartialFlag(false);
}
tracker.resetModified();
tracker.setKeepDeletes(false);
} | 3.68 |
querydsl_StringExpression_upper | /**
* Create a {@code this.toUpperCase()} expression
*
* <p>Get the upper case form</p>
*
* @return this.toUpperCase()
* @see java.lang.String#toUpperCase()
*/
public StringExpression upper() {
if (upper == null) {
upper = Expressions.stringOperation(Ops.UPPER, mixin);
}
return upper;
} | 3.68 |
dubbo_ServiceAnnotationPostProcessor_getServiceAnnotationAttributes | /**
* Get the Dubbo service annotation attributes declared on a Java-config @Bean method.
* @return the service annotation attributes map if found, or null if not found.
*/
private Map<String, Object> getServiceAnnotationAttributes(BeanDefinition beanDefinition) {
if (beanDefinition instanceof AnnotatedBeanDefinition) {
AnnotatedBeanDefinition annotatedBeanDefinition = (AnnotatedBeanDefinition) beanDefinition;
MethodMetadata factoryMethodMetadata = SpringCompatUtils.getFactoryMethodMetadata(annotatedBeanDefinition);
if (factoryMethodMetadata != null) {
// try all dubbo service annotation types
for (Class<? extends Annotation> annotationType : serviceAnnotationTypes) {
if (factoryMethodMetadata.isAnnotated(annotationType.getName())) {
// Since Spring 5.2
// return
// factoryMethodMetadata.getAnnotations().get(annotationType).filterDefaultValues().asMap();
// Compatible with Spring 4.x
Map<String, Object> annotationAttributes =
factoryMethodMetadata.getAnnotationAttributes(annotationType.getName());
return filterDefaultValues(annotationType, annotationAttributes);
}
}
}
}
return null;
} | 3.68 |
pulsar_ClientConfiguration_setTlsTrustCertsFilePath | /**
* Set the path to the trusted TLS certificate file.
*
* @param tlsTrustCertsFilePath path to the trusted TLS certificate file
*/
public void setTlsTrustCertsFilePath(String tlsTrustCertsFilePath) {
confData.setTlsTrustCertsFilePath(tlsTrustCertsFilePath);
} | 3.68 |
dubbo_ReflectUtils_desc2name | /**
* desc to name.
* "[[I" => "int[][]"
*
* @param desc desc.
* @return name.
*/
public static String desc2name(String desc) {
StringBuilder sb = new StringBuilder();
int c = desc.lastIndexOf('[') + 1;
if (desc.length() == c + 1) {
switch (desc.charAt(c)) {
case JVM_VOID: {
sb.append("void");
break;
}
case JVM_BOOLEAN: {
sb.append("boolean");
break;
}
case JVM_BYTE: {
sb.append("byte");
break;
}
case JVM_CHAR: {
sb.append("char");
break;
}
case JVM_DOUBLE: {
sb.append("double");
break;
}
case JVM_FLOAT: {
sb.append("float");
break;
}
case JVM_INT: {
sb.append("int");
break;
}
case JVM_LONG: {
sb.append("long");
break;
}
case JVM_SHORT: {
sb.append("short");
break;
}
default:
throw new RuntimeException();
}
} else {
sb.append(desc.substring(c + 1, desc.length() - 1).replace('/', '.'));
}
while (c-- > 0) {
sb.append("[]");
}
return sb.toString();
} | 3.68 |
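A brief hedged usage sketch; the import path is an assumption about where ReflectUtils lives (Dubbo's common utils), and the expected outputs follow directly from the descriptor grammar handled above:

```java
import org.apache.dubbo.common.utils.ReflectUtils; // assumed package for ReflectUtils

public class Desc2NameSketch {
    public static void main(String[] args) {
        System.out.println(ReflectUtils.desc2name("[[I"));                 // int[][]
        System.out.println(ReflectUtils.desc2name("Ljava/lang/String;"));  // java.lang.String
        System.out.println(ReflectUtils.desc2name("[Ljava/lang/Object;")); // java.lang.Object[]
    }
}
```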
hadoop_FedBalance_setMap | /**
* Max number of concurrent maps to use for copy.
* @param value the map number of the distcp.
* @return the builder.
*/
public Builder setMap(int value) {
this.map = value;
return this;
} | 3.68 |
hudi_ExpressionPredicates_fromExpression | /**
* Converts specific call expression to the predicate.
*
* <p>Two steps to bind the call:
* 1. map the predicate instance;
* 2. bind the field reference;
*
* <p>Normalize the expression to simplify the subsequent decision logic:
* always put the literal expression in the RHS.
*
* @param callExpression The call expression to convert.
* @return The converted predicate.
*/
public static Predicate fromExpression(CallExpression callExpression) {
FunctionDefinition functionDefinition = callExpression.getFunctionDefinition();
List<Expression> childExpressions = callExpression.getChildren();
boolean normalized = childExpressions.get(0) instanceof FieldReferenceExpression;
if (BuiltInFunctionDefinitions.NOT.equals(functionDefinition)) {
Not predicate = Not.getInstance();
Predicate childPredicate = fromExpression((CallExpression) childExpressions.get(0));
return predicate.bindPredicate(childPredicate);
}
if (BuiltInFunctionDefinitions.AND.equals(functionDefinition)) {
And predicate = And.getInstance();
Predicate predicate1 = fromExpression((CallExpression) childExpressions.get(0));
Predicate predicate2 = fromExpression((CallExpression) childExpressions.get(1));
return predicate.bindPredicates(predicate1, predicate2);
}
if (BuiltInFunctionDefinitions.OR.equals(functionDefinition)) {
Or predicate = Or.getInstance();
Predicate predicate1 = fromExpression((CallExpression) childExpressions.get(0));
Predicate predicate2 = fromExpression((CallExpression) childExpressions.get(1));
return predicate.bindPredicates(predicate1, predicate2);
}
if (BuiltInFunctionDefinitions.IS_NULL.equals(functionDefinition)
|| BuiltInFunctionDefinitions.IS_NOT_NULL.equals(functionDefinition)
|| childExpressions.stream().anyMatch(e -> e instanceof ValueLiteralExpression
&& getValueFromLiteral((ValueLiteralExpression) e) == null)) {
return AlwaysNull.getInstance();
}
// handle IN specifically
if (BuiltInFunctionDefinitions.IN.equals(functionDefinition)) {
checkState(normalized, "The IN expression expects to be normalized");
In in = In.getInstance();
FieldReferenceExpression fieldReference = (FieldReferenceExpression) childExpressions.get(0);
List<ValueLiteralExpression> valueLiterals = IntStream.range(1, childExpressions.size())
.mapToObj(index -> (ValueLiteralExpression) childExpressions.get(index))
.collect(Collectors.toList());
return in.bindValueLiterals(valueLiterals).bindFieldReference(fieldReference);
}
ColumnPredicate predicate;
// handle binary operators
if (BuiltInFunctionDefinitions.EQUALS.equals(functionDefinition)) {
predicate = Equals.getInstance();
} else if (BuiltInFunctionDefinitions.NOT_EQUALS.equals(functionDefinition)) {
predicate = NotEquals.getInstance();
} else if (BuiltInFunctionDefinitions.LESS_THAN.equals(functionDefinition)) {
predicate = normalized ? LessThan.getInstance() : GreaterThan.getInstance();
} else if (BuiltInFunctionDefinitions.GREATER_THAN.equals(functionDefinition)) {
predicate = normalized ? GreaterThan.getInstance() : LessThan.getInstance();
} else if (BuiltInFunctionDefinitions.LESS_THAN_OR_EQUAL.equals(functionDefinition)) {
predicate = normalized ? LessThanOrEqual.getInstance() : GreaterThanOrEqual.getInstance();
} else if (BuiltInFunctionDefinitions.GREATER_THAN_OR_EQUAL.equals(functionDefinition)) {
predicate = normalized ? GreaterThanOrEqual.getInstance() : LessThanOrEqual.getInstance();
} else {
throw new AssertionError("Unexpected function definition " + functionDefinition);
}
FieldReferenceExpression fieldReference = normalized
? (FieldReferenceExpression) childExpressions.get(0)
: (FieldReferenceExpression) childExpressions.get(1);
ValueLiteralExpression valueLiteral = normalized
? (ValueLiteralExpression) childExpressions.get(1)
: (ValueLiteralExpression) childExpressions.get(0);
return predicate.bindValueLiteral(valueLiteral).bindFieldReference(fieldReference);
} | 3.68 |
hadoop_RecurrenceId_getRunId | /**
* Return the runId for the pipeline job in one run.
*
* @return the runId.
*/
public final String getRunId() {
return runId;
} | 3.68 |
hmily_TarsHmilyConfiguration_tarsHmilyStartupBean | /**
* add TarsHmilyStartup.
*
* @return TarsHmilyStartup
*/
@Bean
public TarsHmilyFilterStartupBean tarsHmilyStartupBean() {
return new TarsHmilyFilterStartupBean();
} | 3.68 |
pulsar_ConsumerConfigurationData_getMaxPendingChuckedMessage | /**
* @deprecated use {@link #getMaxPendingChunkedMessage()}
*/
@Deprecated
public int getMaxPendingChuckedMessage() {
return maxPendingChunkedMessage;
} | 3.68 |
pulsar_SaslRoleToken_getSession | /**
* Returns the session of the token.
*
* @return the session of the token.
*/
public String getSession() {
return session;
} | 3.68 |
rocketmq-connect_ClusterConfigState_tasks | /**
* Get the current set of task IDs for the specified connector.
*
* @param connectorName the name of the connector to look up task configs for
* @return the current set of connector task IDs
*/
public List<ConnectorTaskId> tasks(String connectorName) {
Integer numTasks = connectorTaskCounts.get(connectorName);
if (numTasks == null) {
return Collections.emptyList();
}
List<ConnectorTaskId> taskIds = new ArrayList<>(numTasks);
for (int taskIndex = 0; taskIndex < numTasks; taskIndex++) {
ConnectorTaskId taskId = new ConnectorTaskId(connectorName, taskIndex);
taskIds.add(taskId);
}
return Collections.unmodifiableList(taskIds);
} | 3.68 |
hbase_ScannerContext_checkTimeLimit | /**
* @param checkerScope The scope that the limit is being checked from. The time limit is always
* checked against {@link EnvironmentEdgeManager#currentTime()}
* @return true when the limit is enforceable from the checker's scope and it has been reached
*/
boolean checkTimeLimit(LimitScope checkerScope) {
return !skippingRow && hasTimeLimit(checkerScope)
&& (returnImmediately || EnvironmentEdgeManager.currentTime() >= limits.getTime());
} | 3.68 |
querydsl_AbstractHibernateQuery_setCacheable | /**
* Enable caching of this query result set.
* @param cacheable Should the query results be cacheable?
* @return the current object
*/
@SuppressWarnings("unchecked")
public Q setCacheable(boolean cacheable) {
this.cacheable = cacheable;
return (Q) this;
} | 3.68 |
hbase_WindowMovingAverage_getNumberOfStatistics | /** Returns number of statistics */
protected int getNumberOfStatistics() {
return lastN.length;
} | 3.68 |
framework_VTree_isSelected | /**
* Is a node selected in the tree.
*
* @param treeNode
* The node to check
* @return true if the node is selected, false otherwise
*/
public boolean isSelected(TreeNode treeNode) {
return selectedIds.contains(treeNode.key);
} | 3.68 |
hudi_HoodieAvroDataBlock_deserializeRecords | // TODO (na) - Break down content into smaller chunks of byte [] to be GC as they are used
@Override
protected <T> ClosableIterator<HoodieRecord<T>> deserializeRecords(byte[] content, HoodieRecordType type) throws IOException {
checkState(this.readerSchema != null, "Reader's schema has to be non-null");
checkArgument(type != HoodieRecordType.SPARK, "Not support read avro to spark record");
// TODO AvroSparkReader need
RecordIterator iterator = RecordIterator.getInstance(this, content);
return new CloseableMappingIterator<>(iterator, data -> (HoodieRecord<T>) new HoodieAvroIndexedRecord(data));
} | 3.68 |
flink_TopNBuffer_lastElement | /** Returns the last record of the last Entry in the buffer. */
public RowData lastElement() {
Map.Entry<RowData, Collection<RowData>> last = treeMap.lastEntry();
RowData lastElement = null;
if (last != null) {
Collection<RowData> collection = last.getValue();
lastElement = getLastElement(collection);
}
return lastElement;
} | 3.68 |
flink_AsyncLookupFunction_eval | /** Invokes {@link #asyncLookup} and chains futures. */
public final void eval(CompletableFuture<Collection<RowData>> future, Object... keys) {
GenericRowData keyRow = GenericRowData.of(keys);
asyncLookup(keyRow)
.whenComplete(
(result, exception) -> {
if (exception != null) {
future.completeExceptionally(
new TableException(
String.format(
"Failed to asynchronously lookup entries with key '%s'",
keyRow),
exception));
return;
}
future.complete(result);
});
} | 3.68 |
hadoop_CompressedWritable_ensureInflated | /** Must be called by all methods which access fields to ensure that the data
* has been uncompressed. */
protected void ensureInflated() {
if (compressed != null) {
try {
ByteArrayInputStream deflated = new ByteArrayInputStream(compressed);
DataInput inflater =
new DataInputStream(new InflaterInputStream(deflated));
readFieldsCompressed(inflater);
compressed = null;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
} | 3.68 |
cron-utils_WeekDay_mapTo | /**
* Maps given WeekDay to the representation held by this instance.
*
* @param targetWeekDayDefinition - referred weekDay
* @param dayOfWeek - day of week to be mapped.
* Value corresponds to this instance mapping.
* @return - int result
*/
public int mapTo(final int dayOfWeek, final WeekDay targetWeekDayDefinition) {
if (firstDayZero && targetWeekDayDefinition.isFirstDayZero()) {
return bothSameStartOfRange(0, 6, this, targetWeekDayDefinition).apply(dayOfWeek);
}
if (!firstDayZero && !targetWeekDayDefinition.isFirstDayZero()) {
return bothSameStartOfRange(1, 7, this, targetWeekDayDefinition).apply(dayOfWeek);
}
//start range is different for each case. We need to normalize ranges
if (targetWeekDayDefinition.isFirstDayZero()) {
//my range is 1-7. I normalize ranges, get the "zero" mapping and turn result into original scale
return mapTo(dayOfWeek, new WeekDay(targetWeekDayDefinition.getMondayDoWValue() + 1, false)) - 1;
} else {
//my range is 0-6. I normalize ranges, get the "one" mapping and turn result into original scale
return (mapTo(dayOfWeek, new WeekDay(targetWeekDayDefinition.getMondayDoWValue() - 1, true))) % 7 + 1;
}
} | 3.68 |
hadoop_FileIoProvider_posixFadvise | /**
* Call posix_fadvise on the given file descriptor.
*
* @param volume target volume. null if unavailable.
*/
public void posixFadvise(
@Nullable FsVolumeSpi volume, String identifier, FileDescriptor outFd,
long offset, long length, int flags) throws NativeIOException {
final long begin = profilingEventHook.beforeMetadataOp(volume, FADVISE);
try {
faultInjectorEventHook.beforeMetadataOp(volume, FADVISE);
NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
identifier, outFd, offset, length, flags);
profilingEventHook.afterMetadataOp(volume, FADVISE, begin);
} catch (Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
flink_ScopeFormat_asVariable | /**
* Formats the given string to resemble a scope variable.
*
* @param scope The string to format
* @return The formatted string
*/
public static String asVariable(String scope) {
return SCOPE_VARIABLE_PREFIX + scope + SCOPE_VARIABLE_SUFFIX;
} | 3.68 |
flink_MasterHooks_wrapHook | /**
* Wraps a hook such that the user-code classloader is applied when the hook is invoked.
*
* @param hook the hook to wrap
* @param userClassLoader the classloader to use
*/
public static <T> MasterTriggerRestoreHook<T> wrapHook(
MasterTriggerRestoreHook<T> hook, ClassLoader userClassLoader) {
return new WrappedMasterHook<>(hook, userClassLoader);
} | 3.68 |
flink_FeedbackTransformation_getWaitTime | /**
* Returns the wait time. This is the amount of time that the feedback operator keeps listening
* for feedback elements. Once the time expires the operation will close and will not receive
* further elements.
*/
public Long getWaitTime() {
return waitTime;
} | 3.68 |
morf_ArchiveDataSetWriter_reallyClose | /**
* @see java.util.zip.ZipOutputStream#close()
* @throws IOException If the exception is thrown from {@link ZipOutputStream#close()}
*/
public void reallyClose() throws IOException {
super.close();
} | 3.68 |
flink_MemorySegment_getOffHeapBuffer | /**
* Returns the off-heap buffer of memory segments.
*
* @return underlying off-heap buffer
* @throws IllegalStateException if the memory segment does not represent off-heap buffer
*/
public ByteBuffer getOffHeapBuffer() {
if (offHeapBuffer != null) {
return offHeapBuffer;
} else {
throw new IllegalStateException("Memory segment does not represent off-heap buffer");
}
} | 3.68 |
hadoop_ManifestSuccessData_setSuccess | /**
* Set the success flag.
* @param success did the job succeed?
*/
public void setSuccess(boolean success) {
this.success = success;
} | 3.68 |
hbase_LruBlockCache_cacheBlock | /**
* Cache the block with the specified name and buffer.
* <p>
* TODO after HBASE-22005, we may cache a block which is allocated from off-heap, but our LRU cache
* sizing is based on heap size, so we should handle this in HBASE-22127. It will introduce a
* switch for whether to make the LRU on-heap or not; if so we may need to copy the memory to
* on-heap, otherwise the caching size is based on off-heap.
* @param cacheKey block's cache key
* @param buf block buffer
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
cacheBlock(cacheKey, buf, false);
} | 3.68 |
hadoop_FedBalanceContext_setMapNum | /**
* The map number of the distcp job.
* @param value the map number of the distcp.
* @return the builder.
*/
public Builder setMapNum(int value) {
this.mapNum = value;
return this;
} | 3.68 |
hadoop_FullCredentialsTokenBinding_createTokenIdentifier | /**
* Create a new delegation token.
*
* It's slightly inefficient to create a new one every time, but
* it avoids concurrency problems with managing any singleton.
* @param policy minimum policy to use, if known.
* @param encryptionSecrets encryption secrets.
* @param renewer the token renewer.
* @return a DT identifier
* @throws IOException failure
*/
@Override
public AbstractS3ATokenIdentifier createTokenIdentifier(
final Optional<RoleModel.Policy> policy,
final EncryptionSecrets encryptionSecrets,
final Text renewer) throws IOException {
requireServiceStarted();
Preconditions.checkNotNull(
awsCredentials, "No AWS credentials to use for a delegation token");
return new FullCredentialsTokenIdentifier(getCanonicalUri(),
getOwnerText(),
renewer,
awsCredentials,
encryptionSecrets,
credentialOrigin);
} | 3.68 |
hbase_CleanerChore_initCleanerChain | /**
* Instantiate and initialize all the file cleaners set in the configuration
* @param confKey key to get the file cleaner classes from the configuration
*/
private void initCleanerChain(String confKey) {
this.cleanersChain = new ArrayList<>();
String[] cleaners = conf.getStrings(confKey);
if (cleaners != null) {
for (String className : cleaners) {
className = className.trim();
if (className.isEmpty()) {
continue;
}
T logCleaner = newFileCleaner(className, conf);
if (logCleaner != null) {
LOG.info("Initialize cleaner={}", className);
this.cleanersChain.add(logCleaner);
}
}
}
} | 3.68 |
hbase_WALUtil_writeCompactionMarker | /**
* Write the marker that a compaction has succeeded and is about to be committed. This provides
* info to the HMaster to allow it to recover the compaction if this regionserver dies in the
* middle. It also prevents the compaction from finishing if this regionserver has already lost
* its lease on the log.
* <p/>
* This write is for internal use only. Not for external client consumption.
* @param mvcc Used by WAL to get sequence Id for the waledit.
*/
public static WALKeyImpl writeCompactionMarker(WAL wal,
NavigableMap<byte[], Integer> replicationScope, RegionInfo hri, final CompactionDescriptor c,
MultiVersionConcurrencyControl mvcc, RegionReplicationSink sink) throws IOException {
WALKeyImpl walKey =
writeMarker(wal, replicationScope, hri, WALEdit.createCompaction(hri, c), mvcc, null, sink);
if (LOG.isTraceEnabled()) {
LOG.trace("Appended compaction marker " + TextFormat.shortDebugString(c));
}
return walKey;
} | 3.68 |
framework_FocusUtil_setFocus | /**
* Explicitly focus/unfocus the given widget. Only one widget can have focus
* at a time, and the widget that does will receive all keyboard events.
*
* @param focusable
* the widget to focus/unfocus
* @param focus
* whether this widget should take focus or release it
*/
public static void setFocus(Widget focusable, boolean focus) {
assert (focusable != null && focusable
.getElement() != null) : "Can't setFocus for a widget without an element";
if (focus) {
focusable.getElement().focus();
} else {
focusable.getElement().blur();
}
} | 3.68 |
framework_VFilterSelect_selectLastItem | /**
* @deprecated use {@link SuggestionPopup#selectLastItem()} instead.
*/
@Deprecated
public void selectLastItem() {
debug("VFS.SM: selectLastItem()");
List<MenuItem> items = getItems();
MenuItem lastItem = items.get(items.size() - 1);
selectItem(lastItem);
} | 3.68 |
pulsar_AuthorizationProvider_allowNamespaceOperation | /**
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
default Boolean allowNamespaceOperation(NamespaceName namespaceName,
String role,
NamespaceOperation operation,
AuthenticationDataSource authData) {
try {
return allowNamespaceOperationAsync(namespaceName, role, operation, authData).get();
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.68 |
hadoop_IOStatisticsBinding_trackDurationOfOperation | /**
* Given an IOException raising callable/lambda expression,
* return a new one which wraps the inner and tracks
* the duration of the operation, including whether
* it passes/fails.
* @param factory factory of duration trackers
* @param statistic statistic key
* @param input input callable.
* @param <B> return type.
* @return a new callable which tracks duration and failure.
*/
public static <B> CallableRaisingIOE<B> trackDurationOfOperation(
@Nullable DurationTrackerFactory factory,
String statistic,
CallableRaisingIOE<B> input) {
return () -> {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
return invokeTrackingDuration(tracker, input);
};
} | 3.68 |
flink_SegmentsUtil_setInt | /**
* set int from segments.
*
* @param segments target segments.
* @param offset value offset.
* @param value int value to set.
*/
public static void setInt(MemorySegment[] segments, int offset, int value) {
if (inFirstSegment(segments, offset, 4)) {
segments[0].putInt(offset, value);
} else {
setIntMultiSegments(segments, offset, value);
}
} | 3.68 |
framework_TableQuery_executeQuery | /**
* Executes the given query string using either the active connection if a
* transaction is already open, or a new connection from this query's
* connection pool.
*
* @param sh
* an instance of StatementHelper, containing the query string
* and parameter values.
* @return ResultSet of the query
* @throws SQLException if the query could not be executed
*/
private ResultSet executeQuery(StatementHelper sh) throws SQLException {
ensureTransaction();
Connection connection = getConnection();
PreparedStatement pstmt = null;
try {
pstmt = connection.prepareStatement(sh.getQueryString());
sh.setParameterValuesToStatement(pstmt);
getLogger().log(Level.FINE, "DB -> {0}", sh.getQueryString());
return pstmt.executeQuery();
} catch (SQLException e) {
releaseConnection(null, pstmt, null);
throw e;
}
} | 3.68 |
hbase_MasterObserver_postDeleteNamespace | /**
* Called after the deleteNamespace operation has been requested.
* @param ctx the environment to interact with the framework and master
* @param namespace the name of the namespace
*/
default void postDeleteNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String namespace) throws IOException {
} | 3.68 |
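A hedged sketch of a coprocessor hooking this callback; the class name and logging are illustrative, the MasterCoprocessor plumbing reflects the usual HBase 2.x pattern rather than anything in the snippet, and imports are omitted like the surrounding snippets:

    public class NamespaceAuditObserver implements MasterCoprocessor, MasterObserver {
      private static final Logger LOG = LoggerFactory.getLogger(NamespaceAuditObserver.class);

      @Override
      public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
      }

      @Override
      public void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
          String namespace) throws IOException {
        // runs after the master has completed the namespace deletion
        LOG.info("Namespace {} was deleted", namespace);
      }
    }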
morf_AbstractSqlDialectTest_testTemporaryCreateTableStatements | /**
   * Tests the SQL for creating temporary tables.
*/
@SuppressWarnings("unchecked")
@Test
public void testTemporaryCreateTableStatements() {
compareStatements(
expectedCreateTemporaryTableStatements(),
testDialect.tableDeploymentStatements(testTempTable),
testDialect.tableDeploymentStatements(alternateTestTempTable),
testDialect.tableDeploymentStatements(nonNullTempTable));
} | 3.68 |
flink_BootstrapTransformation_getMaxParallelism | /**
     * @param globalMaxParallelism the job-wide max parallelism used when none is set for this operator.
     * @return The max parallelism for this operator.
     */
int getMaxParallelism(int globalMaxParallelism) {
return operatorMaxParallelism.orElse(globalMaxParallelism);
} | 3.68 |
hadoop_FederationUtil_newFileSubclusterResolver | /**
* Creates an instance of a FileSubclusterResolver from the configuration.
*
* @param conf Configuration that defines the file resolver class.
* @param router Router service.
* @return New file subcluster resolver.
*/
public static FileSubclusterResolver newFileSubclusterResolver(
Configuration conf, Router router) {
Class<? extends FileSubclusterResolver> clazz = conf.getClass(
RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS,
RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT,
FileSubclusterResolver.class);
return newInstance(conf, router, Router.class, clazz);
} | 3.68 |
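A configuration sketch showing how a specific resolver could be selected through the key this factory reads; MountTableResolver is assumed to be an available FileSubclusterResolver implementation and `router` an existing Router instance:

    Configuration conf = new Configuration();
    conf.setClass(RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS,
        MountTableResolver.class, FileSubclusterResolver.class);
    FileSubclusterResolver resolver =
        FederationUtil.newFileSubclusterResolver(conf, router);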
hudi_HoodieCombineHiveInputFormat_getPartitionFromPath | /**
* HiveFileFormatUtils.getPartitionDescFromPathRecursively is no longer available since Hive 3.
* This method is to make it compatible with both Hive 2 and Hive 3.
   * @param pathToPartitionInfo mapping from partition path to its {@link PartitionDesc}
   * @param dir the directory whose partition descriptor is being looked up
   * @param cacheMap cache of previously resolved path-to-partition mappings
   * @return the partition descriptor for the given directory
   * @throws IOException if the reflective lookup or invocation fails
private static PartitionDesc getPartitionFromPath(Map<Path, PartitionDesc> pathToPartitionInfo, Path dir,
Map<Map<Path, PartitionDesc>, Map<Path, PartitionDesc>> cacheMap)
throws IOException {
Method method;
try {
Class<?> hiveUtilsClass = Class.forName("org.apache.hadoop.hive.ql.io.HiveFileFormatUtils");
try {
// HiveFileFormatUtils.getPartitionDescFromPathRecursively method only available in Hive 2.x
method = hiveUtilsClass.getMethod("getPartitionDescFromPathRecursively", Map.class, Path.class, Map.class);
} catch (NoSuchMethodException e) {
// HiveFileFormatUtils.getFromPathRecursively method only available in Hive 3.x
method = hiveUtilsClass.getMethod("getFromPathRecursively", Map.class, Path.class, Map.class);
}
return (PartitionDesc) method.invoke(null, pathToPartitionInfo, dir, cacheMap);
} catch (ReflectiveOperationException e) {
throw new IOException(e);
}
} | 3.68 |
streampipes_AssetLinkBuilder_create | /**
* Static method to create a new instance of AssetLinkBuilder.
*
* @return A new instance of AssetLinkBuilder.
*/
public static AssetLinkBuilder create() {
return new AssetLinkBuilder();
} | 3.68 |
hbase_ZKUtil_getNodeName | /**
* Get the name of the current node from the specified fully-qualified path.
* @param path fully-qualified path
* @return name of the current node
*/
public static String getNodeName(String path) {
return path.substring(path.lastIndexOf("/") + 1);
} | 3.68 |
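For example (the path is illustrative), a fully-qualified znode path maps to its last path component; note that a path containing no '/' is returned unchanged, since lastIndexOf returns -1:

    String name = ZKUtil.getNodeName("/hbase/rs/host1,16020,1700000000000");
    // name == "host1,16020,1700000000000"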
hudi_UtilHelpers_tableExists | /**
   * Checks that the table exists in the JDBC database by running the dialect's table-exists query;
   * returns true if the query succeeds and otherwise propagates the resulting SQLException.
*/
private static Boolean tableExists(Connection conn, Map<String, String> options) throws SQLException {
JdbcDialect dialect = JdbcDialects.get(options.get(JDBCOptions.JDBC_URL()));
try (PreparedStatement statement = conn.prepareStatement(dialect.getTableExistsQuery(options.get(JDBCOptions.JDBC_TABLE_NAME())))) {
statement.setQueryTimeout(Integer.parseInt(options.get(JDBCOptions.JDBC_QUERY_TIMEOUT())));
statement.executeQuery();
}
return true;
} | 3.68 |
framework_VTransferable_setDragSource | /**
 * Sets the component currently being dragged or from which the transferable
 * is created (e.g. a tree whose node is being dragged).
* <p>
* The server side counterpart of the component may implement
* {@link DragSource} interface if it wants to translate or complement the
* server side instance of this Transferable.
*
* @param component
* the component to set
*/
public void setDragSource(ComponentConnector component) {
this.component = component;
} | 3.68 |
hadoop_TFile_getRecordNum | /**
   * Get the RecordNum corresponding to the entry pointed to by the cursor.
   * @return The RecordNum corresponding to the entry pointed to by the cursor.
* @throws IOException raised on errors performing I/O.
*/
public long getRecordNum() throws IOException {
return reader.getRecordNumByLocation(currentLocation);
} | 3.68 |
rocketmq-connect_ProcessingContext_sourceRecord | /**
* Set the source record being processed in the connect pipeline.
*
* @param record the source record
*/
public void sourceRecord(ConnectRecord record) {
this.sourceRecord = record;
reset();
} | 3.68 |
flink_DataSinkTask_initInputReaders | /**
* Initializes the input readers of the DataSinkTask.
*
     * @throws Exception Thrown in case of an invalid task input configuration.
*/
@SuppressWarnings("unchecked")
private void initInputReaders() throws Exception {
int numGates = 0;
// ---------------- create the input readers ---------------------
// in case where a logical input unions multiple physical inputs, create a union reader
final int groupSize = this.config.getGroupSize(0);
numGates += groupSize;
if (groupSize == 1) {
// non-union case
inputReader =
new MutableRecordReader<DeserializationDelegate<IT>>(
getEnvironment().getInputGate(0),
getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else if (groupSize > 1) {
// union case
inputReader =
new MutableRecordReader<IOReadableWritable>(
new UnionInputGate(getEnvironment().getAllInputGates()),
getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else {
throw new Exception("Illegal input group size in task configuration: " + groupSize);
}
this.inputTypeSerializerFactory =
this.config.getInputSerializer(0, getUserCodeClassLoader());
@SuppressWarnings({"rawtypes"})
final MutableObjectIterator<?> iter =
new ReaderIterator(inputReader, this.inputTypeSerializerFactory.getSerializer());
this.reader = (MutableObjectIterator<IT>) iter;
// final sanity check
if (numGates != this.config.getNumInputs()) {
throw new Exception(
"Illegal configuration: Number of input gates and group sizes are not consistent.");
}
} | 3.68 |
flink_ExceptionUtils_firstOrSuppressed | /**
* Adds a new exception as a {@link Throwable#addSuppressed(Throwable) suppressed exception} to
* a prior exception, or returns the new exception, if no prior exception exists.
*
* <pre>{@code
* public void closeAllThings() throws Exception {
* Exception ex = null;
* try {
* component.shutdown();
* } catch (Exception e) {
* ex = firstOrSuppressed(e, ex);
* }
* try {
* anotherComponent.stop();
* } catch (Exception e) {
* ex = firstOrSuppressed(e, ex);
* }
* try {
* lastComponent.shutdown();
* } catch (Exception e) {
* ex = firstOrSuppressed(e, ex);
* }
*
* if (ex != null) {
* throw ex;
* }
* }
* }</pre>
*
* @param newException The newly occurred exception
* @param previous The previously occurred exception, possibly null.
* @return The new exception, if no previous exception exists, or the previous exception with
* the new exception in the list of suppressed exceptions.
*/
public static <T extends Throwable> T firstOrSuppressed(T newException, @Nullable T previous) {
checkNotNull(newException, "newException");
if (previous == null || previous == newException) {
return newException;
} else {
previous.addSuppressed(newException);
return previous;
}
} | 3.68 |
hadoop_MultipleOutputFormat_generateActualKey | /**
* Generate the actual key from the given key/value. The default behavior is that
* the actual key is equal to the given key
*
* @param key
* the key of the output data
* @param value
* the value of the output data
* @return the actual key derived from the given key/value
*/
protected K generateActualKey(K key, V value) {
return key;
} | 3.68 |
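A common pattern built on this hook, sketched here under the assumption of the classic mapred MultipleTextOutputFormat subclassing (imports omitted like the surrounding snippets): partition records into per-key files and drop the key from the written lines.

    public class KeyPartitionedOutput extends MultipleTextOutputFormat<Text, Text> {
      @Override
      protected String generateFileNameForKeyValue(Text key, Text value, String name) {
        return key.toString(); // one output file per distinct key
      }

      @Override
      protected Text generateActualKey(Text key, Text value) {
        return null; // a null actual key means only the value is written to the file
      }
    }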
hudi_BaseCommitActionExecutor_saveWorkloadProfileMetadataToInflight | /**
   * Save the workload profile in an intermediate file (here re-using commit files). This is useful when performing
   * rollback for MOR tables. Only updates are recorded in the workload profile metadata, since updates to log blocks
   * are unknown across batches; inserts (which are new parquet files) are rolled back based on commit time.
   * TODO: Create a new WorkloadProfile metadata file instead of using HoodieCommitMetadata.
*/
void saveWorkloadProfileMetadataToInflight(WorkloadProfile profile, String instantTime)
throws HoodieCommitException {
try {
HoodieCommitMetadata metadata = new HoodieCommitMetadata();
profile.getOutputPartitionPaths().forEach(path -> {
WorkloadStat partitionStat = profile.getOutputWorkloadStat(path);
HoodieWriteStat insertStat = new HoodieWriteStat();
insertStat.setNumInserts(partitionStat.getNumInserts());
insertStat.setFileId("");
insertStat.setPrevCommit(HoodieWriteStat.NULL_COMMIT);
metadata.addWriteStat(path, insertStat);
Map<String, Pair<String, Long>> updateLocationMap = partitionStat.getUpdateLocationToCount();
Map<String, Pair<String, Long>> insertLocationMap = partitionStat.getInsertLocationToCount();
Stream.concat(updateLocationMap.keySet().stream(), insertLocationMap.keySet().stream())
.distinct()
.forEach(fileId -> {
HoodieWriteStat writeStat = new HoodieWriteStat();
writeStat.setFileId(fileId);
Pair<String, Long> updateLocation = updateLocationMap.get(fileId);
Pair<String, Long> insertLocation = insertLocationMap.get(fileId);
// TODO : Write baseCommitTime is possible here ?
writeStat.setPrevCommit(updateLocation != null ? updateLocation.getKey() : insertLocation.getKey());
if (updateLocation != null) {
writeStat.setNumUpdateWrites(updateLocation.getValue());
}
if (insertLocation != null) {
writeStat.setNumInserts(insertLocation.getValue());
}
metadata.addWriteStat(path, writeStat);
});
});
metadata.setOperationType(operationType);
HoodieActiveTimeline activeTimeline = table.getActiveTimeline();
String commitActionType = getCommitActionType();
HoodieInstant requested = new HoodieInstant(State.REQUESTED, commitActionType, instantTime);
activeTimeline.transitionRequestedToInflight(
requested,
serializeCommitMetadata(metadata),
config.shouldAllowMultiWriteOnSameInstant());
} catch (IOException io) {
throw new HoodieCommitException("Failed to commit " + instantTime + " unable to save inflight metadata ", io);
}
} | 3.68 |
hadoop_BlockManager_get | /**
* Gets the block having the given {@code blockNumber}.
*
* The entire block is read into memory and returned as a {@code BufferData}.
* The blocks are treated as a limited resource and must be released when
* one is done reading them.
*
* @param blockNumber the number of the block to be read and returned.
* @return {@code BufferData} having data from the given block.
*
   * @throws IOException if there is an error reading the given block.
* @throws IllegalArgumentException if blockNumber is negative.
*/
public BufferData get(int blockNumber) throws IOException {
checkNotNegative(blockNumber, "blockNumber");
int size = blockData.getSize(blockNumber);
ByteBuffer buffer = ByteBuffer.allocate(size);
long startOffset = blockData.getStartOffset(blockNumber);
read(buffer, startOffset, size);
buffer.flip();
return new BufferData(blockNumber, buffer);
} | 3.68 |
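A usage sketch; the release(BufferData) counterpart is implied by the javadoc's "must be released" note and is assumed here, as are the getBuffer() accessor and the process() consumer:

    BufferData data = blockManager.get(3);
    try {
      ByteBuffer buffer = data.getBuffer(); // the block's bytes, already flipped for reading
      process(buffer);
    } finally {
      blockManager.release(data); // return the limited buffer resource
    }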
framework_ContainerEventProvider_listenToContainerEvents | /**
     * Attaches listeners to the container so that container events can be processed.
*/
private void listenToContainerEvents() {
if (container instanceof ItemSetChangeNotifier) {
((ItemSetChangeNotifier) container).addItemSetChangeListener(this);
}
if (container instanceof ValueChangeNotifier) {
((ValueChangeNotifier) container).addValueChangeListener(this);
}
} | 3.68 |
flink_ZooKeeperStateHandleStore_get | /**
* Gets a state handle from ZooKeeper and optionally locks it.
*
* @param pathInZooKeeper Path in ZooKeeper to get the state handle from
* @param lock True if we should lock the node; otherwise false
* @return The state handle
* @throws IOException Thrown if the method failed to deserialize the stored state handle
* @throws Exception Thrown if a ZooKeeper operation failed
*/
private RetrievableStateHandle<T> get(String pathInZooKeeper, boolean lock) throws Exception {
checkNotNull(pathInZooKeeper, "Path in ZooKeeper");
final String path = normalizePath(pathInZooKeeper);
if (lock) {
// try to lock the node
try {
client.create().withMode(CreateMode.EPHEMERAL).forPath(getInstanceLockPath(path));
} catch (KeeperException.NodeExistsException ignored) {
// we have already created the lock
} catch (KeeperException.NoNodeException ex) {
// We could run into this exception because the parent node does not exist when we
// are trying to lock.
// We wrap the exception here so that it could be caught in DefaultJobGraphStore
throw new NotExistException("ZooKeeper node " + path + " does not exist.", ex);
}
}
boolean success = false;
try {
byte[] data = client.getData().forPath(path);
RetrievableStateHandle<T> retrievableStateHandle = deserialize(data);
success = true;
return retrievableStateHandle;
} catch (KeeperException.NoNodeException ex) {
// We wrap the exception here so that it could be caught in DefaultJobGraphStore
throw new NotExistException("ZooKeeper node " + path + " does not exist.", ex);
} catch (IOException | ClassNotFoundException e) {
throw new IOException(
"Failed to deserialize state handle from ZooKeeper data from " + path + '.', e);
} finally {
if (!success && lock) {
// release the lock
release(path);
}
}
} | 3.68 |
hbase_ZKWatcher_checkAndSetZNodeAcls | /**
* On master start, we check the znode ACLs under the root directory and set the ACLs properly if
   * needed. If the cluster goes from an insecure setup to a secure setup, this step is needed so
* that the existing znodes created with open permissions are now changed with restrictive perms.
*/
public void checkAndSetZNodeAcls() {
if (!ZKAuthentication.isSecureZooKeeper(getConfiguration())) {
LOG.info("not a secure deployment, proceeding");
return;
}
// Check the base znodes permission first. Only do the recursion if base znode's perms are not
// correct.
try {
List<ACL> actualAcls = recoverableZooKeeper.getAcl(znodePaths.baseZNode, new Stat());
if (!isBaseZnodeAclSetup(actualAcls)) {
LOG.info("setting znode ACLs");
setZnodeAclsRecursive(znodePaths.baseZNode);
}
} catch (KeeperException.NoNodeException nne) {
return;
} catch (InterruptedException ie) {
interruptedExceptionNoThrow(ie, false);
} catch (IOException | KeeperException e) {
LOG.warn("Received exception while checking and setting zookeeper ACLs", e);
}
} | 3.68 |