name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
pulsar_NettyChannelUtil_writeAndFlushWithClosePromise | /**
* Write and flush the message to the channel and then close the channel.
*
* This method is particularly helpful when the connection is in an invalid state
* and therefore a new connection must be created to continue.
*
* @param ctx channel's context
* @param msg buffer to write in the channel
*/
public static void writeAndFlushWithClosePromise(ChannelOutboundInvoker ctx, ByteBuf msg) {
ctx.writeAndFlush(msg).addListener(ChannelFutureListener.CLOSE);
} | 3.68 |
hbase_WALPrettyPrinter_disableJSON | /**
* turns JSON output off, and turns on "pretty strings" for human consumption
*/
public void disableJSON() {
outputJSON = false;
} | 3.68 |
flink_BulkIterationNode_setNextPartialSolution | /**
* Sets the nextPartialSolution for this BulkIterationNode.
*
* @param nextPartialSolution The nextPartialSolution to set.
*/
public void setNextPartialSolution(
OptimizerNode nextPartialSolution, OptimizerNode terminationCriterion) {
// check if the root of the step function has the same parallelism as the iteration
// or if the step function has any operator at all
if (nextPartialSolution.getParallelism() != getParallelism()
|| nextPartialSolution == partialSolution
|| nextPartialSolution instanceof BinaryUnionNode) {
// add a no-op to the root to express the re-partitioning
NoOpNode noop = new NoOpNode();
noop.setParallelism(getParallelism());
DagConnection noOpConn =
new DagConnection(nextPartialSolution, noop, ExecutionMode.PIPELINED);
noop.setIncomingConnection(noOpConn);
nextPartialSolution.addOutgoingConnection(noOpConn);
nextPartialSolution = noop;
}
this.nextPartialSolution = nextPartialSolution;
this.terminationCriterion = terminationCriterion;
if (terminationCriterion == null) {
this.singleRoot = nextPartialSolution;
this.rootConnection = new DagConnection(nextPartialSolution, ExecutionMode.PIPELINED);
} else {
// we have a termination criterion
SingleRootJoiner singleRootJoiner = new SingleRootJoiner();
this.rootConnection =
new DagConnection(
nextPartialSolution, singleRootJoiner, ExecutionMode.PIPELINED);
this.terminationCriterionRootConnection =
new DagConnection(
terminationCriterion, singleRootJoiner, ExecutionMode.PIPELINED);
singleRootJoiner.setInputs(
this.rootConnection, this.terminationCriterionRootConnection);
this.singleRoot = singleRootJoiner;
// add connection to terminationCriterion for interesting properties visitor
terminationCriterion.addOutgoingConnection(terminationCriterionRootConnection);
}
nextPartialSolution.addOutgoingConnection(rootConnection);
} | 3.68 |
flink_DataSet_flatMap | /**
* Applies a FlatMap transformation on a {@link DataSet}.
*
* <p>The transformation calls a {@link
* org.apache.flink.api.common.functions.RichFlatMapFunction} for each element of the DataSet.
* Each FlatMapFunction call can return any number of elements including none.
*
* @param flatMapper The FlatMapFunction that is called for each element of the DataSet.
* @return A FlatMapOperator that represents the transformed DataSet.
* @see org.apache.flink.api.common.functions.RichFlatMapFunction
* @see FlatMapOperator
* @see DataSet
*/
public <R> FlatMapOperator<T, R> flatMap(FlatMapFunction<T, R> flatMapper) {
if (flatMapper == null) {
throw new NullPointerException("FlatMap function must not be null.");
}
String callLocation = Utils.getCallLocationName();
TypeInformation<R> resultType =
TypeExtractor.getFlatMapReturnTypes(flatMapper, getType(), callLocation, true);
return new FlatMapOperator<>(this, resultType, clean(flatMapper), callLocation);
} | 3.68 |
flink_Pool_pollEntry | /** Gets the next cached entry. This blocks until the next entry is available. */
public T pollEntry() throws InterruptedException {
return pool.take();
} | 3.68 |
hadoop_ResourceVector_decrement | /**
* Decrements the given resource by the specified value.
* @param resourceName name of the resource
* @param value value to be subtracted from the resource's current value
*/
public void decrement(String resourceName, double value) {
setValue(resourceName, getValue(resourceName) - value);
} | 3.68 |
framework_Slot_getCaptionElement | /**
* Get the slot's caption element.
*
* @return the caption element or {@code null} if there is no caption
*/
@SuppressWarnings("deprecation")
public com.google.gwt.user.client.Element getCaptionElement() {
return DOM.asOld(caption);
} | 3.68 |
pulsar_ResourceUnitRanking_calculateBrokerMaxCapacity | /**
* Estimate the maximum number of namespace bundles a ResourceUnit is able to handle with all of its resources.
*/
public static long calculateBrokerMaxCapacity(SystemResourceUsage systemResourceUsage, ResourceQuota defaultQuota) {
double bandwidthOutLimit = systemResourceUsage.bandwidthOut.limit * KBITS_TO_BYTES;
double bandwidthInLimit = systemResourceUsage.bandwidthIn.limit * KBITS_TO_BYTES;
long capacity = calculateBrokerCapacity(defaultQuota, systemResourceUsage.cpu.limit,
systemResourceUsage.memory.limit, bandwidthOutLimit, bandwidthInLimit);
return capacity;
} | 3.68 |
hadoop_Quota_getQuotaRemoteLocations | /**
* Get all quota remote locations across subclusters under given
* federation path.
* @param path Federation path.
* @return List of quota remote locations.
* @throws IOException
*/
private List<RemoteLocation> getQuotaRemoteLocations(String path)
throws IOException {
List<RemoteLocation> locations = new ArrayList<>();
RouterQuotaManager manager = this.router.getQuotaManager();
if (manager != null) {
Set<String> childrenPaths = manager.getPaths(path);
for (String childPath : childrenPaths) {
locations.addAll(
rpcServer.getLocationsForPath(childPath, false, false));
}
}
if (locations.size() >= 1) {
return locations;
} else {
locations.addAll(rpcServer.getLocationsForPath(path, false, false));
return locations;
}
} | 3.68 |
framework_PointerCancelEvent_getType | /**
* Gets the event type associated with pointer cancel events.
*
* @return the handler type
*/
public static Type<PointerCancelHandler> getType() {
return TYPE;
} | 3.68 |
hbase_ExplicitColumnTracker_checkColumn | /**
* {@inheritDoc}
*/
@Override
public ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) {
// delete markers should never be passed to an
// *Explicit*ColumnTracker
assert !PrivateCellUtil.isDelete(type);
do {
// No more columns left, we are done with this query
if (done()) {
return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row
}
// No more columns to match against, done with storefile
if (this.column == null) {
return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row
}
// Compare specific column to current column
int ret = CellUtil.compareQualifiers(cell, column.getBuffer(), column.getOffset(),
column.getLength());
// Column Matches. Return include code. The caller would call checkVersions
// to limit the number of versions.
if (ret == 0) {
return ScanQueryMatcher.MatchCode.INCLUDE;
}
resetTS();
if (ret < 0) {
// The current KV is smaller than the column the ExplicitColumnTracker
// is interested in, so seek to that column of interest.
return ScanQueryMatcher.MatchCode.SEEK_NEXT_COL;
}
// The current KV is bigger than the column the ExplicitColumnTracker
// is interested in. That means there is no more data for the column
// of interest. Advance the ExplicitColumnTracker state to next
// column of interest, and check again.
if (ret > 0) {
++this.index;
if (done()) {
// No more to match, do not include, done with this row.
return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row
}
// This is the recursive case.
this.column = this.columns[this.index];
}
} while (true);
} | 3.68 |
AreaShop_FileManager_removeGroup | /**
* Remove a group.
* @param group Group to remove
*/
public void removeGroup(RegionGroup group) {
groups.remove(group.getLowerCaseName());
groupsConfig.set(group.getLowerCaseName(), null);
saveGroupsIsRequired();
} | 3.68 |
hadoop_YarnVersionInfo_getVersion | /**
* Get the YARN version.
* @return the YARN version string, eg. "0.6.3-dev"
*/
public static String getVersion() {
return YARN_VERSION_INFO._getVersion();
} | 3.68 |
hudi_OptionsResolver_hasReadCommitsLimit | /**
* Returns whether the read commits limit is specified.
*/
public static boolean hasReadCommitsLimit(Configuration conf) {
return conf.contains(FlinkOptions.READ_COMMITS_LIMIT);
} | 3.68 |
framework_AbstractSingleSelect_updateSelectedItemState | /**
* This method updates the shared selection state of the
* {@code AbstractSingleSelect}.
*
* @param value
* the value that is selected; may be {@code null}
*
* @since 8.5
*/
protected void updateSelectedItemState(T value) {
// FIXME: If selecting a value that does not exist, this will leave an
// extra object in the key mapper that will not be dropped any time.
getState().selectedItemKey = value != null
? getDataCommunicator().getKeyMapper().key(value)
: null;
} | 3.68 |
framework_ErrorHandlingRunnable_processException | /**
* Process the given exception in the context of the given runnable. If the
* runnable extends {@link ErrorHandlingRunnable}, then the exception is
* passed to {@link #handleError(Exception)} and null is returned. If
* {@link #handleError(Exception)} throws an exception, that exception is
* returned. If the runnable does not extend {@link ErrorHandlingRunnable},
* then the original exception is returned.
*
* @since 8.7
* @param runnable
* the runnable for which the exception should be processed, not
* <code>null</code>
* @param exception
* the exception to process, not <code>null</code>
* @return the resulting exception, or <code>null</code> if the exception is
* fully processed
*/
public static Exception processException(Runnable runnable,
Exception exception) {
Objects.requireNonNull(runnable, "The runnable cannot be null.");
if (runnable instanceof ErrorHandlingRunnable) {
ErrorHandlingRunnable errorHandlingRunnable = (ErrorHandlingRunnable) runnable;
try {
errorHandlingRunnable.handleError(exception);
return null;
} catch (Exception exceptionFromHandler) {
return exceptionFromHandler;
}
}
return exception;
} | 3.68 |
pulsar_SchemaDefinition_builder | /**
* Get a new builder instance that can be used to configure and build a {@link SchemaDefinition} instance.
*
* @return the {@link SchemaDefinition}
*/
static <T> SchemaDefinitionBuilder<T> builder() {
return DefaultImplementation.getDefaultImplementation().newSchemaDefinitionBuilder();
} | 3.68 |
flink_NetUtils_ipAddressToUrlString | /**
* Encodes an IP address properly as a URL string. This method makes sure that IPv6 addresses
* have the proper formatting to be included in URLs.
*
* @param address The IP address to encode.
* @return The proper URL string encoded IP address.
*/
public static String ipAddressToUrlString(InetAddress address) {
if (address == null) {
throw new NullPointerException("address is null");
} else if (address instanceof Inet4Address) {
return address.getHostAddress();
} else if (address instanceof Inet6Address) {
return getIPv6UrlRepresentation((Inet6Address) address);
} else {
throw new IllegalArgumentException("Unrecognized type of InetAddress: " + address);
}
} | 3.68 |
hadoop_DBNameNodeConnector_getVolumeInfoFromStorageReports | /**
* Reads the relevant fields from each storage volume and populates the
* DiskBalancer Node.
*
* @param node - Disk Balancer Node
* @param reports - Array of StorageReport
*/
private void getVolumeInfoFromStorageReports(DiskBalancerDataNode node,
StorageReport[] reports)
throws Exception {
Preconditions.checkNotNull(node);
Preconditions.checkNotNull(reports);
for (StorageReport report : reports) {
DatanodeStorage storage = report.getStorage();
DiskBalancerVolume volume = new DiskBalancerVolume();
volume.setCapacity(report.getCapacity());
volume.setFailed(report.isFailed());
volume.setUsed(report.getDfsUsed());
// TODO : Should we do BlockPool level balancing at all ?
// Does it make sense ? Balancer does do that. Right now
// we only deal with volumes and not blockPools
volume.setUuid(storage.getStorageID());
// we will skip this volume for disk balancer if
// it is read-only since we will not be able to delete
// or if it is already failed.
volume.setSkip((storage.getState() == DatanodeStorage.State
.READ_ONLY_SHARED) || report.isFailed());
volume.setStorageType(storage.getStorageType().name());
volume.setIsTransient(storage.getStorageType().isTransient());
node.addVolume(volume);
}
} | 3.68 |
dubbo_ModuleServiceRepository_registerService | /**
* See {@link #registerService(Class)}
* <p>
* we assume:
* 1. services with different interfaces are not allowed to have the same path.
* 2. services that share the same interface but have different group/version can share the same path.
* 3. path's default value is the name of the interface.
*
* @param path
* @param interfaceClass
* @return
*/
public ServiceDescriptor registerService(String path, Class<?> interfaceClass) {
ServiceDescriptor serviceDescriptor = registerService(interfaceClass);
// if path is different with interface name, add extra path mapping
if (!interfaceClass.getName().equals(path)) {
List<ServiceDescriptor> serviceDescriptors =
ConcurrentHashMapUtils.computeIfAbsent(services, path, _k -> new CopyOnWriteArrayList<>());
synchronized (serviceDescriptors) {
Optional<ServiceDescriptor> previous = serviceDescriptors.stream()
.filter(s -> s.getServiceInterfaceClass().equals(serviceDescriptor.getServiceInterfaceClass()))
.findFirst();
if (previous.isPresent()) {
return previous.get();
} else {
serviceDescriptors.add(serviceDescriptor);
return serviceDescriptor;
}
}
}
return serviceDescriptor;
} | 3.68 |
shardingsphere-elasticjob_JobRegistry_addJobInstance | /**
* Add job instance.
*
* @param jobName job name
* @param jobInstance job instance
*/
public void addJobInstance(final String jobName, final JobInstance jobInstance) {
jobInstanceMap.put(jobName, jobInstance);
} | 3.68 |
morf_ExistingViewStateLoader_getViewsToDrop | /**
* @return the views which need to be dropped.
*/
public Collection<View> getViewsToDrop() {
return viewsToDrop;
} | 3.68 |
flink_FileInputFormat_getSplitLength | /**
* Gets the length or remaining length of the current split.
*
* @return The length or remaining length of the current split.
*/
public long getSplitLength() {
return splitLength;
} | 3.68 |
hbase_HBackupFileSystem_checkImageManifestExist | /**
* Check whether the backup image path exists and whether there is a manifest file in that path.
* @param backupManifestMap If all the manifests are found, then they are put into this map
* @param tableArray the tables involved
* @throws IOException exception
*/
public static void checkImageManifestExist(HashMap<TableName, BackupManifest> backupManifestMap,
TableName[] tableArray, Configuration conf, Path backupRootPath, String backupId)
throws IOException {
for (TableName tableName : tableArray) {
BackupManifest manifest = getManifest(conf, backupRootPath, backupId);
backupManifestMap.put(tableName, manifest);
}
} | 3.68 |
hbase_ExportSnapshot_doWork | /**
* Execute the export snapshot by copying the snapshot metadata, hfiles and wals.
* @return 0 on success, and != 0 upon failure.
*/
@Override
public int doWork() throws IOException {
Configuration conf = getConf();
// Check user options
if (snapshotName == null) {
System.err.println("Snapshot name not provided.");
LOG.error("Use -h or --help for usage instructions.");
return 0;
}
if (outputRoot == null) {
System.err
.println("Destination file-system (--" + Options.COPY_TO.getLongOpt() + ") not provided.");
LOG.error("Use -h or --help for usage instructions.");
return 0;
}
if (targetName == null) {
targetName = snapshotName;
}
if (inputRoot == null) {
inputRoot = CommonFSUtils.getRootDir(conf);
} else {
CommonFSUtils.setRootDir(conf, inputRoot);
}
Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
srcConf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true);
FileSystem inputFs = FileSystem.get(inputRoot.toUri(), srcConf);
Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);
destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true);
FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf);
boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false)
|| conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null;
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot);
Path snapshotTmpDir =
SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, destConf);
Path outputSnapshotDir =
SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot);
Path initialOutputSnapshotDir = skipTmp ? outputSnapshotDir : snapshotTmpDir;
LOG.debug("inputFs={}, inputRoot={}", inputFs.getUri().toString(), inputRoot);
LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", outputFs,
outputRoot.toString(), skipTmp, initialOutputSnapshotDir);
// Verify snapshot source before copying files
if (verifySource) {
LOG.info("Verify snapshot source, inputFs={}, inputRoot={}, snapshotDir={}.",
inputFs.getUri(), inputRoot, snapshotDir);
verifySnapshot(srcConf, inputFs, inputRoot, snapshotDir);
}
// Find the necessary directory which need to change owner and group
Path needSetOwnerDir = SnapshotDescriptionUtils.getSnapshotRootDir(outputRoot);
if (outputFs.exists(needSetOwnerDir)) {
if (skipTmp) {
needSetOwnerDir = outputSnapshotDir;
} else {
needSetOwnerDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(outputRoot, destConf);
if (outputFs.exists(needSetOwnerDir)) {
needSetOwnerDir = snapshotTmpDir;
}
}
}
// Check if the snapshot already exists
if (outputFs.exists(outputSnapshotDir)) {
if (overwrite) {
if (!outputFs.delete(outputSnapshotDir, true)) {
System.err.println("Unable to remove existing snapshot directory: " + outputSnapshotDir);
return 1;
}
} else {
System.err.println("The snapshot '" + targetName + "' already exists in the destination: "
+ outputSnapshotDir);
return 1;
}
}
if (!skipTmp) {
// Check if the snapshot already in-progress
if (outputFs.exists(snapshotTmpDir)) {
if (overwrite) {
if (!outputFs.delete(snapshotTmpDir, true)) {
System.err
.println("Unable to remove existing snapshot tmp directory: " + snapshotTmpDir);
return 1;
}
} else {
System.err
.println("A snapshot with the same name '" + targetName + "' may be in-progress");
System.err
.println("Please check " + snapshotTmpDir + ". If the snapshot has completed, ");
System.err
.println("consider removing " + snapshotTmpDir + " by using the -overwrite option");
return 1;
}
}
}
// Step 1 - Copy fs1:/.snapshot/<snapshot> to fs2:/.snapshot/.tmp/<snapshot>
// The snapshot references must be copied before the hfiles otherwise the cleaner
// will remove them because they are unreferenced.
List<Path> travesedPaths = new ArrayList<>();
boolean copySucceeded = false;
try {
LOG.info("Copy Snapshot Manifest from " + snapshotDir + " to " + initialOutputSnapshotDir);
travesedPaths =
FSUtils.copyFilesParallel(inputFs, snapshotDir, outputFs, initialOutputSnapshotDir, conf,
conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS));
copySucceeded = true;
} catch (IOException e) {
throw new ExportSnapshotException("Failed to copy the snapshot directory: from=" + snapshotDir
+ " to=" + initialOutputSnapshotDir, e);
} finally {
if (copySucceeded) {
if (filesUser != null || filesGroup != null) {
LOG.warn(
(filesUser == null ? "" : "Change the owner of " + needSetOwnerDir + " to " + filesUser)
+ (filesGroup == null
? ""
: ", Change the group of " + needSetOwnerDir + " to " + filesGroup));
setOwnerParallel(outputFs, filesUser, filesGroup, conf, travesedPaths);
}
if (filesMode > 0) {
LOG.warn("Change the permission of " + needSetOwnerDir + " to " + filesMode);
setPermissionParallel(outputFs, (short) filesMode, travesedPaths, conf);
}
}
}
// Write a new .snapshotinfo if the target name is different from the source name or we want to
// reset TTL for target snapshot.
if (!targetName.equals(snapshotName) || resetTtl) {
SnapshotDescription.Builder snapshotDescBuilder =
SnapshotDescriptionUtils.readSnapshotInfo(inputFs, snapshotDir).toBuilder();
if (!targetName.equals(snapshotName)) {
snapshotDescBuilder.setName(targetName);
}
if (resetTtl) {
snapshotDescBuilder.setTtl(HConstants.DEFAULT_SNAPSHOT_TTL);
}
SnapshotDescriptionUtils.writeSnapshotInfo(snapshotDescBuilder.build(),
initialOutputSnapshotDir, outputFs);
if (filesUser != null || filesGroup != null) {
outputFs.setOwner(
new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser,
filesGroup);
}
if (filesMode > 0) {
outputFs.setPermission(
new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE),
new FsPermission((short) filesMode));
}
}
// Step 2 - Start MR Job to copy files
// The snapshot references must be copied before the files otherwise the files gets removed
// by the HFileArchiver, since they have no references.
try {
runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum, filesUser,
filesGroup, filesMode, mappers, bandwidthMB);
LOG.info("Finalize the Snapshot Export");
if (!skipTmp) {
// Step 3 - Rename fs2:/.snapshot/.tmp/<snapshot> fs2:/.snapshot/<snapshot>
if (!outputFs.rename(snapshotTmpDir, outputSnapshotDir)) {
throw new ExportSnapshotException("Unable to rename snapshot directory from="
+ snapshotTmpDir + " to=" + outputSnapshotDir);
}
}
// Step 4 - Verify snapshot integrity
if (verifyTarget) {
LOG.info("Verify snapshot integrity");
verifySnapshot(destConf, outputFs, outputRoot, outputSnapshotDir);
}
LOG.info("Export Completed: " + targetName);
return 0;
} catch (Exception e) {
LOG.error("Snapshot export failed", e);
if (!skipTmp) {
outputFs.delete(snapshotTmpDir, true);
}
outputFs.delete(outputSnapshotDir, true);
return 1;
} finally {
IOUtils.closeStream(inputFs);
IOUtils.closeStream(outputFs);
}
} | 3.68 |
framework_NativeSelect_getVisibleItemCount | /**
* Gets the number of items that are visible. If only one item is visible,
* then the box will be displayed as a drop-down list.
*
* @since 8.1
* @return the visible item count
*/
public int getVisibleItemCount() {
return getState(false).visibleItemCount;
} | 3.68 |
hbase_ZKUtil_createSetData | /**
* Set data into the node, creating the node if it doesn't yet exist. Does not set a watch.
* @param zkw zk reference
* @param znode path of node
* @param data data to set for node
* @throws KeeperException if a ZooKeeper operation fails
*/
public static void createSetData(final ZKWatcher zkw, final String znode, final byte[] data)
throws KeeperException {
if (checkExists(zkw, znode) == -1) {
ZKUtil.createWithParents(zkw, znode, data);
} else {
ZKUtil.setData(zkw, znode, data);
}
} | 3.68 |
flink_WindowSavepointReader_evictor | /** Reads from a window that uses an evictor. */
public EvictingWindowSavepointReader<W> evictor() {
return new EvictingWindowSavepointReader<>(env, metadata, stateBackend, windowSerializer);
} | 3.68 |
hadoop_FifoCandidatesSelector_preemptFrom | /**
* Given a target preemption for a specific application, select containers
* to preempt (after unreserving all reservations for that app).
*/
private void preemptFrom(FiCaSchedulerApp app,
Resource clusterResource, Map<String, Resource> resToObtainByPartition,
List<RMContainer> skippedAMContainerlist, Resource skippedAMSize,
Map<ApplicationAttemptId, Set<RMContainer>> selectedContainers,
Map<ApplicationAttemptId, Set<RMContainer>> curCandidates,
Resource totalPreemptionAllowed) {
ApplicationAttemptId appId = app.getApplicationAttemptId();
// first drop reserved containers towards rsrcPreempt
List<RMContainer> reservedContainers =
new ArrayList<>(app.getReservedContainers());
for (RMContainer c : reservedContainers) {
if (CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(c,
selectedContainers)) {
continue;
}
if (resToObtainByPartition.isEmpty()) {
return;
}
// Try to preempt this container
CapacitySchedulerPreemptionUtils
.tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
resToObtainByPartition, c, clusterResource, selectedContainers,
curCandidates, totalPreemptionAllowed,
preemptionContext.getCrossQueuePreemptionConservativeDRF());
if (!preemptionContext.isObserveOnly()) {
preemptionContext.getRMContext().getDispatcher().getEventHandler()
.handle(new ContainerPreemptEvent(appId, c,
SchedulerEventType.KILL_RESERVED_CONTAINER));
}
}
// if more resources are to be freed go through all live containers in
// reverse priority and reverse allocation order and mark them for
// preemption
List<RMContainer> liveContainers =
new ArrayList<>(app.getLiveContainers());
sortContainers(liveContainers);
for (RMContainer c : liveContainers) {
if (resToObtainByPartition.isEmpty()) {
return;
}
if (CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(c,
selectedContainers)) {
continue;
}
// Skip already marked to killable containers
if (null != preemptionContext.getKillableContainers() && preemptionContext
.getKillableContainers().contains(c.getContainerId())) {
continue;
}
// Skip AM Container from preemption for now.
if (c.isAMContainer()) {
skippedAMContainerlist.add(c);
Resources.addTo(skippedAMSize, c.getAllocatedResource());
continue;
}
// Try to preempt this container
CapacitySchedulerPreemptionUtils
.tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
resToObtainByPartition, c, clusterResource, selectedContainers,
curCandidates, totalPreemptionAllowed,
preemptionContext.getCrossQueuePreemptionConservativeDRF());
}
} | 3.68 |
hbase_QuotaSettingsFactory_unthrottleNamespaceByThrottleType | /**
* Remove the throttling for the specified namespace by throttle type.
* @param namespace the namespace
* @param type the type of throttling
* @return the quota settings
*/
public static QuotaSettings unthrottleNamespaceByThrottleType(final String namespace,
final ThrottleType type) {
return throttle(null, null, namespace, null, type, 0, null, QuotaScope.MACHINE);
} | 3.68 |
hmily_StringUtils_isNoneBlank | /**
* Checks whether none of the given character sequences is blank.
*
* @param css the character sequences to check
* @return {@code true} if none of the given sequences is blank
*/
public static boolean isNoneBlank(final CharSequence... css) {
return !isAnyBlank(css);
} | 3.68 |
hadoop_FsGetter_get | /**
* Gets file system instance of given uri.
*
* @param uri uri.
* @param conf configuration.
* @throws IOException raised on errors performing I/O.
* @return FileSystem.
*/
public FileSystem get(URI uri, Configuration conf) throws IOException {
return FileSystem.get(uri, conf);
} | 3.68 |
hbase_MemStoreLABImpl_getNewExternalChunk | /*
* Returns a new chunk without replacing the current chunk, meaning MSLABImpl does not make the
* returned chunk the current chunk. The space on this chunk will be allocated externally. This
* interface is only for external callers. Chunks from pools are not allocated here, since they
* have fixed sizes.
*/
@Override
public Chunk getNewExternalChunk(int size) {
int allocSize = size + ChunkCreator.SIZEOF_CHUNK_HEADER;
if (allocSize <= ChunkCreator.getInstance().getChunkSize()) {
return getNewExternalChunk(ChunkCreator.ChunkType.DATA_CHUNK);
} else {
Chunk c = this.chunkCreator.getJumboChunk(size);
chunks.add(c.getId());
return c;
}
} | 3.68 |
framework_VScrollTable_getExpandRatio | /**
* Returns the expand ratio of the cell.
*
* @return The expand ratio
*/
public float getExpandRatio() {
return expandRatio;
} | 3.68 |
querydsl_SQLExpressions_stddev | /**
* returns the sample standard deviation of expr, a set of numbers.
*
* @param expr argument
* @return stddev(expr)
*/
public static <T extends Number> WindowOver<T> stddev(Expression<T> expr) {
return new WindowOver<T>(expr.getType(), SQLOps.STDDEV, expr);
} | 3.68 |
framework_VAbstractSplitPanel_convertToPixels | /**
* Converts the given split position string (in pixels or percentage) to a
* floating point pixel value.
*
* @param pos the split position, either in pixels (e.g. "200px") or as a percentage (e.g. "50%")
* @return the split position converted to pixels
*/
private float convertToPixels(String pos) {
float posAsFloat;
if (pos.indexOf("%") > 0) {
posAsFloat = Math.round(
Float.parseFloat(pos.substring(0, pos.length() - 1)) / 100
* (orientation == Orientation.HORIZONTAL
? getOffsetWidth()
: getOffsetHeight()));
} else {
posAsFloat = Float.parseFloat(pos.substring(0, pos.length() - 2));
}
return posAsFloat;
} | 3.68 |
open-banking-gateway_AccountInformationRequestCommon_fintech_calls_list_accounts_for_max_musterman_with_expected_balances | // Note that max.musterman is typically used for EMBEDDED (the real EMBEDDED returned by the bank, not the EMBEDDED approach in the table)
public SELF fintech_calls_list_accounts_for_max_musterman_with_expected_balances(Boolean withBalance) {
ExtractableResponse<Response> response = withAccountsHeaders(MAX_MUSTERMAN)
.header(SERVICE_SESSION_ID, UUID.randomUUID().toString())
.queryParam("withBalance", withBalance)
.when()
.get(AIS_ACCOUNTS_ENDPOINT)
.then()
.statusCode(ACCEPTED.value())
.extract();
updateServiceSessionId(response);
updateRedirectCode(response);
updateNextConsentAuthorizationUrl(response);
return self();
} | 3.68 |
flink_RoundRobinOperatorStateRepartitioner_repartitionSplitState | /** Repartition SPLIT_DISTRIBUTE state. */
private void repartitionSplitState(
Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>
nameToDistributeState,
int newParallelism,
List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList) {
int startParallelOp = 0;
// Iterate all named states and repartition one named state at a time per iteration
for (Map.Entry<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>>
e : nameToDistributeState.entrySet()) {
List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>> current =
e.getValue();
// Determine actual number of partitions for this named state
int totalPartitions = 0;
for (Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> offsets : current) {
totalPartitions += offsets.f1.getOffsets().length;
}
// Repartition the state across the parallel operator instances
int lstIdx = 0;
int offsetIdx = 0;
int baseFraction = totalPartitions / newParallelism;
int remainder = totalPartitions % newParallelism;
int newStartParallelOp = startParallelOp;
for (int i = 0; i < newParallelism; ++i) {
// Preparation: calculate the actual index considering wrap around
int parallelOpIdx = (i + startParallelOp) % newParallelism;
// Now calculate the number of partitions we will assign to the parallel instance in
// this round ...
int numberOfPartitionsToAssign = baseFraction;
// ... and distribute odd partitions while we still have some, one at a time
if (remainder > 0) {
++numberOfPartitionsToAssign;
--remainder;
} else if (remainder == 0) {
// We are out of odd partitions now and begin our next redistribution round with
// the current
// parallel operator to ensure fair load balance
newStartParallelOp = parallelOpIdx;
--remainder;
}
// Now start collection the partitions for the parallel instance into this list
while (numberOfPartitionsToAssign > 0) {
Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> handleWithOffsets =
current.get(lstIdx);
long[] offsets = handleWithOffsets.f1.getOffsets();
int remaining = offsets.length - offsetIdx;
// Repartition offsets
long[] offs;
if (remaining > numberOfPartitionsToAssign) {
offs =
Arrays.copyOfRange(
offsets, offsetIdx, offsetIdx + numberOfPartitionsToAssign);
offsetIdx += numberOfPartitionsToAssign;
} else {
if (OPTIMIZE_MEMORY_USE) {
handleWithOffsets.f1 = null; // GC
}
offs = Arrays.copyOfRange(offsets, offsetIdx, offsets.length);
offsetIdx = 0;
++lstIdx;
}
numberOfPartitionsToAssign -= remaining;
// As a last step we merge partitions that use the same StreamStateHandle in a
// single
// OperatorStateHandle
Map<StreamStateHandle, OperatorStateHandle> mergeMap =
mergeMapList.get(parallelOpIdx);
OperatorStateHandle operatorStateHandle = mergeMap.get(handleWithOffsets.f0);
if (operatorStateHandle == null) {
operatorStateHandle =
new OperatorStreamStateHandle(
CollectionUtil.newHashMapWithExpectedSize(
nameToDistributeState.size()),
handleWithOffsets.f0);
mergeMap.put(handleWithOffsets.f0, operatorStateHandle);
}
operatorStateHandle
.getStateNameToPartitionOffsets()
.put(
e.getKey(),
new OperatorStateHandle.StateMetaInfo(
offs, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
}
}
startParallelOp = newStartParallelOp;
e.setValue(null);
}
} | 3.68 |
hbase_DynamicMetricsRegistry_newSizeHistogram | /**
* Create a new histogram with size range counts.
* @param name The name of the histogram
* @param desc The description of the data in the histogram.
* @return A new MutableSizeHistogram
*/
public MutableSizeHistogram newSizeHistogram(String name, String desc) {
MutableSizeHistogram histo = new MutableSizeHistogram(name, desc);
return addNewMetricIfAbsent(name, histo, MutableSizeHistogram.class);
} | 3.68 |
flink_Tuple4_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
} | 3.68 |
flink_Router_allAllowedMethods | /** Returns all methods that this router handles. For {@code OPTIONS *}. */
public Set<HttpMethod> allAllowedMethods() {
if (anyMethodRouter.size() > 0) {
Set<HttpMethod> ret = new HashSet<HttpMethod>(9);
ret.add(HttpMethod.CONNECT);
ret.add(HttpMethod.DELETE);
ret.add(HttpMethod.GET);
ret.add(HttpMethod.HEAD);
ret.add(HttpMethod.OPTIONS);
ret.add(HttpMethod.PATCH);
ret.add(HttpMethod.POST);
ret.add(HttpMethod.PUT);
ret.add(HttpMethod.TRACE);
return ret;
} else {
return new HashSet<HttpMethod>(routers.keySet());
}
} | 3.68 |
framework_UIDL_getPaintableVariable | /**
* Gets the Paintable with the id found in the named variable's value.
*
* @param name
* the name of the variable
* @return the Paintable referenced by the variable, if it exists
*/
public ServerConnector getPaintableVariable(String name,
ApplicationConnection connection) {
return ConnectorMap.get(connection)
.getConnector(getStringVariable(name));
} | 3.68 |
framework_DataCommunicator_setMaximumAllowedRows | /**
* Set the maximum allowed rows to be fetched in one query.
*
* @param maximumAllowedRows Maximum allowed rows for one query.
* @since
*/
public void setMaximumAllowedRows(int maximumAllowedRows) {
this.maximumAllowedRows = maximumAllowedRows;
} | 3.68 |
hbase_MutableRegionInfo_getTable | /**
* Get current table name of the region
*/
@Override
public TableName getTable() {
return this.tableName;
} | 3.68 |
hadoop_S3AReadOpContext_getFuturePool | /**
* Gets the {@code ExecutorServiceFuturePool} used for asynchronous prefetches.
*
* @return the {@code ExecutorServiceFuturePool} used for asynchronous prefetches.
*/
public ExecutorServiceFuturePool getFuturePool() {
return this.futurePool;
} | 3.68 |
flink_KeyedOperatorTransformation_transform | /**
* Method for passing user defined operators along with the type information that will transform
* the OperatorTransformation.
*
* <p><b>IMPORTANT:</b> Any output from this operator will be discarded.
*
* @param factory A factory returning transformation logic type of the return stream
* @return An {@link BootstrapTransformation} that can be added to a {@link Savepoint}.
*/
public BootstrapTransformation<T> transform(SavepointWriterOperatorFactory factory) {
return new BootstrapTransformation<>(
dataSet, operatorMaxParallelism, timestamper, factory, keySelector, keyType);
} | 3.68 |
hbase_CompactionConfiguration_getThrottlePoint | /** Returns ThrottlePoint used for classifying small and large compactions */
public long getThrottlePoint() {
return throttlePoint;
} | 3.68 |
hadoop_FullCredentialsTokenBinding_loadAWSCredentials | /**
* Load the AWS credentials.
* @throws IOException failure
*/
private void loadAWSCredentials() throws IOException {
credentialOrigin = AbstractS3ATokenIdentifier.createDefaultOriginMessage();
Configuration conf = getConfig();
URI uri = getCanonicalUri();
// look for access keys to FS
S3xLoginHelper.Login secrets = S3AUtils.getAWSAccessKeys(uri, conf);
if (secrets.hasLogin()) {
awsCredentials = new MarshalledCredentials(
secrets.getUser(), secrets.getPassword(), "");
credentialOrigin += "; source = Hadoop configuration data";
} else {
// if there are none, look for the environment variables.
awsCredentials = MarshalledCredentialBinding.fromEnvironment(
System.getenv());
if (awsCredentials.isValid(
MarshalledCredentials.CredentialTypeRequired.AnyNonEmpty)) {
// valid tokens, so mark as origin
credentialOrigin += "; source = Environment variables";
} else {
credentialOrigin = "no credentials in configuration or"
+ " environment variables";
}
}
awsCredentials.validate(credentialOrigin +": ",
MarshalledCredentials.CredentialTypeRequired.AnyNonEmpty);
} | 3.68 |
hbase_MasterObserver_postModifyTable | /**
* Called after the modifyTable operation has been requested. Called as part of modify table RPC
* call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
* @param oldDescriptor descriptor of table before modify operation happened
* @param currentDescriptor current TableDescriptor of the table
*/
default void postModifyTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor)
throws IOException {
} | 3.68 |
druid_DruidDataSource_discardConnection | /**
* Discards the connection instead of recycling it.
*
* @param conn the connection to discard
* @deprecated
*/
public void discardConnection(Connection conn) {
if (conn == null) {
return;
}
try {
if (!conn.isClosed()) {
conn.close();
}
} catch (SQLRecoverableException ignored) {
discardErrorCountUpdater.incrementAndGet(this);
// ignored
} catch (Throwable e) {
discardErrorCountUpdater.incrementAndGet(this);
if (LOG.isDebugEnabled()) {
LOG.debug("discard to close connection error", e);
}
}
lock.lock();
try {
activeCount--;
discardCount++;
if (activeCount <= minIdle) {
emptySignal();
}
} finally {
lock.unlock();
}
} | 3.68 |
hbase_VersionModel_setServerVersion | /**
* @param version the servlet container version string
*/
public void setServerVersion(String version) {
this.serverVersion = version;
} | 3.68 |
hbase_ExtendedCell_getChunkId | /**
* Extracts the id of the backing bytebuffer of this cell if it was obtained from fixed sized
* chunks, as in the case of MemstoreLAB.
* @return the chunk id if the cell is backed by fixed sized Chunks, else return
* {@link #CELL_NOT_BASED_ON_CHUNK}; i.e. -1.
*/
default int getChunkId() {
return CELL_NOT_BASED_ON_CHUNK;
} | 3.68 |
framework_VAccordion_getCaptionWidth | /**
* Returns caption width including padding.
*
* @return the width of the caption (in pixels), or zero if there is no
* caption element (not possible via the default implementation)
*/
public int getCaptionWidth() {
if (caption == null) {
return 0;
}
int captionWidth = caption.getRequiredWidth();
int padding = WidgetUtil.measureHorizontalPaddingAndBorder(
caption.getElement(), 18);
return captionWidth + padding;
} | 3.68 |
hbase_NewVersionBehaviorTracker_add | // DeleteTracker
@Override
public void add(Cell cell) {
prepare(cell);
byte type = cell.getTypeByte();
switch (Type.codeToType(type)) {
// By the order of seen. We put null cq at first.
case DeleteFamily: // Delete all versions of all columns of the specified family
delFamMap.put(cell.getSequenceId(),
new DeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId()));
break;
case DeleteFamilyVersion: // Delete all columns of the specified family and specified version
delFamMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell);
break;
// These two kinds of markers are mix with Puts.
case DeleteColumn: // Delete all versions of the specified column
delColMap.put(cell.getSequenceId(),
new DeleteVersionsNode(cell.getTimestamp(), cell.getSequenceId()));
break;
case Delete: // Delete the specified version of the specified column.
delColMap.ceilingEntry(cell.getSequenceId()).getValue().addVersionDelete(cell);
break;
default:
throw new AssertionError("Unknown delete marker type for " + cell);
}
} | 3.68 |
hibernate-validator_TypeHelper_getValidatorTypes | /**
* @param annotationType The annotation type.
* @param validators List of constraint validator classes (for a given constraint).
* @param <A> the type of the annotation
*
* @return a map where the key is the type the validator accepts and the value is the
* corresponding constraint validator descriptor.
*/
public static <A extends Annotation> Map<Type, ConstraintValidatorDescriptor<A>> getValidatorTypes(
Class<A> annotationType,
List<ConstraintValidatorDescriptor<A>> validators) {
Map<Type, ConstraintValidatorDescriptor<A>> validatorsTypes = newHashMap();
for ( ConstraintValidatorDescriptor<A> validator : validators ) {
Type type = validator.getValidatedType();
ConstraintValidatorDescriptor<A> previous = validatorsTypes.put( type, validator );
if ( previous != null ) {
throw LOG.getMultipleValidatorsForSameTypeException( annotationType, type, previous.getValidatorClass(), validator.getValidatorClass() );
}
}
return validatorsTypes;
} | 3.68 |
framework_VTabsheet_scheduleDeferred | /**
* Schedule the command for a deferred execution.
*
* @since 7.4
*/
public void scheduleDeferred() {
Scheduler.get().scheduleDeferred(this);
} | 3.68 |
flink_HiveParserTypeCheckProcFactory_getNullExprProcessor | /** Factory method to get NullExprProcessor. */
public HiveParserTypeCheckProcFactory.NullExprProcessor getNullExprProcessor() {
return new HiveParserTypeCheckProcFactory.NullExprProcessor();
} | 3.68 |
flink_FlinkRelBuilder_windowAggregate | /** Build window aggregate for either aggregate or table aggregate. */
public RelBuilder windowAggregate(
LogicalWindow window,
GroupKey groupKey,
List<NamedWindowProperty> namedProperties,
Iterable<AggCall> aggCalls) {
// build logical aggregate
// Because of:
// [CALCITE-3763] RelBuilder.aggregate should prune unused fields from the input,
// if the input is a Project.
//
// the field can not be pruned if it is referenced by other expressions
// of the window aggregation(i.e. the TUMBLE_START/END).
// To solve this, we config the RelBuilder to forbidden this feature.
final LogicalAggregate aggregate =
(LogicalAggregate)
super.transform(t -> t.withPruneInputOfAggregate(false))
.push(build())
.aggregate(groupKey, aggCalls)
.build();
// build logical window aggregate from it
final RelNode windowAggregate;
if (isTableAggregate(aggregate.getAggCallList())) {
windowAggregate =
LogicalWindowTableAggregate.create(window, namedProperties, aggregate);
} else {
windowAggregate = LogicalWindowAggregate.create(window, namedProperties, aggregate);
}
return push(windowAggregate);
} | 3.68 |
hadoop_AbstractReservationSystem_getDefaultReservationSystem | /**
* Get the default reservation system corresponding to the scheduler
*
* @param scheduler the scheduler for which the reservation system is required
*
* @return the class name of the {@link ReservationSystem} implementation for the configured scheduler
*/
public static String getDefaultReservationSystem(
ResourceScheduler scheduler) {
if (scheduler instanceof CapacityScheduler) {
return CapacityReservationSystem.class.getName();
} else if (scheduler instanceof FairScheduler) {
return FairReservationSystem.class.getName();
}
return null;
} | 3.68 |
hadoop_TypedBytesWritable_setValue | /** Set the typed bytes from a given Java object. */
public void setValue(Object obj) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
TypedBytesOutput tbo = TypedBytesOutput.get(new DataOutputStream(baos));
tbo.write(obj);
byte[] bytes = baos.toByteArray();
set(bytes, 0, bytes.length);
} catch (IOException e) {
throw new RuntimeException(e);
}
} | 3.68 |
hbase_VersionInfoUtil_versionNumberToString | /**
* Returns the passed-in <code>version</code> int as a version String (e.g. 0x0103004 is 1.3.4)
*/
public static String versionNumberToString(final int version) {
return String.format("%d.%d.%d", ((version >> 20) & 0xff), ((version >> 12) & 0xff),
(version & 0xfff));
} | 3.68 |
hadoop_Paths_path | /**
* Varargs constructor of paths. Not very efficient.
* @param parent parent path
* @param child child entries. "" elements are skipped.
* @return the full child path.
*/
public static Path path(Path parent, String... child) {
Path p = parent;
for (String c : child) {
if (!c.isEmpty()) {
p = new Path(p, c);
}
}
return p;
} | 3.68 |
hadoop_BlockStorageMovementAttemptedItems_add | /**
* Add item to block storage movement attempted items map which holds the
* tracking/blockCollection id versus time stamp.
*
* @param startPathId
* - start satisfier path identifier
* @param fileId
* - file identifier
* @param monotonicNow
* - time now
* @param assignedBlocks
* - assigned blocks for block movement
* @param retryCount
* - retry count
*/
public void add(long startPathId, long fileId, long monotonicNow,
Map<Block, Set<StorageTypeNodePair>> assignedBlocks, int retryCount) {
AttemptedItemInfo itemInfo = new AttemptedItemInfo(startPathId, fileId,
monotonicNow, assignedBlocks.keySet(), retryCount);
synchronized (storageMovementAttemptedItems) {
storageMovementAttemptedItems.add(itemInfo);
}
synchronized (scheduledBlkLocs) {
scheduledBlkLocs.putAll(assignedBlocks);
}
} | 3.68 |
graphhopper_GraphHopper_setSortGraph | /**
* Sorts the graph, which requires more RAM during import. See #12
*/
public GraphHopper setSortGraph(boolean sortGraph) {
ensureNotLoaded();
this.sortGraph = sortGraph;
return this;
} | 3.68 |
MagicPlugin_MageDataStore_save | /**
* @deprecated Replaced by
* {@link #save(MageData, MageDataCallback, boolean)}.
*/
@Deprecated
default void save(MageData mage, MageDataCallback callback) {
save(mage, callback, false);
} | 3.68 |
hbase_Result_getValue | /**
* Get the latest version of the specified column. Note: this call clones the value content of the
* hosting Cell. See {@link #getValueAsByteBuffer(byte[], byte[])}, etc., or {@link #listCells()}
* if you want to avoid the cloning.
* @param family family name
* @param qualifier column qualifier
* @return value of latest version of column, null if none found
*/
public byte[] getValue(byte[] family, byte[] qualifier) {
Cell kv = getColumnLatestCell(family, qualifier);
if (kv == null) {
return null;
}
return CellUtil.cloneValue(kv);
} | 3.68 |
framework_AbstractComponentConnector_createWidget | /**
* Creates and returns the widget for this VPaintableWidget. This method
* should only be called once when initializing the paintable.
* <p>
* You should typically not override this method since the framework by
* default generates an implementation that uses {@link GWT#create(Class)}
* to create a widget of the same type as returned by the most specific
* override of {@link #getWidget()}. If you do override the method, you
* can't call <code>super.createWidget()</code> since the metadata needed
* for that implementation is not generated if there's an override of the
* method.
*
* @return a new widget instance to use for this component connector
*/
protected Widget createWidget() {
Type type = TypeData.getType(getClass());
try {
Type widgetType = type.getMethod("getWidget").getReturnType();
Object instance = widgetType.createInstance();
return (Widget) instance;
} catch (NoDataException e) {
throw new IllegalStateException(
"Default implementation of createWidget() does not work for "
+ getClass().getSimpleName()
+ ". This might be caused by explicitely using "
+ "super.createWidget() or some unspecified "
+ "problem with the widgetset compilation.",
e);
}
} | 3.68 |
hadoop_TimelinePutResponse_addError | /**
* Add a single {@link TimelinePutError} instance into the existing list
*
* @param error
* a single {@link TimelinePutError} instance
*/
public void addError(TimelinePutError error) {
errors.add(error);
} | 3.68 |
hadoop_ConnectionContext_isUsable | /**
* Check if the connection can be used. It checks if the connection is used by
* another thread or already closed.
*
* @return True if the connection can be used.
*/
public synchronized boolean isUsable() {
return hasAvailableConcurrency() && !isClosed();
} | 3.68 |
graphhopper_GHSortedCollection_pollKey | /**
* Removes the smallest entry (key and value) from this collection.
*
* @return the key of the removed entry
*/
public int pollKey() {
size--;
if (size < 0) {
throw new IllegalStateException("collection is already empty!?");
}
Entry<Integer, GHIntHashSet> e = map.firstEntry();
GHIntHashSet set = e.getValue();
if (set.isEmpty()) {
throw new IllegalStateException("internal set is already empty!?");
}
Iterator<IntCursor> iter = set.iterator();
final int val = iter.next().value;
set.remove(val);
if (set.isEmpty()) {
map.remove(e.getKey());
}
return val;
} | 3.68 |
hadoop_FrameworkCounterGroup_write | /**
* FrameworkGroup ::= #counter (key value)*
*/
@Override
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, size());
for (int i = 0; i < counters.length; ++i) {
Counter counter = (C) counters[i];
if (counter != null) {
WritableUtils.writeVInt(out, i);
WritableUtils.writeVLong(out, counter.getValue());
}
}
} | 3.68 |
hadoop_TimelineEntity_getPrimaryFiltersJAXB | // Required by JAXB
@Private
@XmlElement(name = "primaryfilters")
public HashMap<String, Set<Object>> getPrimaryFiltersJAXB() {
return primaryFilters;
} | 3.68 |
flink_FlinkSemiAntiJoinJoinTransposeRule_setJoinAdjustments | /**
* Sets an array to reflect how much each index corresponding to a field needs to be adjusted.
* The array corresponds to fields in a 3-way join between (X, Y, and Z). X remains unchanged,
* but Y and Z need to be adjusted by some fixed amount as determined by the input.
*
* @param adjustments array to be filled out
* @param nFieldsX number of fields in X
* @param nFieldsY number of fields in Y
* @param nFieldsZ number of fields in Z
* @param adjustY the amount to adjust Y by
* @param adjustZ the amount to adjust Z by
*/
private void setJoinAdjustments(
int[] adjustments, int nFieldsX, int nFieldsY, int nFieldsZ, int adjustY, int adjustZ) {
for (int i = 0; i < nFieldsX; i++) {
adjustments[i] = 0;
}
for (int i = nFieldsX; i < (nFieldsX + nFieldsY); i++) {
adjustments[i] = adjustY;
}
for (int i = nFieldsX + nFieldsY; i < (nFieldsX + nFieldsY + nFieldsZ); i++) {
adjustments[i] = adjustZ;
}
} | 3.68 |
hbase_MetaFixer_getRegionInfoWithLargestEndKey | /**
* @return Either <code>a</code> or <code>b</code>, whichever has the endkey that is furthest
* along in the Table.
*/
static RegionInfo getRegionInfoWithLargestEndKey(RegionInfo a, RegionInfo b) {
if (a == null) {
// b may be null.
return b;
}
if (b == null) {
// Both are null. The return is not-defined.
return a;
}
if (!a.getTable().equals(b.getTable())) {
// This is an odd one. This should be the right answer.
return b;
}
if (a.isLast()) {
return a;
}
if (b.isLast()) {
return b;
}
int compare = Bytes.compareTo(a.getEndKey(), b.getEndKey());
return compare == 0 || compare > 0 ? a : b;
} | 3.68 |
hbase_BucketCache_startWriterThreads | /**
* Called by the constructor to start the writer threads. Used by tests that need to override
* starting the threads.
*/
protected void startWriterThreads() {
for (WriterThread thread : writerThreads) {
thread.start();
}
} | 3.68 |
framework_EventHelper_updateHandler | /**
* Updates the handler registered via {@code handlerProvider}: removes it through the
* provided {@code handlerRegistration} if the connector no longer has an event listener
* with {@code eventIdentifier}, and adds it via the provided {@code handlerProvider} if
* the connector has an event listener with {@code eventIdentifier}.
*
* @param connector
* connector to check event listener presence
* @param eventIdentifier
* event identifier whose presence in the connector is checked
* @param handlerRegistration
* resulting handler registration to remove added handler in case
* of absence event listener
* @param handlerProvider
* the strategy to register handler
* @return handlerRegistration which should be used to remove registered
* handler via {@code handlerProvider}
*/
public static <H extends EventHandler, W extends Widget> HandlerRegistration updateHandler(
ComponentConnector connector, String eventIdentifier,
HandlerRegistration handlerRegistration,
Supplier<HandlerRegistration> handlerProvider) {
if (connector.hasEventListener(eventIdentifier)) {
if (handlerRegistration == null) {
handlerRegistration = handlerProvider.get();
}
} else if (handlerRegistration != null) {
handlerRegistration.removeHandler();
handlerRegistration = null;
}
return handlerRegistration;
} | 3.68 |
hbase_BloomFilterChunk_add | // Used only by tests
void add(byte[] buf, int offset, int len) {
/*
* For faster hashing, use combinatorial generation
* http://www.eecs.harvard.edu/~kirsch/pubs/bbbf/esa06.pdf
*/
HashKey<byte[]> hashKey = new ByteArrayHashKey(buf, offset, len);
int hash1 = this.hash.hash(hashKey, 0);
int hash2 = this.hash.hash(hashKey, hash1);
setHashLoc(hash1, hash2);
} | 3.68 |
hadoop_FileIoProvider_createFile | /**
* Create a file.
* @param volume target volume. null if unavailable.
* @param f File to be created.
* @return true if the file does not exist and was successfully created.
* false if the file already exists.
* @throws IOException
*/
public boolean createFile(
@Nullable FsVolumeSpi volume, File f) throws IOException {
final long begin = profilingEventHook.beforeMetadataOp(volume, OPEN);
try {
faultInjectorEventHook.beforeMetadataOp(volume, OPEN);
boolean created = f.createNewFile();
profilingEventHook.afterMetadataOp(volume, OPEN, begin);
return created;
} catch (Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
flink_ZooKeeperStateHandleStore_getAllAndLock | /**
* Gets all available state handles from ZooKeeper and locks the respective state nodes.
*
* <p>If there is a concurrent modification, the operation is retried until it succeeds.
*
* @return All state handles from ZooKeeper.
* @throws Exception If a ZooKeeper or state handle operation fails
*/
@Override
public List<Tuple2<RetrievableStateHandle<T>, String>> getAllAndLock() throws Exception {
return getAllAndLock(parentNodePath -> client.getChildren().forPath(parentNodePath));
} | 3.68 |
framework_RadioButtonGroupConnector_onDataChange | /**
* A data change handler registered to the data source. Updates the data
* items and selection status when the data source notifies of new changes
* from the server side.
*
* @param range
* the new range of data items
*/
private void onDataChange(Range range) {
assert range.getStart() == 0 && range.getEnd() == getDataSource()
.size() : "RadioButtonGroup only supports full updates, but "
+ "got range " + range;
final VRadioButtonGroup select = getWidget();
DataSource<JsonObject> dataSource = getDataSource();
int size = dataSource.size();
List<JsonObject> options = new ArrayList<>();
for (int i = 0; i < size; i++) {
options.add(dataSource.getRow(i));
}
select.buildOptions(options);
getLayoutManager().setNeedsMeasure(this);
updateSelectedItem();
} | 3.68 |
flink_DataTypeTemplate_fromDefaults | /** Creates an instance with no parameter content. */
static DataTypeTemplate fromDefaults() {
return new DataTypeTemplate(
null, null, null, null, null, null, null, null, null, null, null);
} | 3.68 |
framework_Validator_getCauses | /**
* Returns the {@code InvalidValueExceptions} that caused this
* exception.
*
* @return An array containing the {@code InvalidValueExceptions} that
* caused this exception. Returns an empty array if this
* exception was not caused by other exceptions.
*/
public InvalidValueException[] getCauses() {
return causes;
} | 3.68 |
hbase_CellComparatorImpl_compareFamilies | /**
* This method will be overridden when we compare cells within the same store, to bypass family comparison.
*/
protected int compareFamilies(KeyValue left, int leftFamilyPosition, int leftFamilyLength,
ByteBufferKeyValue right, int rightFamilyPosition, int rightFamilyLength) {
return ByteBufferUtils.compareTo(left.getFamilyArray(), leftFamilyPosition, leftFamilyLength,
right.getFamilyByteBuffer(), rightFamilyPosition, rightFamilyLength);
} | 3.68 |
flink_CatalogTableStatistics_getRowCount | /** The number of rows. */
public long getRowCount() {
return this.rowCount;
} | 3.68 |
hadoop_MapHost_markAvailable | /**
* Called when the node is done with its penalty or done copying.
* @return the host's new state
*/
public synchronized State markAvailable() {
if (maps.isEmpty()) {
state = State.IDLE;
} else {
state = State.PENDING;
}
return state;
} | 3.68 |
hadoop_ActiveAuditManagerS3A_modifyHttpRequest | /**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context,
ExecutionAttributes executionAttributes) {
return span.modifyHttpRequest(context, executionAttributes);
} | 3.68 |
hadoop_DelegatingSSLSocketFactory_getDefaultFactory | /**
* Singleton instance of the SSLSocketFactory.
*
 * The SSLSocketFactory must be initialized with an appropriate SSLChannelMode
 * via the initializeDefaultFactory method before this accessor is used.
 *
 * @return the SSLSocketFactory instance; it must have been initialized by
 *         initializeDefaultFactory.
*/
public static DelegatingSSLSocketFactory getDefaultFactory() {
return instance;
} | 3.68 |
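The accessor relies on an initialize-then-get singleton. A minimal sketch of that pattern, using placeholder names rather than Hadoop's actual class, could be:

// Minimal initialize-then-get singleton sketch; ChannelMode is a placeholder enum.
final class FactorySketch {
  enum ChannelMode { DEFAULT, OPENSSL }

  private static volatile FactorySketch instance;
  private final ChannelMode mode;

  private FactorySketch(ChannelMode mode) {
    this.mode = mode;
  }

  static synchronized void initializeDefaultFactory(ChannelMode mode) {
    if (instance == null) {
      instance = new FactorySketch(mode);
    }
  }

  static FactorySketch getDefaultFactory() {
    // Callers are expected to call initializeDefaultFactory first; otherwise this is null.
    return instance;
  }

  ChannelMode getChannelMode() {
    return mode;
  }
}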
framework_HeartbeatHandler_handleSessionExpired | /*
* (non-Javadoc)
*
* @see
* com.vaadin.server.SessionExpiredHandler#handleSessionExpired(com.vaadin
* .server.VaadinRequest, com.vaadin.server.VaadinResponse)
*/
@Override
public boolean handleSessionExpired(VaadinRequest request,
VaadinResponse response) throws IOException {
if (!ServletPortletHelper.isHeartbeatRequest(request)) {
return false;
}
// Ensure that the browser does not cache expired heartbeat responses.
// iOS 6 Safari requires this (#3226)
response.setHeader("Cache-Control", "no-cache");
// If Content-Type is not set, browsers assume text/html and may
// complain about the empty response body (#4167)
response.setHeader("Content-Type", "text/plain");
response.sendError(HttpServletResponse.SC_FORBIDDEN, "Session expired");
return true;
} | 3.68 |
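The same header-plus-403 response can be expressed outside Vaadin as a plain servlet helper; this is only a hedged analogue using standard HttpServletResponse calls, not the framework's handler:

import java.io.IOException;
import javax.servlet.http.HttpServletResponse;

// Plain-servlet analogue of the expired-heartbeat response above (illustrative only).
final class HeartbeatRejectSketch {
  static void rejectExpiredHeartbeat(HttpServletResponse response) throws IOException {
    // Prevent caching of the error so later heartbeats are not answered from cache.
    response.setHeader("Cache-Control", "no-cache");
    // Give the empty body an explicit type so browsers do not assume text/html.
    response.setHeader("Content-Type", "text/plain");
    response.sendError(HttpServletResponse.SC_FORBIDDEN, "Session expired");
  }
}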
framework_VPopupView_syncChildren | /**
* Try to sync all known active child widgets to server.
*/
public void syncChildren() {
// Notify children with focus
if ((popupComponentWidget instanceof Focusable)) {
((Focusable) popupComponentWidget).setFocus(false);
}
// Notify children that have used the keyboard
for (Element e : activeChildren) {
try {
nativeBlur(e);
} catch (Exception ignored) {
}
}
activeChildren.clear();
} | 3.68 |
framework_RangeValidator_of | /**
* Returns a {@code RangeValidator} comparing values of a {@code Comparable}
* type using their <i>natural order</i>. Passing null to either
* {@code minValue} or {@code maxValue} means there is no limit in that
* direction. Both limits may be null; this can be useful if the limits are
* resolved programmatically.
* <p>
* Null is considered to be less than any non-null value. This means null
* never passes validation if a minimum value is specified.
*
* @param <C>
* the {@code Comparable} value type
* @param errorMessage
* the error message to return if validation fails, not null
* @param minValue
* the least value of the accepted range or null for no limit
* @param maxValue
* the greatest value of the accepted range or null for no limit
* @return the new validator
*/
public static <C extends Comparable<? super C>> RangeValidator<C> of(
String errorMessage, C minValue, C maxValue) {
return new RangeValidator<>(errorMessage,
Comparator.nullsFirst(Comparator.naturalOrder()), minValue,
maxValue);
} | 3.68 |
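A hypothetical call site for the factory above (the bounds and error messages are made up, and the RangeValidator class shown above is assumed to be on the classpath); passing null for one bound leaves that side of the range open:

// Closed range 0..100.
RangeValidator<Integer> percentage =
    RangeValidator.of("Value must be between 0 and 100", 0, 100);

// Open-ended upper bound: only a minimum is enforced.
RangeValidator<Integer> nonNegative =
    RangeValidator.of("Value must not be negative", 0, null);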
framework_BootstrapResponse_getRequest | /**
* Gets the request for which the generated bootstrap HTML will be the
* response.
*
* This can be used to read request headers and other additional
* information. Please note that {@link VaadinSession#getBrowser()} will not
* be available because the bootstrap page is generated before the bootstrap
* javascript has had a chance to send any information back to the server.
*
* @return the Vaadin request that is being handled
*/
public VaadinRequest getRequest() {
return request;
} | 3.68 |
flink_SpillChannelManager_unregisterOpenChannelToBeRemovedAtShutdown | /**
* Removes a channel reader/writer from the list of channels that are to be removed at shutdown.
*
* @param channel The channel reader/writer.
*/
synchronized void unregisterOpenChannelToBeRemovedAtShutdown(FileIOChannel channel) {
openChannels.remove(channel);
} | 3.68 |
framework_VTree_onKeyDown | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.KeyDownHandler#onKeyDown(com.google.gwt
* .event.dom.client.KeyDownEvent)
*/
@Override
public void onKeyDown(KeyDownEvent event) {
if (handleKeyNavigation(event.getNativeEvent().getKeyCode(),
event.isControlKeyDown() || event.isMetaKeyDown(),
event.isShiftKeyDown())) {
event.preventDefault();
event.stopPropagation();
}
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithLiterals | /**
 * Tests that selects which include field literals are converted to SQL correctly.
*/
@Test
public void testSelectWithLiterals() {
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD),
new FieldReference(INT_FIELD),
new FieldReference(DATE_FIELD).as("aliasDate"),
new FieldLiteral("SOME_STRING"),
new FieldLiteral(1.23d),
new FieldLiteral(1),
new FieldLiteral('c'),
new FieldLiteral("ANOTHER_STRING").as("aliasedString"))
.from(new TableReference(TEST_TABLE));
String value1 = varCharCast("'SOME_STRING'");
String value2 = varCharCast("'c'");
String value3 = varCharCast("'ANOTHER_STRING'");
String expectedSql = "SELECT stringField, intField, dateField AS aliasDate, " + stringLiteralPrefix() + value1 + ", " + expectedDecimalRepresentationOfLiteral("1.23") + ", 1, " + stringLiteralPrefix() + value2 + ", " + stringLiteralPrefix() + value3 + " AS aliasedString FROM " + tableName(TEST_TABLE);
assertEquals("Select with literal values for some fields", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
framework_SetPageFirstItemLoadsNeededRowsOnly_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Only cached rows and rows in viewport should be rendered after "
+ "calling table.setCurrentPageFirstItemIndex(n) - as opposed to all rows "
+ "between the previous position and new position";
} | 3.68 |
flink_ListView_add | /**
* Adds the given value to the list.
*
* @throws Exception Thrown if the system cannot add data.
* @param value The element to be appended to this list view.
*/
public void add(T value) throws Exception {
list.add(value);
} | 3.68 |
hbase_StoreFileInfo_isValid | /**
 * Returns whether the specified file is a valid store file.
* @param fileStatus The {@link FileStatus} of the file
* @return <tt>true</tt> if the file is valid
*/
public static boolean isValid(final FileStatus fileStatus) throws IOException {
final Path p = fileStatus.getPath();
if (fileStatus.isDirectory()) {
return false;
}
// Check for empty hfile. Should never be the case but can happen
// after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
// NOTE: that the HFileLink is just a name, so it's an empty file.
if (!HFileLink.isHFileLink(p) && fileStatus.getLen() <= 0) {
LOG.warn("Skipping {} because it is empty. HBASE-646 DATA LOSS?", p);
return false;
}
return validateStoreFileName(p.getName());
} | 3.68 |
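A hypothetical caller might filter a directory listing down to valid store files. The path below is invented, and only the isValid() helper shown above plus standard Hadoop FileSystem calls are assumed; imports for Configuration, FileSystem, FileStatus, Path, List and ArrayList are omitted in this fragment.

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path familyDir = new Path("/hbase/data/default/t1/region/cf"); // made-up path
List<Path> storeFiles = new ArrayList<>();
for (FileStatus status : fs.listStatus(familyDir)) {
  if (StoreFileInfo.isValid(status)) {
    storeFiles.add(status.getPath());
  }
}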
framework_CalendarDateRange_getEnd | /**
* Get the end date of the date range.
*
* @return the end Date of the range
*/
public Date getEnd() {
return end;
} | 3.68 |
flink_WindowsGrouping_buildTriggerWindowElementsIterator | /** @return the iterator of the next triggerable window's elements. */
public RowIterator<BinaryRowData> buildTriggerWindowElementsIterator() {
currentWindow = nextWindow;
        // It is illegal to call this method after hasTriggerWindow() has returned false.
Preconditions.checkState(
watermark == Long.MIN_VALUE || nextWindow != null,
"next trigger window cannot be null.");
if (nextWindow.getEnd() > watermark) {
throw new IllegalStateException("invalid window triggered " + currentWindow);
}
// advance in the stride of slideSize for hasTriggerWindow
nextWindow =
TimeWindow.of(
currentWindow.getStart() + slideSize,
currentWindow.getStart() + slideSize + windowSize);
// build trigger window elements' iterator
emptyWindowTriggered = true;
onBufferEvict(triggerWindowStartIndex);
return new WindowsElementsIterator(newBufferIterator(triggerWindowStartIndex));
} | 3.68 |
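The window-advance arithmetic used in the method (next start = current start + slideSize, same window length) can be isolated into a tiny helper; this is only a sketch of the math, not Flink API:

// Returns {start, end} of the window that follows a window starting at currentStart.
static long[] advanceWindow(long currentStart, long slideSize, long windowSize) {
  long nextStart = currentStart + slideSize;
  return new long[] { nextStart, nextStart + windowSize };
}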
flink_AbstractUdfOperator_emptyClassArray | /**
* Generic utility function that returns an empty class array.
*
* @param <U> The type of the classes.
* @return An empty array of type <tt>Class<U></tt>.
*/
protected static <U> Class<U>[] emptyClassArray() {
@SuppressWarnings("unchecked")
Class<U>[] array = new Class[0];
return array;
} | 3.68 |
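The same raw-array-plus-unchecked-cast idiom applies to any parameterized component type, since Java rejects generic array creation such as new List<U>[0]; an illustrative variant of the pattern above:

import java.util.List;

// Illustrative variant of the idiom above for a different parameterized type.
@SuppressWarnings("unchecked")
static <U> List<U>[] emptyListArray() {
  return (List<U>[]) new List[0]; // raw array created, then cast to the generic type
}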