name | code_snippet | score
---|---|---|
hbase_PrivateCellUtil_createLastOnRow | /**
* Create a Cell that is larger than all other possible Cells for the given Cell's row.
* @return Last possible Cell on passed Cell's row.
*/
public static Cell createLastOnRow(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return new LastOnRowByteBufferExtendedCell(((ByteBufferExtendedCell) cell).getRowByteBuffer(),
((ByteBufferExtendedCell) cell).getRowPosition(), cell.getRowLength());
}
return new LastOnRowCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
} | 3.68 |
hudi_HoodieTableConfig_isMetadataTableAvailable | /**
 * @return true if the metadata table has been created and is being used for this dataset, else false.
*/
public boolean isMetadataTableAvailable() {
return isMetadataPartitionAvailable(MetadataPartitionType.FILES);
} | 3.68 |
hadoop_RBFMetrics_getNamespaceInfo | /**
* Build a set of unique values found in all namespaces.
*
* @param f Method reference of the appropriate FederationNamespaceInfo
* getter function
* @return Set of unique string values found in all discovered namespaces.
* @throws IOException if the query could not be executed.
*/
private Collection<String> getNamespaceInfo(
Function<FederationNamespaceInfo, String> f) throws IOException {
if (membershipStore == null) {
return new HashSet<>();
}
GetNamespaceInfoRequest request = GetNamespaceInfoRequest.newInstance();
GetNamespaceInfoResponse response =
membershipStore.getNamespaceInfo(request);
return response.getNamespaceInfo().stream()
.map(f)
.collect(Collectors.toSet());
} | 3.68 |
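A hedged usage sketch for the helper above; the `getNameserviceId` accessor on `FederationNamespaceInfo` is an assumption and may differ in the actual class:

```java
// Hypothetical caller: collect the distinct nameservice IDs across all namespaces.
Collection<String> nameserviceIds =
    getNamespaceInfo(FederationNamespaceInfo::getNameserviceId);
```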
hadoop_RouterFedBalance_getDefaultConf | /**
* Loads properties from hdfs-fedbalance-default.xml into configuration
* object.
*
* @return Configuration which includes properties from
* hdfs-fedbalance-default.xml and hdfs-fedbalance-site.xml
*/
@VisibleForTesting
static Configuration getDefaultConf() {
Configuration config = new HdfsConfiguration();
config.addResource(FED_BALANCE_DEFAULT_XML);
config.addResource(FED_BALANCE_SITE_XML);
return config;
} | 3.68 |
hadoop_CachedDNSToSwitchMapping_getSwitchMap | /**
* Get the (host x switch) map.
* @return a copy of the cached map of hosts to rack
*/
@Override
public Map<String, String> getSwitchMap() {
return new HashMap<>(cache);
} | 3.68 |
dubbo_AccessLogData_setOutTime | /**
 * Set the out time. As an argument it accepts a {@link Date}.
*
* @param outTime
*/
public void setOutTime(Date outTime) {
set(OUT_TIME, outTime);
} | 3.68 |
hadoop_JWTRedirectAuthenticationHandler_init | /**
* Initializes the authentication handler instance.
* <p>
* This method is invoked by the {@link AuthenticationFilter#init} method.
* </p>
* @param config
* configuration properties to initialize the handler.
*
* @throws ServletException
* thrown if the handler could not be initialized.
*/
@Override
public void init(Properties config) throws ServletException {
super.init(config);
// setup the URL to redirect to for authentication
authenticationProviderUrl = config
.getProperty(AUTHENTICATION_PROVIDER_URL);
if (authenticationProviderUrl == null) {
throw new ServletException(
"Authentication provider URL must not be null - configure: "
+ AUTHENTICATION_PROVIDER_URL);
}
// setup the public key of the token issuer for verification
if (publicKey == null) {
String pemPublicKey = config.getProperty(PUBLIC_KEY_PEM);
if (pemPublicKey == null) {
throw new ServletException(
"Public key for signature validation must be provisioned.");
}
publicKey = CertificateUtil.parseRSAPublicKey(pemPublicKey);
}
// setup the list of valid audiences for token validation
String auds = config.getProperty(EXPECTED_JWT_AUDIENCES);
if (auds != null) {
// parse into the list
String[] audArray = auds.split(",");
audiences = new ArrayList<String>();
for (String a : audArray) {
audiences.add(a);
}
}
// setup custom cookie name if configured
String customCookieName = config.getProperty(JWT_COOKIE_NAME);
if (customCookieName != null) {
cookieName = customCookieName;
}
} | 3.68 |
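A minimal sketch of the properties such a handler expects; the key strings below are assumptions standing in for the handler's AUTHENTICATION_PROVIDER_URL, PUBLIC_KEY_PEM, EXPECTED_JWT_AUDIENCES and JWT_COOKIE_NAME constants and may not match the real values:

```java
import java.util.Properties;

// Hedged sketch: building the configuration passed to init(); key names are assumed.
static Properties buildJwtHandlerConfig() {
    Properties config = new Properties();
    config.setProperty("authentication.provider.url", "https://sso.example.com/knoxsso"); // required
    config.setProperty("public.key.pem", "MIIBIjANBgkqh...");                             // required: issuer's RSA public key (PEM body)
    config.setProperty("expected.jwt.audiences", "service-a,service-b");                  // optional, comma-separated
    config.setProperty("jwt.cookie.name", "hadoop-jwt");                                  // optional custom cookie name
    return config;
}
```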
graphhopper_PointList_shallowCopy | /**
 * Create a shallow copy of this PointList from {@code from} to {@code end}, excluding {@code end}.
*
* @param makeImmutable makes this PointList immutable. If you don't ensure the consistency it might happen that due to changes of this
* object, the shallow copy might contain incorrect or corrupt data.
*/
public PointList shallowCopy(final int from, final int end, boolean makeImmutable) {
if (makeImmutable)
this.makeImmutable();
return new ShallowImmutablePointList(from, end, this);
} | 3.68 |
hbase_TableMapReduceUtil_findOrCreateJar | /**
* Finds the Jar for a class or creates it if it doesn't exist. If the class is in a directory in
* the classpath, it creates a Jar on the fly with the contents of the directory and returns the
* path to that Jar. If a Jar is created, it is created in the system temporary directory.
* Otherwise, returns an existing jar that contains a class of the same name. Maintains a mapping
* from jar contents to the tmp jar created.
* @param my_class the class to find.
* @param fs the FileSystem with which to qualify the returned path.
* @param packagedClasses a map of class name to path.
* @return a jar file that contains the class.
*/
private static Path findOrCreateJar(Class<?> my_class, FileSystem fs,
Map<String, String> packagedClasses) throws IOException {
// attempt to locate an existing jar for the class.
String jar = findContainingJar(my_class, packagedClasses);
if (null == jar || jar.isEmpty()) {
jar = getJar(my_class);
updateMap(jar, packagedClasses);
}
if (null == jar || jar.isEmpty()) {
return null;
}
LOG.debug(String.format("For class %s, using jar %s", my_class.getName(), jar));
return new Path(jar).makeQualified(fs.getUri(), fs.getWorkingDirectory());
} | 3.68 |
hudi_BaseRollbackPlanActionExecutor_requestRollback | /**
* Creates a Rollback plan if there are files to be rolled back and stores them in instant file.
* Rollback Plan contains absolute file paths.
*
* @param startRollbackTime Rollback Instant Time
* @return Rollback Plan if generated
*/
protected Option<HoodieRollbackPlan> requestRollback(String startRollbackTime) {
final HoodieInstant rollbackInstant = new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.ROLLBACK_ACTION, startRollbackTime);
try {
List<HoodieRollbackRequest> rollbackRequests = new ArrayList<>();
if (!instantToRollback.isRequested()) {
rollbackRequests.addAll(getRollbackStrategy().getRollbackRequests(instantToRollback));
}
HoodieRollbackPlan rollbackPlan = new HoodieRollbackPlan(new HoodieInstantInfo(instantToRollback.getTimestamp(),
instantToRollback.getAction()), rollbackRequests, LATEST_ROLLBACK_PLAN_VERSION);
if (!skipTimelinePublish) {
if (table.getRollbackTimeline().filterInflightsAndRequested().containsInstant(rollbackInstant.getTimestamp())) {
LOG.warn("Request Rollback found with instant time " + rollbackInstant + ", hence skipping scheduling rollback");
} else {
table.getActiveTimeline().saveToRollbackRequested(rollbackInstant, TimelineMetadataUtils.serializeRollbackPlan(rollbackPlan));
table.getMetaClient().reloadActiveTimeline();
LOG.info("Requesting Rollback with instant time " + rollbackInstant);
}
}
return Option.of(rollbackPlan);
} catch (IOException e) {
LOG.error("Got exception when saving rollback requested file", e);
throw new HoodieIOException(e.getMessage(), e);
}
} | 3.68 |
framework_DropTargetExtensionConnector_onDragOver | /**
* Event handler for the {@code dragover} event.
* <p>
* Override this method in case custom handling for the dragover event is
* required. If the drop is allowed, the event should prevent default.
*
* @param event
* browser event to be handled
*/
protected void onDragOver(Event event) {
NativeEvent nativeEvent = (NativeEvent) event;
if (isDropAllowed(nativeEvent)) {
setDropEffect(nativeEvent);
// Add drag over indicator in case the element doesn't have one
addDragOverStyle(nativeEvent);
// Prevent default to allow drop
nativeEvent.preventDefault();
nativeEvent.stopPropagation();
} else {
// Remove drop effect
nativeEvent.getDataTransfer()
.setDropEffect(DataTransfer.DropEffect.NONE);
// Remove drag over indicator
removeDragOverStyle(nativeEvent);
}
} | 3.68 |
morf_PortableSqlStatement_deepCopy | /**
* Deep copy.
*
* @return null.
*/
@Override
public Statement deepCopy() {
return null;
} | 3.68 |
framework_Table_setConverter | /**
* Sets a converter for a property id.
* <p>
 * The converter is used to format the data for the given property id
* before displaying it in the table.
* </p>
*
* @param propertyId
* The propertyId to format using the converter
* @param converter
* The converter to use for the property id
*/
public void setConverter(Object propertyId,
Converter<String, ?> converter) {
if (!getContainerPropertyIds().contains(propertyId)) {
throw new IllegalArgumentException(
"PropertyId " + propertyId + " must be in the container");
}
if (!typeIsCompatible(converter.getModelType(), getType(propertyId))) {
throw new IllegalArgumentException(
"Property type (" + getType(propertyId)
+ ") must match converter source type ("
+ converter.getModelType() + ")");
}
propertyValueConverters.put(propertyId,
(Converter<String, Object>) converter);
refreshRowCache();
} | 3.68 |
hadoop_JsonSerialization_fromResource | /**
 * Convert from a JSON classpath resource.
 * @param resource resource path on the classpath
* @return the parsed JSON
* @throws IOException IO problems
* @throws JsonParseException If the input is not well-formatted
* @throws JsonMappingException failure to map from the JSON to this class
*/
@SuppressWarnings({"IOResourceOpenedButNotSafelyClosed"})
public synchronized T fromResource(String resource)
throws IOException, JsonParseException, JsonMappingException {
try (InputStream resStream = this.getClass()
.getResourceAsStream(resource)) {
if (resStream == null) {
throw new FileNotFoundException(resource);
}
return mapper.readValue(resStream, classType);
} catch (IOException e) {
LOG.error("Exception while parsing json resource {}", resource, e);
throw e;
}
} | 3.68 |
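A hedged usage sketch; the `JsonSerialization(Class, boolean, boolean)` constructor arguments are assumed and `MyConfig` is a hypothetical POJO:

```java
// Load a bundled JSON resource into a POJO (resource path is illustrative).
static MyConfig loadBundledConfig() throws IOException {
    JsonSerialization<MyConfig> serDeser =
        new JsonSerialization<>(MyConfig.class, false, false);
    return serDeser.fromResource("/org/example/my-config.json");
}
```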
framework_TreeElement_getValue | /**
 * Returns the selected item of the tree. In multiselect mode returns the
 * first selected item. If there is no selected item, returns an empty string.
*
* @return selected item of the tree
*/
public String getValue() {
List<WebElement> selectedElements = findElements(
By.className("v-tree-node-selected"));
if (selectedElements.isEmpty()) {
return "";
} else {
return selectedElements.get(0).getText();
}
} | 3.68 |
hbase_TableInputFormatBase_getRecordReader | /**
* Builds a TableRecordReader. If no TableRecordReader was provided, uses the default.
* @see InputFormat#getRecordReader(InputSplit, JobConf, Reporter)
*/
public RecordReader<ImmutableBytesWritable, Result> getRecordReader(InputSplit split, JobConf job,
Reporter reporter) throws IOException {
// In case a subclass uses the deprecated approach or calls initializeTable directly
if (table == null) {
initialize(job);
}
// null check in case our child overrides getTable to not throw.
try {
if (getTable() == null) {
// initialize() must not have been implemented in the subclass.
throw new IOException(INITIALIZATION_ERROR);
}
} catch (IllegalStateException exception) {
throw new IOException(INITIALIZATION_ERROR, exception);
}
TableSplit tSplit = (TableSplit) split;
// if no table record reader was provided use default
final TableRecordReader trr =
this.tableRecordReader == null ? new TableRecordReader() : this.tableRecordReader;
trr.setStartRow(tSplit.getStartRow());
trr.setEndRow(tSplit.getEndRow());
trr.setHTable(this.table);
trr.setInputColumns(this.inputColumns);
trr.setRowFilter(this.rowFilter);
trr.init();
return new RecordReader<ImmutableBytesWritable, Result>() {
@Override
public void close() throws IOException {
trr.close();
closeTable();
}
@Override
public ImmutableBytesWritable createKey() {
return trr.createKey();
}
@Override
public Result createValue() {
return trr.createValue();
}
@Override
public long getPos() throws IOException {
return trr.getPos();
}
@Override
public float getProgress() throws IOException {
return trr.getProgress();
}
@Override
public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
return trr.next(key, value);
}
};
} | 3.68 |
pulsar_LeastResourceUsageWithWeight_updateAndGetMaxResourceUsageWithWeight | /**
* Update and get the max resource usage with weight of broker according to the service configuration.
*
* @param broker the broker name.
* @param brokerData The broker load data.
* @param conf The service configuration.
* @return the max resource usage with weight of broker
*/
private double updateAndGetMaxResourceUsageWithWeight(String broker, BrokerData brokerData,
ServiceConfiguration conf) {
final double historyPercentage = conf.getLoadBalancerHistoryResourcePercentage();
Double historyUsage = brokerAvgResourceUsageWithWeight.get(broker);
LocalBrokerData localData = brokerData.getLocalData();
// If the broker restarted or MsgRate is 0, should use current resourceUsage to cover the historyUsage
if (localData.getBundles().size() == 0 || (localData.getMsgRateIn() == 0 && localData.getMsgRateOut() == 0)){
historyUsage = null;
}
double resourceUsage = brokerData.getLocalData().getMaxResourceUsageWithWeight(
conf.getLoadBalancerCPUResourceWeight(),
conf.getLoadBalancerDirectMemoryResourceWeight(),
conf.getLoadBalancerBandwithInResourceWeight(),
conf.getLoadBalancerBandwithOutResourceWeight());
historyUsage = historyUsage == null
? resourceUsage : historyUsage * historyPercentage + (1 - historyPercentage) * resourceUsage;
if (log.isDebugEnabled()) {
log.debug(
"Broker {} get max resource usage with weight: {}, history resource percentage: {}%, CPU weight: "
+ "{}, MEMORY weight: {}, DIRECT MEMORY weight: {}, BANDWIDTH IN weight: {}, BANDWIDTH "
+ "OUT weight: {} ",
broker, historyUsage, historyPercentage, conf.getLoadBalancerCPUResourceWeight(),
conf.getLoadBalancerMemoryResourceWeight(), conf.getLoadBalancerDirectMemoryResourceWeight(),
conf.getLoadBalancerBandwithInResourceWeight(),
conf.getLoadBalancerBandwithOutResourceWeight());
}
brokerAvgResourceUsageWithWeight.put(broker, historyUsage);
return historyUsage;
} | 3.68 |
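To make the smoothing concrete, a small arithmetic sketch of the exponential moving average applied above (illustrative numbers, not Pulsar API):

```java
double historyPercentage = 0.9;   // conf.getLoadBalancerHistoryResourcePercentage()
double historyUsage = 0.50;       // previous smoothed usage
double resourceUsage = 0.80;      // current max weighted usage
// 0.50 * 0.9 + 0.1 * 0.80 = 0.53: the score drifts slowly toward the new sample
double updated = historyUsage * historyPercentage + (1 - historyPercentage) * resourceUsage;
```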
hadoop_BlockManager_requestPrefetch | /**
* Requests optional prefetching of the given block.
*
* @param blockNumber the id of the block to prefetch.
*
* @throws IllegalArgumentException if blockNumber is negative.
*/
public void requestPrefetch(int blockNumber) {
checkNotNegative(blockNumber, "blockNumber");
// Do nothing because we do not support prefetches.
} | 3.68 |
hadoop_DecayRpcSchedulerDetailedMetrics_shutdown | /**
* Shutdown the instrumentation process.
*/
public void shutdown() {
DefaultMetricsSystem.instance().unregisterSource(name);
} | 3.68 |
hbase_HRegion_getRegionDir | /**
* Computes the Path of the HRegion
* @param tabledir qualified path for table
* @param name ENCODED region name
* @return Path of HRegion directory
* @deprecated For tests only; to be removed.
*/
@Deprecated
public static Path getRegionDir(final Path tabledir, final String name) {
return new Path(tabledir, name);
} | 3.68 |
hadoop_BalanceProcedureScheduler_shutDown | /**
* Shutdown the scheduler.
*/
public synchronized void shutDown() {
if (!running.get()) {
return;
}
running.set(false);
readerThread.interrupt();
roosterThread.interrupt();
recoverThread.interrupt();
workersPool.shutdownNow();
} | 3.68 |
hbase_CoprocessorClassLoader_getClassLoader | /**
* Get a CoprocessorClassLoader for a coprocessor jar path from cache. If not in cache, create
* one.
* @param path the path to the coprocessor jar file to load classes from
* @param parent the parent class loader for exempted classes
* @param pathPrefix a prefix used in temp path name to store the jar file locally
* @param conf the configuration used to create the class loader, if needed
* @return a CoprocessorClassLoader for the coprocessor jar path
*/
public static CoprocessorClassLoader getClassLoader(final Path path, final ClassLoader parent,
final String pathPrefix, final Configuration conf) throws IOException {
CoprocessorClassLoader cl = getIfCached(path);
String pathStr = path.toString();
if (cl != null) {
LOG.debug("Found classloader " + cl + " for " + pathStr);
return cl;
}
if (path.getFileSystem(conf).isFile(path) && !pathStr.endsWith(".jar")) {
throw new IOException(pathStr + ": not a jar file?");
}
Lock lock = locker.acquireLock(pathStr);
try {
cl = getIfCached(path);
if (cl != null) {
LOG.debug("Found classloader " + cl + " for " + pathStr);
return cl;
}
cl = AccessController.doPrivileged(new PrivilegedAction<CoprocessorClassLoader>() {
@Override
public CoprocessorClassLoader run() {
return new CoprocessorClassLoader(parent);
}
});
cl.init(path, pathPrefix, conf);
// Cache class loader as a weak value, will be GC'ed when no reference left
CoprocessorClassLoader prev = classLoadersCache.putIfAbsent(path, cl);
if (prev != null) {
// Lost update race, use already added class loader
LOG.warn("THIS SHOULD NOT HAPPEN, a class loader" + " is already cached for " + pathStr);
cl = prev;
}
return cl;
} finally {
lock.unlock();
}
} | 3.68 |
hibernate-validator_ClassVisitor_visitExecutableAsMethod | /**
* Checks whether the constraints of the given method are valid.
*
* @param element a method under investigation
* @param aVoid
*/
@Override
public Void visitExecutableAsMethod(ExecutableElement element, Void aVoid) {
processClassChecks( element );
return null;
} | 3.68 |
flink_LocalBufferPool_lazyDestroy | /** Destroy is called after the produce or consume phase of a task finishes. */
@Override
public void lazyDestroy() {
// NOTE: if you change this logic, be sure to update recycle() as well!
CompletableFuture<?> toNotify = null;
synchronized (availableMemorySegments) {
if (!isDestroyed) {
MemorySegment segment;
while ((segment = availableMemorySegments.poll()) != null) {
returnMemorySegment(segment);
}
BufferListener listener;
while ((listener = registeredListeners.poll()) != null) {
listener.notifyBufferDestroyed();
}
if (!isAvailable()) {
toNotify = availabilityHelper.getAvailableFuture();
}
isDestroyed = true;
}
}
mayNotifyAvailable(toNotify);
networkBufferPool.destroyBufferPool(this);
} | 3.68 |
hudi_BaseHoodieWriteClient_setWriteSchemaForDeletes | /**
* Sets write schema from last instant since deletes may not have schema set in the config.
*/
protected void setWriteSchemaForDeletes(HoodieTableMetaClient metaClient) {
try {
HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
Option<HoodieInstant> lastInstant =
activeTimeline.filterCompletedInstants().filter(s -> s.getAction().equals(metaClient.getCommitActionType())
|| s.getAction().equals(HoodieActiveTimeline.REPLACE_COMMIT_ACTION))
.lastInstant();
if (lastInstant.isPresent()) {
HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(
activeTimeline.getInstantDetails(lastInstant.get()).get(), HoodieCommitMetadata.class);
String extraSchema = commitMetadata.getExtraMetadata().get(SCHEMA_KEY);
if (!StringUtils.isNullOrEmpty(extraSchema)) {
config.setSchema(commitMetadata.getExtraMetadata().get(SCHEMA_KEY));
} else {
throw new HoodieIOException("Latest commit does not have any schema in commit metadata");
}
} else {
LOG.warn("None rows are deleted because the table is empty");
}
} catch (IOException e) {
throw new HoodieIOException("IOException thrown while reading last commit metadata", e);
}
} | 3.68 |
flink_Path_suffix | /**
* Adds a suffix to the final name in the path.
*
* @param suffix The suffix to be added
* @return the new path including the suffix
*/
public Path suffix(String suffix) {
return new Path(getParent(), getName() + suffix);
} | 3.68 |
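A brief usage sketch (behaviour follows directly from the method above):

```java
// Append a suffix to the last path component.
Path base = new Path("hdfs:///data/part-0");
Path inProgress = base.suffix(".inprogress"); // -> hdfs:///data/part-0.inprogress
```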
flink_FlinkContainersSettings_flinkHome | /**
* Sets the path of the Flink distribution inside the container. Returns a reference to this
* Builder enabling method chaining.
*
* @param flinkHome The {@code flinkHome} to set.
* @return A reference to this Builder.
*/
public Builder flinkHome(String flinkHome) {
this.flinkHome = flinkHome;
return this;
} | 3.68 |
flink_DataStream_project | /**
* Initiates a Project transformation on a {@link Tuple} {@link DataStream}.<br>
* <b>Note: Only Tuple DataStreams can be projected.</b>
*
* <p>The transformation projects each Tuple of the DataSet onto a (sub)set of fields.
*
* @param fieldIndexes The field indexes of the input tuples that are retained. The order of
* fields in the output tuple corresponds to the order of field indexes.
* @return The projected DataStream
* @see Tuple
* @see DataStream
*/
@PublicEvolving
public <R extends Tuple> SingleOutputStreamOperator<R> project(int... fieldIndexes) {
return new StreamProjection<>(this, fieldIndexes).projectTupleX();
} | 3.68 |
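A hedged usage sketch of tuple projection; the element types and field order are chosen for illustration:

```java
// Hedged sketch: keep only fields 2 and 0 of each tuple, in that order.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
DataStream<Tuple3<Integer, String, Double>> in =
    env.fromElements(Tuple3.of(1, "a", 3.5), Tuple3.of(2, "b", 4.5));
SingleOutputStreamOperator<Tuple2<Double, Integer>> projected = in.project(2, 0);
```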
morf_GraphBasedUpgradeSchemaChangeVisitor_create | /**
* Creates {@link GraphBasedUpgradeSchemaChangeVisitor} instance.
*
* @param sourceSchema schema prior to upgrade step
* @param sqlDialect dialect to generate statements for the target database
* @param idTable table for id generation
* @param upgradeNodes all the {@link GraphBasedUpgradeNode} instances in the upgrade for
* which the visitor will generate statements
* @return new {@link GraphBasedUpgradeSchemaChangeVisitor} instance
*/
GraphBasedUpgradeSchemaChangeVisitor create(Schema sourceSchema, SqlDialect sqlDialect, Table idTable,
Map<String, GraphBasedUpgradeNode> upgradeNodes) {
return new GraphBasedUpgradeSchemaChangeVisitor(sourceSchema, sqlDialect, idTable, upgradeNodes);
} | 3.68 |
flink_ChannelStateWriteRequestExecutorImpl_waitAndTakeUnsafe | /**
* Retrieves and removes the head request of the {@link #deque}, waiting if necessary until an
* element becomes available.
*
* @return The head request, it can be null when the executor is closed.
*/
@Nullable
private ChannelStateWriteRequest waitAndTakeUnsafe() throws InterruptedException {
ChannelStateWriteRequest request;
while (!wasClosed) {
request = deque.pollFirst();
if (request == null) {
lock.wait();
} else {
return request;
}
}
return null;
} | 3.68 |
rocketmq-connect_KafkaSinkAdaptorConnector_start | /**
* Start the component
*
* @param config component context
*/
@Override
public void start(KeyValue config) {
super.start(config);
sinkConnector.validate(taskConfig);
sinkConnector.initialize(new KafkaConnectorContext(connectorContext));
sinkConnector.start(taskConfig);
} | 3.68 |
hadoop_TFile_getMetaBlock | /**
 * Stream access to a meta block.
*
* @param name
* The name of the meta block.
* @return The input stream.
* @throws IOException
* on I/O error.
* @throws MetaBlockDoesNotExist
* If the meta block with the name does not exist.
*/
public DataInputStream getMetaBlock(String name) throws IOException,
MetaBlockDoesNotExist {
return readerBCF.getMetaBlock(name);
} | 3.68 |
pulsar_ManagedLedgerConfig_setThrottleMarkDelete | /**
* Set the rate limiter on how many mark-delete calls per second are allowed. If the value is set to 0, the rate
* limiter is disabled. Default is 0.
*
* @param throttleMarkDelete
* the max number of mark-delete calls allowed per second
*/
public ManagedLedgerConfig setThrottleMarkDelete(double throttleMarkDelete) {
checkArgument(throttleMarkDelete >= 0.0);
this.throttleMarkDelete = throttleMarkDelete;
return this;
} | 3.68 |
hmily_HmilyTccTransactionExecutor_participantCancel | /**
* Participant cancel object.
*
* @param hmilyParticipants the hmily participants
* @param selfParticipantId the self participant id
* @return the object
*/
public Object participantCancel(final List<HmilyParticipant> hmilyParticipants, final Long selfParticipantId) {
LogUtil.debug(LOGGER, () -> "tcc cancel ...........start!");
if (CollectionUtils.isEmpty(hmilyParticipants)) {
return null;
}
//if cc pattern,can not execute cancel
//update cancel
HmilyParticipant selfHmilyParticipant = filterSelfHmilyParticipant(hmilyParticipants);
if (Objects.nonNull(selfHmilyParticipant)) {
selfHmilyParticipant.setStatus(HmilyActionEnum.CANCELING.getCode());
HmilyRepositoryStorage.updateHmilyParticipantStatus(selfHmilyParticipant);
}
List<Object> results = Lists.newArrayListWithCapacity(hmilyParticipants.size());
for (HmilyParticipant hmilyParticipant : hmilyParticipants) {
try {
if (hmilyParticipant.getParticipantId().equals(selfParticipantId)) {
final Object result = HmilyReflector.executor(HmilyActionEnum.CANCELING, ExecutorTypeEnum.LOCAL, hmilyParticipant);
results.add(result);
HmilyRepositoryStorage.removeHmilyParticipant(hmilyParticipant);
} else {
final Object result = HmilyReflector.executor(HmilyActionEnum.CANCELING, ExecutorTypeEnum.RPC, hmilyParticipant);
results.add(result);
}
} catch (Throwable throwable) {
throw new HmilyRuntimeException(" hmilyParticipant execute cancel exception:" + hmilyParticipant.toString());
} finally {
HmilyContextHolder.remove();
}
}
HmilyParticipantCacheManager.getInstance().removeByKey(selfParticipantId);
return results.get(0);
} | 3.68 |
hadoop_OSSListResult_v1 | /**
* Restricted constructors to ensure v1 or v2, not both.
* @param result v1 result
* @return new list result container
*/
public static OSSListResult v1(ObjectListing result) {
return new OSSListResult(result, null);
} | 3.68 |
hadoop_HSAuditLogger_createSuccessLog | /**
* A helper api for creating an audit log for a successful event.
*/
static String createSuccessLog(String user, String operation, String target) {
StringBuilder b = new StringBuilder();
start(Keys.USER, user, b);
addRemoteIP(b);
add(Keys.OPERATION, operation, b);
add(Keys.TARGET, target, b);
add(Keys.RESULT, AuditConstants.SUCCESS, b);
return b.toString();
} | 3.68 |
hbase_MobUtils_getTableNameString | /**
* Gets the table name from when this cell was written into a mob hfile as a string.
* @param cell to extract tag from
* @return table name as a string. empty if the tag is not found.
*/
public static Optional<String> getTableNameString(Cell cell) {
Optional<Tag> tag = getTableNameTag(cell);
Optional<String> name = Optional.empty();
if (tag.isPresent()) {
name = Optional.of(Tag.getValueAsString(tag.get()));
}
return name;
} | 3.68 |
flink_KvStateLocationRegistry_notifyKvStateUnregistered | /**
* Notifies the registry about an unregistered KvState instance.
*
* @param jobVertexId JobVertexID the KvState instance belongs to
* @param keyGroupRange Key group index the KvState instance belongs to
* @param registrationName Name under which the KvState has been registered
* @throws IllegalArgumentException If another operator registered the state instance
* @throws IllegalArgumentException If the registration name is not known
*/
public void notifyKvStateUnregistered(
JobVertexID jobVertexId, KeyGroupRange keyGroupRange, String registrationName) {
KvStateLocation location = lookupTable.get(registrationName);
if (location != null) {
// Duplicate name if vertex IDs don't match
if (!location.getJobVertexId().equals(jobVertexId)) {
throw new IllegalArgumentException(
"Another operator ("
+ location.getJobVertexId()
+ ") registered the KvState "
+ "under '"
+ registrationName
+ "'.");
}
location.unregisterKvState(keyGroupRange);
if (location.getNumRegisteredKeyGroups() == 0) {
lookupTable.remove(registrationName);
}
} else {
throw new IllegalArgumentException(
"Unknown registration name '"
+ registrationName
+ "'. "
+ "Probably registration/unregistration race.");
}
} | 3.68 |
MagicPlugin_MagicConfigCommandExecutor_applySession | /**
* Note that this gets called asynchronously
*/
protected void applySession(String sessionId, Session session, CommandSender sender, String command, boolean load) {
String missingMessage = magic.getMessages().get("commands.mconfig." + command + ".missing");
String type = session.getType();
if (type == null || type.isEmpty()) {
missingMessage = missingMessage.replace("$field", "type");
AsyncProcessor.fail(controller, sender, missingMessage);
return;
}
boolean isMainConfiguration = type.equals("config");
boolean isMessagesConfiguration = type.equals("messages");
String key = session.getKey();
if (!isMainConfiguration && !isMessagesConfiguration && (key == null || key.isEmpty())) {
missingMessage = missingMessage.replace("$field", "key");
AsyncProcessor.fail(controller, sender, missingMessage);
return;
}
if (key != null && !key.isEmpty()) {
isMainConfiguration = false;
isMessagesConfiguration = false;
}
String contents = session.getContents();
if (contents == null || contents.isEmpty()) {
missingMessage = missingMessage.replace("$field", "contents");
AsyncProcessor.fail(controller, sender, missingMessage);
return;
}
YamlConfiguration testLoad = new YamlConfiguration();
try {
testLoad.loadFromString(contents);
} catch (InvalidConfigurationException e) {
String message = magic.getMessages().get("commands.mconfig." + command + ".invalid");
AsyncProcessor.fail(controller, sender, message);
return;
}
File file;
if (isMainConfiguration || isMessagesConfiguration) {
file = new File(magic.getPlugin().getDataFolder(), type + ".yml");
} else {
String filename = key + ".yml";
filename = filename.replace("|", "_");
File typeFolder = new File(magic.getPlugin().getDataFolder(), type);
if (!typeFolder.exists()) {
typeFolder.mkdir();
}
file = new File(typeFolder, filename);
}
if (file.exists()) {
String message = magic.getMessages().get("commands.mconfig." + command + ".overwrote");
AsyncProcessor.success(controller, sender, message.replace("$file", file.getName()));
} else {
String message = magic.getMessages().get("commands.mconfig." + command + ".created");
AsyncProcessor.success(controller, sender, message.replace("$file", file.getName()));
}
try {
PrintWriter out = new PrintWriter(file, "UTF-8");
out.print(contents);
out.close();
if (load) {
Plugin plugin = controller.getPlugin();
plugin.getServer().getScheduler().runTask(plugin, new Runnable() {
@Override
public void run() {
controller.loadConfigurationQuietly(sender);
}
});
} else {
AsyncProcessor.success(controller, sender, magic.getMessages().get("commands.mconfig." + command + ".load_prompt"));
}
final Plugin plugin = magic.getPlugin();
plugin.getServer().getScheduler().runTask(plugin, new Runnable() {
@Override
public void run() {
Mage mage = controller.getMage(sender);
sessions.put(mage.getId(), sessionId);
}
});
} catch (Exception ex) {
String message = magic.getMessages().get("commands.mconfig." + command + ".error_saving");
AsyncProcessor.fail(controller, sender, message.replace("$file", file.getName()),
"Error writing config file " + file.getAbsolutePath(), ex);
}
} | 3.68 |
flink_DatadogHttpReporter_getTagsFromConfig | /** Get config tags from config 'metrics.reporter.dghttp.tags'. */
private List<String> getTagsFromConfig(String str) {
return Arrays.asList(str.split(","));
} | 3.68 |
framework_VTree_executeEventCommand | /*
* Must wait for Safari to focus before sending click and value change
* events (see #6373, #6374)
*/
private void executeEventCommand(ScheduledCommand command) {
if (BrowserInfo.get().isWebkit() && !treeHasFocus) {
Scheduler.get().scheduleDeferred(command);
} else {
command.execute();
}
} | 3.68 |
framework_VCalendar_isEventCaptionAsHtml | /**
* Checks whether event captions are rendered as HTML
* <p>
 * The default is false, i.e. the caption is rendered as plain text.
*
* @return true if the captions are rendered as HTML, false if rendered as
* plain text
*/
public boolean isEventCaptionAsHtml() {
return eventCaptionAsHtml;
} | 3.68 |
hbase_AsyncTable_putAll | /**
* A simple version of batch put. It will fail if there are any failures.
* @param puts The list of mutations to apply.
* @return A {@link CompletableFuture} that always returns null when complete normally.
*/
default CompletableFuture<Void> putAll(List<Put> puts) {
return allOf(put(puts)).thenApply(r -> null);
} | 3.68 |
hadoop_OBSObjectBucketUtils_renameFile | /**
* Implement rename file.
*
* @param owner OBS File System instance
* @param srcKey source object key
* @param dstKey destination object key
* @param srcStatus source object status
* @throws IOException any problem with rename operation
*/
private static void renameFile(final OBSFileSystem owner,
final String srcKey,
final String dstKey,
final FileStatus srcStatus)
throws IOException {
long startTime = System.nanoTime();
copyFile(owner, srcKey, dstKey, srcStatus.getLen());
objectDelete(owner, srcStatus, false);
if (LOG.isDebugEnabled()) {
long delay = System.nanoTime() - startTime;
LOG.debug("OBSFileSystem rename: "
+ ", {src="
+ srcKey
+ ", dst="
+ dstKey
+ ", delay="
+ delay
+ "}");
}
} | 3.68 |
flink_AbstractBinaryWriter_write | /**
* Writes the specified byte to this output stream. The general contract for <code>write
* </code> is that one byte is written to the output stream. The byte to be written is the
* eight low-order bits of the argument <code>b</code>. The 24 high-order bits of <code>b
* </code> are ignored.
*/
@Override
public void write(int b) throws IOException {
ensureCapacity(1);
segment.put(cursor, (byte) b);
cursor += 1;
} | 3.68 |
dubbo_ScopeClusterInvoker_createInjvmInvoker | /**
* Creates a new Invoker for the current ScopeClusterInvoker and exports it to the local JVM.
*/
private void createInjvmInvoker(Exporter<?> exporter) {
if (injvmInvoker == null) {
synchronized (createLock) {
if (injvmInvoker == null) {
URL url = new ServiceConfigURL(
LOCAL_PROTOCOL,
NetUtils.getLocalHost(),
getUrl().getPort(),
getInterface().getName(),
getUrl().getParameters());
url = url.setScopeModel(getUrl().getScopeModel());
url = url.setServiceModel(getUrl().getServiceModel());
DubboServiceAddressURL consumerUrl = new DubboServiceAddressURL(
url.getUrlAddress(),
url.getUrlParam(),
exporter.getInvoker().getUrl(),
null);
Invoker<?> invoker = protocolSPI.refer(getInterface(), consumerUrl);
List<Invoker<?>> invokers = new ArrayList<>();
invokers.add(invoker);
injvmInvoker = Cluster.getCluster(url.getScopeModel(), Cluster.DEFAULT, false)
.join(new StaticDirectory(url, invokers), true);
}
}
}
} | 3.68 |
framework_FieldBinder_bindField | /**
* Tries to bind the given {@link Component} instance to a member field of
* the bind target. The fields are matched based on localId, id and caption.
*
* @param instance
* the instance to be bound to a field
* @param localId
* the localId used for mapping the field to an instance field
* @return true on success
* @throws FieldBindingException
* if error occurs when trying to bind the instance to a field
*/
public boolean bindField(Component instance, String localId) {
// check that the field exists, is correct type and is null
boolean success = bindFieldByIdentifier(localId, instance);
if (!success) {
success = bindFieldByIdentifier(instance.getId(), instance);
}
if (!success) {
success = bindFieldByIdentifier(instance.getCaption(), instance);
}
if (!success) {
String idInfo = "localId: " + localId + " id: " + instance.getId()
+ " caption: " + instance.getCaption();
getLogger().finest("Could not bind component to a field "
+ instance.getClass().getName() + " " + idInfo);
}
return success;
} | 3.68 |
querydsl_ComparableExpression_gt | /**
* Create a {@code this > right} expression
*
* @param right rhs of the comparison
* @return this > right
* @see java.lang.Comparable#compareTo(Object)
*/
public BooleanExpression gt(Expression<T> right) {
return Expressions.booleanOperation(Ops.GT, mixin, right);
} | 3.68 |
pulsar_BindAddressValidator_migrateBindAddresses | /**
* Generates bind addresses based on legacy configuration properties.
*/
private static List<BindAddress> migrateBindAddresses(ServiceConfiguration config) {
List<BindAddress> addresses = new ArrayList<>(2);
if (config.getBrokerServicePort().isPresent()) {
addresses.add(new BindAddress(null, URI.create(
ServiceConfigurationUtils.brokerUrl(config.getBindAddress(),
config.getBrokerServicePort().get()))));
}
if (config.getBrokerServicePortTls().isPresent()) {
addresses.add(new BindAddress(null, URI.create(
ServiceConfigurationUtils.brokerUrlTls(config.getBindAddress(),
config.getBrokerServicePortTls().get()))));
}
if (config.getWebServicePort().isPresent()) {
addresses.add(new BindAddress(null, URI.create(
ServiceConfigurationUtils.webServiceUrl(config.getBindAddress(),
config.getWebServicePort().get()))));
}
if (config.getWebServicePortTls().isPresent()) {
addresses.add(new BindAddress(null, URI.create(
ServiceConfigurationUtils.webServiceUrlTls(config.getBindAddress(),
config.getWebServicePortTls().get()))));
}
return addresses;
} | 3.68 |
morf_DummyXmlOutputStreamProvider_getXmlString | /**
 * @return the output converted to a String
*/
public String getXmlString() {
return new String(testOutputStream.toByteArray());
} | 3.68 |
flink_SnapshotDirectory_permanent | /**
* Creates a permanent snapshot directory for the given path, which will not delete the
* underlying directory in {@link #cleanup()} after {@link #completeSnapshotAndGetHandle()} was
* called.
*/
public static SnapshotDirectory permanent(@Nonnull Path directory) throws IOException {
return new PermanentSnapshotDirectory(directory);
} | 3.68 |
flink_SliceAssigners_tumbling | /**
* Creates a tumbling window {@link SliceAssigner} that assigns elements to slices of tumbling
* windows.
*
* @param rowtimeIndex the index of rowtime field in the input row, {@code -1} if based on
* processing time.
* @param shiftTimeZone The shift timezone of the window, if the proctime or rowtime type is
* TIMESTAMP_LTZ, the shift timezone is the timezone user configured in TableConfig, other
* cases the timezone is UTC which means never shift when assigning windows.
* @param size the size of the generated windows.
*/
public static TumblingSliceAssigner tumbling(
int rowtimeIndex, ZoneId shiftTimeZone, Duration size) {
return new TumblingSliceAssigner(rowtimeIndex, shiftTimeZone, size.toMillis(), 0);
} | 3.68 |
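A hedged construction example; the rowtime field index and zone are illustrative:

```java
import java.time.Duration;
import java.time.ZoneId;

// 5-minute event-time tumbling slices, rowtime at field index 3, no timezone shift.
static SliceAssigners.TumblingSliceAssigner fiveMinuteTumbling() {
    return SliceAssigners.tumbling(3, ZoneId.of("UTC"), Duration.ofMinutes(5));
}
```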
hbase_PrivateCellUtil_createLastOnRowCol | /**
* Create a Cell that is larger than all other possible Cells for the given Cell's rk:cf:q. Used
* in creating "fake keys" for the multi-column Bloom filter optimization to skip the row/column
* we already know is not in the file.
* @return Last possible Cell on passed Cell's rk:cf:q.
*/
public static Cell createLastOnRowCol(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return new LastOnRowColByteBufferExtendedCell(
((ByteBufferExtendedCell) cell).getRowByteBuffer(),
((ByteBufferExtendedCell) cell).getRowPosition(), cell.getRowLength(),
((ByteBufferExtendedCell) cell).getFamilyByteBuffer(),
((ByteBufferExtendedCell) cell).getFamilyPosition(), cell.getFamilyLength(),
((ByteBufferExtendedCell) cell).getQualifierByteBuffer(),
((ByteBufferExtendedCell) cell).getQualifierPosition(), cell.getQualifierLength());
}
return new LastOnRowColCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
} | 3.68 |
hbase_CreateStoreFileWriterParams_includeMVCCReadpoint | /**
* Whether to include MVCC or not
*/
public CreateStoreFileWriterParams includeMVCCReadpoint(boolean includeMVCCReadpoint) {
this.includeMVCCReadpoint = includeMVCCReadpoint;
return this;
} | 3.68 |
hadoop_HttpReferrerAuditHeader_addAttribute | /**
* Add a query parameter if not null/empty
* There's no need to escape here as it is done in the URI
* constructor.
* @param key query key
* @param value query value
*/
private void addAttribute(String key,
String value) {
if (StringUtils.isNotEmpty(value)) {
attributes.put(key, value);
}
} | 3.68 |
hbase_RSGroupInfo_addTable | /**
* Add a table
* @deprecated Since 3.0.0, will be removed in 4.0.0. The rsgroup information will be stored in
* the configuration of a table so this will be removed.
*/
@Deprecated
public void addTable(TableName table) {
tables.add(table);
} | 3.68 |
hadoop_LeveldbIterator_seekToFirst | /**
 * Repositions the iterator so it is at the beginning of the Database.
*/
public void seekToFirst() throws DBException {
try {
iter.seekToFirst();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.68 |
morf_AuditRecordHelper_addAuditRecord | /**
* Add the audit record, writing out the SQL for the insert.
*
* @see org.alfasoftware.morf.upgrade.SchemaChangeVisitor#addAuditRecord(java.util.UUID, java.lang.String)
*
* @param visitor The schema change visitor adding the audit record.
* @param schema The schema to add the audit record to.
* @param uuid The UUID of the step which has been applied
* @param description The description of the step.
*/
public static void addAuditRecord(SchemaChangeVisitor visitor, Schema schema, UUID uuid, String description) {
// There's no point adding an UpgradeAudit row if the table isn't there.
if (!schema.tableExists("UpgradeAudit"))
return;
InsertStatement auditRecord = createAuditInsertStatement(uuid, description);
visitor.visit(new ExecuteStatement(auditRecord));
} | 3.68 |
hbase_LruAdaptiveBlockCache_evict | /**
 * Eviction method. Evicts items in order of use, deleting the items which haven't been used for
 * the longest amount of time.
* @return how many bytes were freed
*/
long evict() {
// Ensure only one eviction at a time
if (!evictionLock.tryLock()) {
return 0;
}
long bytesToFree = 0L;
try {
evictionInProgress = true;
long currentSize = this.size.get();
bytesToFree = currentSize - minSize();
if (LOG.isTraceEnabled()) {
LOG.trace("Block cache LRU eviction started; Attempting to free "
+ StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize));
}
if (bytesToFree <= 0) {
return 0;
}
// Instantiate priority buckets
BlockBucket bucketSingle = new BlockBucket("single", bytesToFree, blockSize, singleSize());
BlockBucket bucketMulti = new BlockBucket("multi", bytesToFree, blockSize, multiSize());
BlockBucket bucketMemory = new BlockBucket("memory", bytesToFree, blockSize, memorySize());
// Scan entire map putting into appropriate buckets
for (LruCachedBlock cachedBlock : map.values()) {
switch (cachedBlock.getPriority()) {
case SINGLE: {
bucketSingle.add(cachedBlock);
break;
}
case MULTI: {
bucketMulti.add(cachedBlock);
break;
}
case MEMORY: {
bucketMemory.add(cachedBlock);
break;
}
}
}
long bytesFreed = 0;
if (forceInMemory || memoryFactor > 0.999f) {
long s = bucketSingle.totalSize();
long m = bucketMulti.totalSize();
if (bytesToFree > (s + m)) {
// this means we need to evict blocks in memory bucket to make room,
// so the single and multi buckets will be emptied
bytesFreed = bucketSingle.free(s);
bytesFreed += bucketMulti.free(m);
if (LOG.isTraceEnabled()) {
LOG.trace(
"freed " + StringUtils.byteDesc(bytesFreed) + " from single and multi buckets");
}
bytesFreed += bucketMemory.free(bytesToFree - bytesFreed);
if (LOG.isTraceEnabled()) {
LOG.trace(
"freed " + StringUtils.byteDesc(bytesFreed) + " total from all three buckets ");
}
} else {
// this means no need to evict block in memory bucket,
// and we try best to make the ratio between single-bucket and
// multi-bucket is 1:2
long bytesRemain = s + m - bytesToFree;
if (3 * s <= bytesRemain) {
// single-bucket is small enough that no eviction happens for it
// hence all eviction goes from multi-bucket
bytesFreed = bucketMulti.free(bytesToFree);
} else if (3 * m <= 2 * bytesRemain) {
// multi-bucket is small enough that no eviction happens for it
// hence all eviction goes from single-bucket
bytesFreed = bucketSingle.free(bytesToFree);
} else {
// both buckets need to evict some blocks
bytesFreed = bucketSingle.free(s - bytesRemain / 3);
if (bytesFreed < bytesToFree) {
bytesFreed += bucketMulti.free(bytesToFree - bytesFreed);
}
}
}
} else {
PriorityQueue<BlockBucket> bucketQueue = new PriorityQueue<>(3);
bucketQueue.add(bucketSingle);
bucketQueue.add(bucketMulti);
bucketQueue.add(bucketMemory);
int remainingBuckets = bucketQueue.size();
BlockBucket bucket;
while ((bucket = bucketQueue.poll()) != null) {
long overflow = bucket.overflow();
if (overflow > 0) {
long bucketBytesToFree =
Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets);
bytesFreed += bucket.free(bucketBytesToFree);
}
remainingBuckets--;
}
}
if (LOG.isTraceEnabled()) {
long single = bucketSingle.totalSize();
long multi = bucketMulti.totalSize();
long memory = bucketMemory.totalSize();
LOG.trace(
"Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed)
+ ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single="
+ StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", "
+ "memory=" + StringUtils.byteDesc(memory));
}
} finally {
stats.evict();
evictionInProgress = false;
evictionLock.unlock();
}
return bytesToFree;
} | 3.68 |
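A worked example of the 1:2 single:multi target in the non-memory path above (illustrative numbers): with single s = 40 MB, multi m = 80 MB and bytesToFree = 30 MB, bytesRemain = 90 MB; since 3*s = 120 MB exceeds bytesRemain and 3*m = 240 MB exceeds 2*bytesRemain = 180 MB, both buckets are trimmed: the single bucket frees s - bytesRemain/3 = 10 MB and the multi bucket frees the remaining 20 MB, leaving 30 MB and 60 MB, i.e. the 1:2 ratio.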
morf_MathsField_getOperator | /**
* @return the operator
*/
public MathsOperator getOperator() {
return operator;
} | 3.68 |
pulsar_MessageAcknowledger_acknowledgeAsync | /**
* The asynchronous version of {@link #acknowledge(MessageId)}.
*/
default CompletableFuture<Void> acknowledgeAsync(MessageId messageId) {
return acknowledgeAsync(messageId, null);
} | 3.68 |
hmily_HmilyRepositoryStorage_releaseHmilyLocks | /**
 * Release locks.
*
* @param hmilyLocks hmily locks
*/
public static void releaseHmilyLocks(final Collection<HmilyLock> hmilyLocks) {
if (!hmilyLocks.isEmpty()) {
PUBLISHER.syncPublishEvent(hmilyLocks, EventTypeEnum.RELEASE_HMILY_LOCKS.getCode());
}
} | 3.68 |
framework_VComboBox_dataReceived | /**
* Called by the connector when new data for the last requested filter
* is received from the server.
*/
public void dataReceived() {
if (initialData) {
suggestionPopup.menu.setSuggestions(currentSuggestions);
performSelection(serverSelectedKey, true, true);
updateSuggestionPopupMinWidth();
updateRootWidth();
initialData = false;
return;
}
suggestionPopup.menu.setSuggestions(currentSuggestions);
if (!waitingForFilteringResponse && suggestionPopup.isAttached()) {
showPopup = true;
}
// Don't show popup, if is not focused
if (showPopup && focused) {
suggestionPopup.showSuggestions(currentPage);
}
waitingForFilteringResponse = false;
if (pendingUserInput != null) {
boolean pendingHandled = suggestionPopup.menu.handledNewItem == pendingUserInput;
suggestionPopup.menu
.actOnEnteredValueAfterFiltering(pendingUserInput);
if (!allowNewItems || (pendingHandled
&& suggestionPopup.menu.handledNewItem == null)) {
pendingUserInput = null;
} else {
waitingForFilteringResponse = true;
}
} else if (popupOpenerClicked) {
// make sure the current item is selected in the popup
suggestionPopup.menu.highlightSelectedItem();
} else {
navigateItemAfterPageChange();
}
if (!showPopup) {
suggestionPopup.hide();
}
popupOpenerClicked = false;
showPopup = false;
} | 3.68 |
framework_VComboBox_setTotalSuggestions | /**
* Sets the total number of suggestions.
* <p>
 * NOTE: this excludes the possible null selection item!
* <p>
* NOTE: this just updates the state, but doesn't update any UI.
*
* @since 8.0
* @param totalSuggestions
* total number of suggestions
*/
public void setTotalSuggestions(int totalSuggestions) {
this.totalSuggestions = totalSuggestions;
} | 3.68 |
framework_VCalendar_getDateClickListener | /**
 * Gets the listener for listening to date clicks.
 *
 * @return the date click listener
*/
public DateClickListener getDateClickListener() {
return dateClickListener;
} | 3.68 |
morf_AbstractSelectStatementBuilder_fullOuterJoin | /**
 * Specifies a full outer join to a subselect:
*
* <blockquote><pre>
* TableReference sale = tableRef("Sale");
* TableReference customer = tableRef("Customer");
*
* // Define the subselect - a group by showing total sales by age in the
* // previous month.
* SelectStatement amountsByAgeLastMonth = select(field("age"), sum(field("amount")))
* .from(sale)
* .innerJoin(customer, sale.field("customerId").eq(customer.field("id")))
* .where(sale.field("month").eq(5))
* .groupBy(customer.field("age")
* .alias("amountByAge");
*
* // The outer select, showing each sale this month as a percentage of the sales
* // to that age the previous month
* SelectStatement outer = select(
* sale.field("id"),
* sale.field("amount")
* // May cause division by zero (!)
* .divideBy(isNull(amountsByAgeLastMonth.asTable().field("amount"), 0))
* .multiplyBy(literal(100))
* )
* .from(sale)
* .innerJoin(customer, sale.field("customerId").eq(customer.field("id")))
* .fullOuterJoin(amountsByAgeLastMonth, amountsByAgeLastMonth.asTable().field("age").eq(customer.field("age")));
* </pre></blockquote>
*
* @param subSelect the sub select statement to join on to
* @param criterion the criteria on which to join the tables
* @return this, for method chaining.
*/
public T fullOuterJoin(SelectStatement subSelect, Criterion criterion) {
joins.add(new Join(JoinType.FULL_OUTER_JOIN, subSelect, criterion));
return castToChild(this);
} | 3.68 |
hadoop_AzureNativeFileSystemStore_configureAzureStorageSession | /**
* Set the configuration parameters for this client storage session with
* Azure.
*
* @throws AzureException
*/
private void configureAzureStorageSession() throws AzureException {
// Assertion: Target session URI already should have been captured.
if (sessionUri == null) {
throw new AssertionError(
"Expected a non-null session URI when configuring storage session");
}
// Assertion: A client session already should have been established with
// Azure.
if (storageInteractionLayer == null) {
throw new AssertionError(String.format(
"Cannot configure storage session for URI '%s' "
+ "if storage session has not been established.",
sessionUri.toString()));
}
// Determine whether or not reads are allowed concurrent with OOB writes.
tolerateOobAppends = sessionConfiguration.getBoolean(
KEY_READ_TOLERATE_CONCURRENT_APPEND,
DEFAULT_READ_TOLERATE_CONCURRENT_APPEND);
// Retrieve configuration for the minimum stream read and write block size.
//
this.downloadBlockSizeBytes = sessionConfiguration.getInt(
KEY_STREAM_MIN_READ_SIZE, DEFAULT_DOWNLOAD_BLOCK_SIZE);
this.uploadBlockSizeBytes = sessionConfiguration.getInt(
KEY_WRITE_BLOCK_SIZE, DEFAULT_UPLOAD_BLOCK_SIZE);
this.hadoopBlockSize = sessionConfiguration.getLong(
HADOOP_BLOCK_SIZE_PROPERTY_NAME, DEFAULT_HADOOP_BLOCK_SIZE);
this.inputStreamVersion = sessionConfiguration.getInt(
KEY_INPUT_STREAM_VERSION, DEFAULT_INPUT_STREAM_VERSION);
// The job may want to specify a timeout to use when engaging the
// storage service. The default is currently 90 seconds. It may
// be necessary to increase this value for long latencies in larger
// jobs. If the timeout specified is greater than zero seconds use
// it, otherwise use the default service client timeout.
int storageConnectionTimeout = sessionConfiguration.getInt(
KEY_STORAGE_CONNECTION_TIMEOUT, 0);
if (0 < storageConnectionTimeout) {
storageInteractionLayer.setTimeoutInMs(storageConnectionTimeout * 1000);
}
// Set the concurrency values equal to the that specified in the
// configuration file. If it does not exist, set it to the default
// value calculated as double the number of CPU cores on the client
// machine. The concurrency value is minimum of double the cores and
// the read/write property.
int cpuCores = 2 * Runtime.getRuntime().availableProcessors();
concurrentWrites = sessionConfiguration.getInt(
KEY_CONCURRENT_CONNECTION_VALUE_OUT,
Math.min(cpuCores, DEFAULT_CONCURRENT_WRITES));
// Set up the exponential retry policy.
//
minBackoff = sessionConfiguration.getInt(
KEY_MIN_BACKOFF_INTERVAL, DEFAULT_MIN_BACKOFF_INTERVAL);
maxBackoff = sessionConfiguration.getInt(
KEY_MAX_BACKOFF_INTERVAL, DEFAULT_MAX_BACKOFF_INTERVAL);
deltaBackoff = sessionConfiguration.getInt(
KEY_BACKOFF_INTERVAL, DEFAULT_BACKOFF_INTERVAL);
maxRetries = sessionConfiguration.getInt(
KEY_MAX_IO_RETRIES, DEFAULT_MAX_RETRY_ATTEMPTS);
storageInteractionLayer.setRetryPolicyFactory(
new RetryExponentialRetry(minBackoff, deltaBackoff, maxBackoff, maxRetries));
// read the self-throttling config.
selfThrottlingEnabled = sessionConfiguration.getBoolean(
KEY_SELF_THROTTLE_ENABLE, DEFAULT_SELF_THROTTLE_ENABLE);
selfThrottlingReadFactor = sessionConfiguration.getFloat(
KEY_SELF_THROTTLE_READ_FACTOR, DEFAULT_SELF_THROTTLE_READ_FACTOR);
selfThrottlingWriteFactor = sessionConfiguration.getFloat(
KEY_SELF_THROTTLE_WRITE_FACTOR, DEFAULT_SELF_THROTTLE_WRITE_FACTOR);
if (!selfThrottlingEnabled) {
autoThrottlingEnabled = sessionConfiguration.getBoolean(
KEY_AUTO_THROTTLE_ENABLE,
DEFAULT_AUTO_THROTTLE_ENABLE);
if (autoThrottlingEnabled) {
ClientThrottlingIntercept.initializeSingleton();
}
} else {
// cannot enable both self-throttling and client-throttling
autoThrottlingEnabled = false;
}
OperationContext.setLoggingEnabledByDefault(sessionConfiguration.
getBoolean(KEY_ENABLE_STORAGE_CLIENT_LOGGING, false));
LOG.debug(
"AzureNativeFileSystemStore init. Settings={},{},{},{{},{},{},{}},{{},{},{}}",
concurrentWrites, tolerateOobAppends,
((storageConnectionTimeout > 0) ? storageConnectionTimeout
: STORAGE_CONNECTION_TIMEOUT_DEFAULT), minBackoff,
deltaBackoff, maxBackoff, maxRetries, selfThrottlingEnabled,
selfThrottlingReadFactor, selfThrottlingWriteFactor);
} | 3.68 |
flink_NFAStateNameHandler_getUniqueInternalName | /**
* Used to give a unique name to {@link org.apache.flink.cep.nfa.NFA} states created during the
* translation process. The name format will be {@code baseName:counter} , where the counter is
* increasing for states with the same {@code baseName}.
*
* @param baseName The base of the name.
* @return The (unique) name that is going to be used internally for the state.
*/
public String getUniqueInternalName(String baseName) {
int counter = 0;
String candidate = baseName;
while (usedNames.contains(candidate)) {
candidate = baseName + STATE_NAME_DELIM + counter++;
}
usedNames.add(candidate);
return candidate;
} | 3.68 |
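A behaviour sketch derived from the code above (assumes a default NFAStateNameHandler constructor):

```java
NFAStateNameHandler names = new NFAStateNameHandler();
String a = names.getUniqueInternalName("start"); // "start"
String b = names.getUniqueInternalName("start"); // "start:0"
String c = names.getUniqueInternalName("start"); // "start:1"
```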
dubbo_PropertySourcesUtils_getSubProperties | /**
* Get prefixed {@link Properties}
*
* @param propertySources {@link PropertySources}
* @param propertyResolver {@link PropertyResolver} to resolve the placeholder if present
* @param prefix the prefix of property name
* @return Map
* @see Properties
*/
public static Map<String, Object> getSubProperties(
PropertySources propertySources, PropertyResolver propertyResolver, String prefix) {
Map<String, Object> subProperties = new LinkedHashMap<String, Object>();
String normalizedPrefix = normalizePrefix(prefix);
Iterator<PropertySource<?>> iterator = propertySources.iterator();
while (iterator.hasNext()) {
PropertySource<?> source = iterator.next();
for (String name : getPropertyNames(source)) {
if (!subProperties.containsKey(name) && name.startsWith(normalizedPrefix)) {
String subName = name.substring(normalizedPrefix.length());
if (!subProperties.containsKey(subName)) { // take first one
Object value = source.getProperty(name);
if (value instanceof String) {
// Resolve placeholder
value = propertyResolver.resolvePlaceholders((String) value);
}
subProperties.put(subName, value);
}
}
}
}
return unmodifiableMap(subProperties);
} | 3.68 |
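A hedged usage sketch with Spring's standard property-source classes; the property names are illustrative:

```java
import java.util.HashMap;
import java.util.Map;
import org.springframework.core.env.MapPropertySource;
import org.springframework.core.env.MutablePropertySources;
import org.springframework.core.env.PropertySourcesPropertyResolver;

// Extract the sub-properties under the "dubbo.application" prefix.
static Map<String, Object> demoSubProperties() {
    Map<String, Object> props = new HashMap<>();
    props.put("dubbo.application.name", "demo-app");
    MutablePropertySources sources = new MutablePropertySources();
    sources.addLast(new MapPropertySource("test", props));
    PropertySourcesPropertyResolver resolver = new PropertySourcesPropertyResolver(sources);
    return PropertySourcesUtils.getSubProperties(sources, resolver, "dubbo.application"); // {name=demo-app}
}
```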
hadoop_AbstractTask_setTaskType | /**
* Set TaskType for a Task.
* @param type Simple or Composite Task
*/
public final void setTaskType(final TaskType type) {
this.taskType = type;
} | 3.68 |
dubbo_AbstractJSONImpl_getListOfStrings | /**
* Gets a list from an object for the given key, and verifies all entries are strings. If the key
* is not present, this returns null. If the value is not a List or an entry is not a string,
* throws an exception.
*/
@Override
public List<String> getListOfStrings(Map<String, ?> obj, String key) {
assert obj != null;
List<?> list = getList(obj, key);
if (list == null) {
return null;
}
return checkStringList(list);
} | 3.68 |
hadoop_Validate_checkNotNullAndNotEmpty | /**
 * Validates that the given iterable is not null and not empty.
* @param <T> the type of iterable's elements.
* @param iter the argument reference to validate.
* @param argName the name of the argument being validated.
*/
public static <T> void checkNotNullAndNotEmpty(Iterable<T> iter,
String argName) {
checkNotNull(iter, argName);
int minNumElements = iter.iterator().hasNext() ? 1 : 0;
checkNotEmpty(minNumElements, argName);
} | 3.68 |
hbase_CompactionProgress_getTotalCompactingKVs | /** Returns the total compacting key values in currently running compaction */
public long getTotalCompactingKVs() {
if (totalCompactingKVs < currentCompactedKVs) {
LOG.debug("totalCompactingKVs={} less than currentCompactedKVs={}", totalCompactingKVs,
currentCompactedKVs);
return currentCompactedKVs;
}
return totalCompactingKVs;
} | 3.68 |
flink_JoinOperator_projectTuple18 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>
ProjectJoin<
I1,
I2,
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>
projectTuple18() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>
tType =
new TupleTypeInfo<
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>(fTypes);
return new ProjectJoin<
I1,
I2,
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
hadoop_FSEditLogAsync_tryRelease | // while draining, count the releases until release(int)
private void tryRelease(int permits) {
pendingReleases.getAndAdd(permits);
if (!draining.get()) {
super.release(pendingReleases.getAndSet(0));
}
} | 3.68 |
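The snippet depends on fields that are not shown (pendingReleases, draining); a self-contained approximation of the drain-and-accumulate idea, not the Hadoop class itself:

import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

// While draining is set, releases are only counted; once draining ends,
// the accumulated permits are released in one call.
class DrainAwareSemaphore extends Semaphore {
    private final AtomicInteger pendingReleases = new AtomicInteger();
    private final AtomicBoolean draining = new AtomicBoolean();

    DrainAwareSemaphore(int permits) {
        super(permits);
    }

    void setDraining(boolean value) {
        draining.set(value);
        if (!value) {
            tryRelease(0); // flush whatever accumulated while draining
        }
    }

    void tryRelease(int permits) {
        pendingReleases.getAndAdd(permits);
        if (!draining.get()) {
            super.release(pendingReleases.getAndSet(0));
        }
    }
}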
zxing_IntentIntegrator_startActivityForResult | /**
 * Start an activity. This method is defined to allow different ways of starting an
 * activity for newer versions of Android and for the compatibility library.
*
* @param intent Intent to start.
* @param code Request code for the activity
* @see Activity#startActivityForResult(Intent, int)
* @see Fragment#startActivityForResult(Intent, int)
*/
protected void startActivityForResult(Intent intent, int code) {
if (fragment == null) {
activity.startActivityForResult(intent, code);
} else {
fragment.startActivityForResult(intent, code);
}
} | 3.68 |
hbase_LruAdaptiveBlockCache_containsBlock | /**
   * Whether the cache contains the block with the specified cacheKey
* @return true if contains the block
*/
@Override
public boolean containsBlock(BlockCacheKey cacheKey) {
return map.containsKey(cacheKey);
} | 3.68 |
flink_InputSelection_select | /**
* Selects an input identified by the given {@code inputId}.
*
     * @param inputId the input id, numbered from 1 to 64, where `1` indicates the first
     *     input. As a special case, `-1` indicates all inputs.
* @return a reference to this object.
*/
public Builder select(int inputId) {
if (inputId > 0 && inputId <= 64) {
inputMask |= 1L << (inputId - 1);
} else if (inputId == -1L) {
inputMask = -1L;
} else {
throw new IllegalArgumentException(
"The inputId must be in the range of 1 to 64, or be -1.");
}
return this;
} | 3.68 |
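The bit manipulation can be verified in isolation; a small sketch that reproduces only the mask computation (not Flink's Builder class):

// Standalone reproduction of the mask logic from the snippet above.
public class InputMaskDemo {
    static long select(long inputMask, int inputId) {
        if (inputId > 0 && inputId <= 64) {
            return inputMask | (1L << (inputId - 1));
        } else if (inputId == -1) {
            return -1L; // all inputs
        }
        throw new IllegalArgumentException(
                "The inputId must be in the range of 1 to 64, or be -1.");
    }

    public static void main(String[] args) {
        long mask = 0L;
        mask = select(mask, 1); // selects the first input  -> 0b01
        mask = select(mask, 2); // adds the second input    -> 0b11
        System.out.println(Long.toBinaryString(mask)); // prints 11
    }
}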
flink_TypeExtractionUtils_hasSuperclass | /**
* Returns true if the given class has a superclass of given name.
*
* @param clazz class to be analyzed
* @param superClassName class name of the super class
*/
public static boolean hasSuperclass(Class<?> clazz, String superClassName) {
List<Type> hierarchy = new ArrayList<>();
getTypeHierarchy(hierarchy, clazz, Object.class);
for (Type t : hierarchy) {
if (isClassType(t) && typeToClass(t).getName().equals(superClassName)) {
return true;
}
}
return false;
} | 3.68 |
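For comparison, a plain-reflection sketch that walks only the superclass chain; it skips the generic-type bookkeeping done by getTypeHierarchy in the snippet above.

// Minimal alternative using plain reflection.
public final class SuperclassCheck {
    private SuperclassCheck() {}

    public static boolean hasSuperclass(Class<?> clazz, String superClassName) {
        for (Class<?> c = clazz; c != null; c = c.getSuperclass()) {
            if (c.getName().equals(superClassName)) {
                return true;
            }
        }
        return false;
    }
}
// Example: hasSuperclass(java.util.ArrayList.class, "java.util.AbstractList") returns true.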
flink_FileInputFormat_getStatistics | /**
* Obtains basic file statistics containing only file size. If the input is a directory, then
* the size is the sum of all contained files.
*
* @see
* org.apache.flink.api.common.io.InputFormat#getStatistics(org.apache.flink.api.common.io.statistics.BaseStatistics)
*/
@Override
public FileBaseStatistics getStatistics(BaseStatistics cachedStats) throws IOException {
final FileBaseStatistics cachedFileStats =
cachedStats instanceof FileBaseStatistics ? (FileBaseStatistics) cachedStats : null;
try {
return getFileStats(
cachedFileStats, getFilePaths(), new ArrayList<>(getFilePaths().length));
} catch (IOException ioex) {
if (LOG.isWarnEnabled()) {
LOG.warn(
"Could not determine statistics for paths '"
+ Arrays.toString(getFilePaths())
+ "' due to an io error: "
+ ioex.getMessage());
}
} catch (Throwable t) {
if (LOG.isErrorEnabled()) {
LOG.error(
"Unexpected problem while getting the file statistics for paths '"
+ Arrays.toString(getFilePaths())
+ "': "
+ t.getMessage(),
t);
}
}
// no statistics available
return null;
} | 3.68 |
framework_VScrollTable_ensureFocus | /**
* Ensure the component has a focus.
*
     * TODO the current implementation calls focus on the scroll body panel
     * whenever the component does not already have focus. In case the Table at
     * some point implements focus/blur listeners, this method may need to be
     * refined further.
*/
protected void ensureFocus() {
if (!hasFocus()) {
scrollBodyPanel.setFocus(true);
}
} | 3.68 |
flink_StreamTask_getCheckpointBarrierHandler | /**
* Acquires the optional {@link CheckpointBarrierHandler} associated with this stream task. The
     * {@code CheckpointBarrierHandler} should exist if the task has data inputs and needs to
     * align the barriers.
*/
protected Optional<CheckpointBarrierHandler> getCheckpointBarrierHandler() {
return Optional.empty();
} | 3.68 |
hadoop_ReadWriteDiskValidatorMetrics_sourceName | /**
* Get a source name by given directory name.
*
* @param dirName directory name
* @return the source name
*/
protected static String sourceName(String dirName) {
StringBuilder sb = new StringBuilder(RECORD_INFO.name());
sb.append(",dir=").append(dirName);
return sb.toString();
} | 3.68 |
AreaShop_RegionGroup_getName | /**
* Get the name of the group.
* @return The name of the group
*/
public String getName() {
return name;
} | 3.68 |
flink_CliFrontend_run | /**
     * Executes the run action.
*
* @param args Command line arguments for the run action.
*/
protected void run(String[] args) throws Exception {
LOG.info("Running 'run' command.");
final Options commandOptions = CliFrontendParser.getRunCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, true);
// evaluate help flag
if (commandLine.hasOption(HELP_OPTION.getOpt())) {
CliFrontendParser.printHelpForRun(customCommandLines);
return;
}
final CustomCommandLine activeCommandLine =
validateAndGetActiveCommandLine(checkNotNull(commandLine));
final ProgramOptions programOptions = ProgramOptions.create(commandLine);
final List<URL> jobJars = getJobJarAndDependencies(programOptions);
final Configuration effectiveConfiguration =
getEffectiveConfiguration(activeCommandLine, commandLine, programOptions, jobJars);
LOG.debug("Effective executor configuration: {}", effectiveConfiguration);
try (PackagedProgram program = getPackagedProgram(programOptions, effectiveConfiguration)) {
executeProgram(effectiveConfiguration, program);
}
} | 3.68 |
rocketmq-connect_BrokerBasedLog_send | /**
     * Send data to all workers.
     *
     * @param key      the record key to send
     * @param value    the record value to send
     * @param callback callback invoked once the asynchronous send completes
*/
@Override
public void send(K key, V value, Callback callback) {
try {
Map.Entry<byte[], byte[]> encode = encode(key, value);
byte[] body = encode.getValue();
if (body.length > MAX_MESSAGE_SIZE) {
log.error("Message size is greater than {} bytes, key: {}, value {}", MAX_MESSAGE_SIZE, key, value);
return;
}
String encodeKey = Base64Util.base64Encode(encode.getKey());
Message message = new Message(topicName, null, encodeKey, body);
producer.send(message, new SelectMessageQueueByHash(), encodeKey, new SendCallback() {
@Override
public void onSuccess(org.apache.rocketmq.client.producer.SendResult result) {
log.info("Send async message OK, msgId: {},topic:{}", result.getMsgId(), topicName);
callback.onCompletion(null, value);
}
@Override
public void onException(Throwable throwable) {
if (null != throwable) {
log.error("Send async message Failed, error: {}", throwable);
// Keep sending until success
send(key, value, callback);
}
}
});
} catch (Exception e) {
log.error("BrokerBaseLog send async message Failed.", e);
}
} | 3.68 |
hadoop_AzureNativeFileSystemStore_explicitFileExists | /**
* Checks whether an explicit file/folder exists.
* This is used by redo of atomic rename.
   * There was a bug (Apache JIRA HADOOP-12780) during atomic rename: if the
   * process crashes after an inner directory has been renamed while files
   * under that directory still remain to be renamed, then the restarted
   * process tries to redo the renames. It checks whether the directory exists
   * by calling filesystem.exists(), but exists() treats that directory as an
   * implicit directory and returns true because files exist under it. The redo
   * then tries to rename that directory and fails, since the corresponding
   * blob does not exist. So this method explicitly checks for the blob.
*/
@Override
public boolean explicitFileExists(String key) throws AzureException {
CloudBlobWrapper blob;
try {
blob = getBlobReference(key);
if (null != blob && blob.exists(getInstrumentedContext())) {
return true;
}
return false;
    } catch (StorageException | URISyntaxException e) {
      throw new AzureException(e);
    }
} | 3.68 |
querydsl_AbstractSQLQuery_as | /**
* Create an alias for the expression
*
* @param alias alias
* @return this as alias
*/
@SuppressWarnings("unchecked")
public SimpleExpression<T> as(Path<?> alias) {
return Expressions.as(this, (Path) alias);
} | 3.68 |
hadoop_FilePosition_setAbsolute | /**
* If the given {@code pos} lies within the current buffer, updates the current position to
* the specified value and returns true; otherwise returns false without changing the position.
*
* @param pos the absolute position to change the current position to if possible.
   * @return true if the current position was updated, false otherwise.
*/
public boolean setAbsolute(long pos) {
if (isValid() && isWithinCurrentBuffer(pos)) {
int relativePos = (int) (pos - bufferStartOffset);
buffer.position(relativePos);
return true;
} else {
return false;
}
} | 3.68 |
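A sketch of the same absolute-to-relative mapping with a plain ByteBuffer; it assumes bufferStartOffset is the file offset of the buffer's first byte, and the validity/bounds checks are simplified compared to the original class.

import java.nio.ByteBuffer;

// Maps an absolute file position into a buffer-relative position.
class BufferWindow {
    private final ByteBuffer buffer;
    private final long bufferStartOffset; // file offset of buffer index 0 (assumed)

    BufferWindow(ByteBuffer buffer, long bufferStartOffset) {
        this.buffer = buffer;
        this.bufferStartOffset = bufferStartOffset;
    }

    boolean setAbsolute(long pos) {
        long relative = pos - bufferStartOffset;
        if (relative >= 0 && relative < buffer.limit()) {
            buffer.position((int) relative);
            return true;
        }
        return false;
    }
}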
framework_VComboBox_setPageLength | /**
* Sets the number of items to show per page, or 0 for showing all items.
*
* @param pageLength
* new page length or 0 for all items
*/
public void setPageLength(int pageLength) {
this.pageLength = pageLength;
} | 3.68 |
hadoop_DefaultStringifier_load | /**
* Restores the object from the configuration.
*
* @param <K> the class of the item
* @param conf the configuration to use
* @param keyName the name of the key to use
* @param itemClass the class of the item
* @return restored object
* @throws IOException : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> K load(Configuration conf, String keyName,
Class<K> itemClass) throws IOException {
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
itemClass);
try {
String itemStr = conf.get(keyName);
return stringifier.fromString(itemStr);
} finally {
stringifier.close();
}
} | 3.68 |
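A typical round trip, assuming the companion DefaultStringifier.store method and that the item class (here Text, a Writable) has a registered Hadoop serialization:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DefaultStringifier;
import org.apache.hadoop.io.Text;

public class StringifierDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Text is Writable, so the default WritableSerialization can handle it.
        DefaultStringifier.store(conf, new Text("hello"), "demo.key");
        Text restored = DefaultStringifier.load(conf, "demo.key", Text.class);
        System.out.println(restored); // hello
    }
}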
hadoop_ManifestCommitter_cleanupJob | /**
* Execute the {@code CleanupJobStage} to remove the job attempt dir.
   * This runs the cleanup stage and, on exit, always logs committer statistics
   * at debug level and updates the common audit context.
* @param jobContext Context of the job whose output is being written.
* @throws IOException failure during cleanup
*/
@SuppressWarnings("deprecation")
@Override
public void cleanupJob(final JobContext jobContext) throws IOException {
ManifestCommitterConfig committerConfig = enterCommitter(false,
jobContext);
try {
executeCleanup(OP_STAGE_JOB_CLEANUP, jobContext, committerConfig);
} finally {
logCommitterStatisticsAtDebug();
updateCommonContextOnCommitterExit();
}
} | 3.68 |
flink_StreamProjection_projectTuple11 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>
SingleOutputStreamOperator<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>
projectTuple11() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>> tType =
new TupleTypeInfo<Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<IN, Tuple11<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
hadoop_OBSPosixBucketUtils_fsRemoveSubdir | // Delete a sub dir.
private static int fsRemoveSubdir(final OBSFileSystem owner,
final String subdirKey,
final List<KeyAndVersion> subdirList)
throws IOException {
fsRecursivelyDeleteDir(owner, subdirKey, false);
subdirList.add(new KeyAndVersion(subdirKey));
if (subdirList.size() == owner.getMaxEntriesToDelete()) {
// batch delete subdirs.
OBSCommonUtils.removeKeys(owner, subdirList, true, false);
return owner.getMaxEntriesToDelete();
}
return 0;
} | 3.68 |
flink_FlinkCalciteSqlValidator_declaredDescriptorColumn | /**
* Returns whether the given column has been declared in a {@link SqlKind#DESCRIPTOR} next to a
* {@link SqlKind#EXPLICIT_TABLE} within TVF operands.
*/
private static boolean declaredDescriptorColumn(SelectScope scope, Column column) {
if (!(scope.getNode() instanceof ExplicitTableSqlSelect)) {
return false;
}
final ExplicitTableSqlSelect select = (ExplicitTableSqlSelect) scope.getNode();
return select.descriptors.stream()
.map(SqlIdentifier::getSimple)
.anyMatch(id -> id.equals(column.getName()));
} | 3.68 |
hbase_DeleteNamespaceProcedure_removeNamespaceQuota | /**
* remove quota for the namespace
* @param env MasterProcedureEnv
* @param namespaceName name of the namespace in string format
**/
private static void removeNamespaceQuota(final MasterProcedureEnv env, final String namespaceName)
throws IOException {
env.getMasterServices().getMasterQuotaManager().removeNamespaceQuota(namespaceName);
} | 3.68 |
pulsar_TxnMetaImpl_addAckedPartitions | /**
     * Add the list of partitions that the transaction acknowledges to.
*
* @param partitions the list of partitions that the txn acknowledges to
* @return the transaction itself.
     * @throws InvalidTxnStatusException if the transaction is not in {@code OPEN} status
*/
@Override
public synchronized TxnMetaImpl addAckedPartitions(List<TransactionSubscription> partitions)
throws InvalidTxnStatusException {
checkTxnStatus(TxnStatus.OPEN);
this.ackedPartitions.addAll(partitions);
return this;
} | 3.68 |
framework_AbstractSplitPanel_setMinSplitPosition | /**
* Sets the minimum split position to the given position and unit. If the
* split position is reversed, maximum and minimum are also reversed.
*
* @param pos
* the minimum position of the split
* @param unit
* the unit (from {@link Sizeable}) in which the size is given.
* Allowed units are UNITS_PERCENTAGE and UNITS_PIXELS
*/
public void setMinSplitPosition(float pos, Unit unit) {
setSplitPositionLimits(pos, unit, getSplitterState(false).maxPosition,
posMaxUnit);
} | 3.68 |
framework_VTwinColSelect_getSelectionsCaption | /**
* Gets the selections caption HTML widget.
*
* @return the selections caption widget
*/
protected HTML getSelectionsCaption() {
if (selectionsCaption == null) {
selectionsCaption = new HTML();
selectionsCaption.setStyleName(CLASSNAME + "-caption-right");
selectionsCaption.getElement().getStyle()
.setFloat(com.google.gwt.dom.client.Style.Float.RIGHT);
captionWrapper.add(selectionsCaption);
}
return selectionsCaption;
} | 3.68 |
flink_CatalogManager_getTable | /**
* Retrieves a fully qualified table with a specific time. If the path is not yet fully
* qualified, use {@link #qualifyIdentifier(UnresolvedIdentifier)} first.
*
* @param objectIdentifier full path of the table to retrieve
* @param timestamp Timestamp of the table snapshot, which is milliseconds since 1970-01-01
* 00:00:00 UTC
* @return table at a specific time that the path points to.
*/
public Optional<ContextResolvedTable> getTable(
ObjectIdentifier objectIdentifier, long timestamp) {
CatalogBaseTable temporaryTable = temporaryTables.get(objectIdentifier);
if (temporaryTable != null) {
final ResolvedCatalogBaseTable<?> resolvedTable =
resolveCatalogBaseTable(temporaryTable);
return Optional.of(ContextResolvedTable.temporary(objectIdentifier, resolvedTable));
} else {
return getPermanentTable(objectIdentifier, timestamp);
}
} | 3.68 |
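A hypothetical call site, assuming an already-initialized CatalogManager; the catalog, database, and table names are invented.

import java.util.Optional;
import org.apache.flink.table.catalog.CatalogManager;
import org.apache.flink.table.catalog.ContextResolvedTable;
import org.apache.flink.table.catalog.ObjectIdentifier;

public class CatalogLookupDemo {
    static Optional<ContextResolvedTable> lookupAtNow(CatalogManager catalogManager) {
        ObjectIdentifier id = ObjectIdentifier.of("my_catalog", "my_db", "orders");
        // Resolve the table as of the current wall-clock time (milliseconds since epoch).
        return catalogManager.getTable(id, System.currentTimeMillis());
    }
}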