name (string) | code_snippet (string) | score (float64, 3.26-3.68)
---|---|---|
pulsar_ManagedCursorContainer_isEmpty | /**
* Check whether there are any cursors.
* @return true if there are no cursors and false if there are
*/
public boolean isEmpty() {
long stamp = rwLock.tryOptimisticRead();
boolean isEmpty = cursors.isEmpty();
if (!rwLock.validate(stamp)) {
// Fallback to read lock
stamp = rwLock.readLock();
try {
isEmpty = cursors.isEmpty();
} finally {
rwLock.unlockRead(stamp);
}
}
return isEmpty;
} | 3.68 |
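The snippet above uses the StampedLock optimistic-read pattern: try a lock-free read first, then fall back to a full read lock only if a concurrent write invalidated the stamp. A minimal, self-contained sketch of the same pattern using only the JDK (the OptimisticReadCounter class and its field are illustrative, not part of Pulsar):

```java
import java.util.concurrent.locks.StampedLock;

// Illustrative holder class; not part of Pulsar.
public class OptimisticReadCounter {
    private final StampedLock rwLock = new StampedLock();
    private int count;

    public void increment() {
        long stamp = rwLock.writeLock();
        try {
            count++;
        } finally {
            rwLock.unlockWrite(stamp);
        }
    }

    public boolean isEmpty() {
        long stamp = rwLock.tryOptimisticRead(); // lock-free fast path
        boolean empty = count == 0;
        if (!rwLock.validate(stamp)) {           // a write slipped in; retry under a real read lock
            stamp = rwLock.readLock();
            try {
                empty = count == 0;
            } finally {
                rwLock.unlockRead(stamp);
            }
        }
        return empty;
    }
}
```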
hudi_HoodieAvroUtils_getNestedFieldValAsString | /**
* Obtain value of the provided field as string, denoted by dot notation. e.g: a.b.c
*/
public static String getNestedFieldValAsString(GenericRecord record, String fieldName, boolean returnNullIfNotFound, boolean consistentLogicalTimestampEnabled) {
Object obj = getNestedFieldVal(record, fieldName, returnNullIfNotFound, consistentLogicalTimestampEnabled);
return StringUtils.objToString(obj);
} | 3.68 |
hbase_TimeoutExceptionInjector_complete | /**
* For all time forward, do not throw an error because the process has completed.
*/
public void complete() {
synchronized (this.timerTask) {
if (this.complete) {
LOG.warn("Timer already marked completed, ignoring!");
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Marking timer as complete - no error notifications will be received for "
+ "this timer.");
}
this.complete = true;
}
this.timer.cancel();
} | 3.68 |
hadoop_ExtensionHelper_close | /**
* Close an extension if it is closeable.
* Any error raised is caught and logged.
* @param extension extension instance.
*/
public static void close(Object extension) {
ifBoundDTExtension(extension,
v -> {
IOUtils.closeStreams(v);
return null;
});
} | 3.68 |
rocketmq-connect_ConnectorConfig_originalConfig | /**
* Returns the original config.
*
* @return the original config properties
*/
public Map<String, String> originalConfig() {
return config.getProperties();
} | 3.68 |
hadoop_Server_ensureOperational | /**
* Verifies the server is operational.
*
* @throws IllegalStateException thrown if the server is not operational.
*/
protected void ensureOperational() {
if (!getStatus().isOperational()) {
throw new IllegalStateException("Server is not running");
}
} | 3.68 |
hadoop_ReadBufferManager_purgeBuffersForStream | /**
* Purging the buffers associated with an {@link AbfsInputStream}
* from {@link ReadBufferManager} when stream is closed.
* @param stream input stream.
*/
public synchronized void purgeBuffersForStream(AbfsInputStream stream) {
LOGGER.debug("Purging stale buffers for AbfsInputStream {} ", stream);
readAheadQueue.removeIf(readBuffer -> readBuffer.getStream() == stream);
purgeList(stream, completedReadList);
} | 3.68 |
flink_ParquetColumnarRowSplitReader_nextBatch | /** Advances to the next batch of rows. Returns false if there are no more. */
private boolean nextBatch() throws IOException {
for (WritableColumnVector v : writableVectors) {
v.reset();
}
columnarBatch.setNumRows(0);
if (rowsReturned >= totalRowCount) {
return false;
}
if (rowsReturned == totalCountLoadedSoFar) {
readNextRowGroup();
}
int num = (int) Math.min(batchSize, totalCountLoadedSoFar - rowsReturned);
for (int i = 0; i < columnReaders.length; ++i) {
//noinspection unchecked
columnReaders[i].readToVector(num, writableVectors[i]);
}
rowsReturned += num;
columnarBatch.setNumRows(num);
rowsInBatch = num;
return true;
} | 3.68 |
hbase_MapReduceBackupCopyJob_updateProgress | /**
* Update the ongoing backup with new progress.
* @param backupInfo backup info
* @param backupManager backup manager used to persist the update
* @param newProgress progress
* @param bytesCopied bytes copied
* @throws IOException exception
*/
static void updateProgress(BackupInfo backupInfo, BackupManager backupManager, int newProgress,
long bytesCopied) throws IOException {
// compose the new backup progress data, using fake number for now
String backupProgressData = newProgress + "%";
backupInfo.setProgress(newProgress);
backupManager.updateBackupInfo(backupInfo);
LOG.debug("Backup progress data \"" + backupProgressData
+ "\" has been updated to backup system table for " + backupInfo.getBackupId());
} | 3.68 |
hbase_Result_getColumnCells | /**
* Return the Cells for the specific column. The Cells are sorted in the {@link CellComparator}
* order. That implies the first entry in the list is the most recent column. If the query (Scan
* or Get) only requested 1 version the list will contain at most 1 entry. If the column did not
* exist in the result set (either the column does not exist or the column was not selected in the
* query) the list will be empty. Also see getColumnLatest which returns just a Cell
* @param family the family
* @return a list of Cells for this column or empty list if the column did not exist in the result
* set
*/
public List<Cell> getColumnCells(byte[] family, byte[] qualifier) {
List<Cell> result = new ArrayList<>();
Cell[] kvs = rawCells();
if (kvs == null || kvs.length == 0) {
return result;
}
int pos = binarySearch(kvs, family, qualifier);
if (pos == -1) {
return result; // can't find it
}
for (int i = pos; i < kvs.length; i++) {
if (CellUtil.matchingColumn(kvs[i], family, qualifier)) {
result.add(kvs[i]);
} else {
break;
}
}
return result;
} | 3.68 |
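The lookup above is a common pattern: binary-search to a starting position, then scan forward while entries still match the requested column. A standalone sketch of the same "find the start, collect the run" idea over a sorted array (plain JDK; the SortedRunLookup class and sample data are made up):

```java
import java.util.ArrayList;
import java.util.List;

// Illustrative only: find the run of entries equal to 'target' in a sorted array.
public class SortedRunLookup {
    static List<Integer> indexesOf(int[] sorted, int target) {
        List<Integer> result = new ArrayList<>();
        // lower-bound binary search: first index whose value is >= target
        int lo = 0, hi = sorted.length;
        while (lo < hi) {
            int mid = (lo + hi) >>> 1;
            if (sorted[mid] < target) {
                lo = mid + 1;
            } else {
                hi = mid;
            }
        }
        // forward scan while the value still matches, mirroring getColumnCells
        for (int i = lo; i < sorted.length && sorted[i] == target; i++) {
            result.add(i);
        }
        return result;
    }

    public static void main(String[] args) {
        int[] data = {1, 3, 3, 3, 7, 9};
        System.out.println(indexesOf(data, 3)); // [1, 2, 3]
    }
}
```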
hadoop_ZStandardCompressor_getBytesRead | /**
* <p>Returns the total number of uncompressed bytes input so far.</p>
*
* @return the total (non-negative) number of uncompressed bytes input so far
*/
@Override
public long getBytesRead() {
checkStream();
return bytesRead;
} | 3.68 |
hbase_AuthUtil_loginClient | /**
* For kerberized cluster, return login user (from kinit or from keytab if specified). For
* non-kerberized cluster, return system user.
* @param conf configuration
* @throws IOException login exception
*/
@InterfaceAudience.Private
public static User loginClient(Configuration conf) throws IOException {
UserProvider provider = UserProvider.instantiate(conf);
User user = provider.getCurrent();
boolean securityOn = provider.isHBaseSecurityEnabled() && provider.isHadoopSecurityEnabled();
if (securityOn) {
boolean fromKeytab = provider.shouldLoginFromKeytab();
if (user.getUGI().hasKerberosCredentials()) {
// There's already a login user.
// But we should avoid misuse credentials which is a dangerous security issue,
// so here check whether user specified a keytab and a principal:
// 1. Yes, check if user principal match.
// a. match, just return.
// b. mismatch, login using keytab.
// 2. No, user may login through kinit, this is the old way, also just return.
if (fromKeytab) {
return checkPrincipalMatch(conf, user.getUGI().getUserName())
? user
: loginFromKeytabAndReturnUser(provider);
}
return user;
} else if (fromKeytab) {
// Kerberos is on and client specify a keytab and principal, but client doesn't login yet.
return loginFromKeytabAndReturnUser(provider);
}
}
return user;
} | 3.68 |
hbase_WALActionsListener_postLogArchive | /**
* The WAL has been archived.
* @param oldPath the path to the old wal
* @param newPath the path to the new wal
*/
default void postLogArchive(Path oldPath, Path newPath) throws IOException {
} | 3.68 |
querydsl_PolygonExpression_exteriorRing | /**
* Returns the exterior ring of this Polygon.
*
* @return exterior ring
*/
public LineStringExpression<?> exteriorRing() {
if (exterorRing == null) {
exterorRing = GeometryExpressions.lineStringOperation(SpatialOps.EXTERIOR_RING, mixin);
}
return exterorRing;
} | 3.68 |
hudi_HoodieWriteHandle_canWrite | /**
* Determines whether we can accept the incoming records into the current file, depending on:
* <p>
* - Whether it belongs to the same partitionPath as existing records
* - Whether the current file's written bytes are less than the max file size
*/
public boolean canWrite(HoodieRecord record) {
return false;
} | 3.68 |
hadoop_ReverseZoneUtils_getSubnetCountForReverseZones | /**
* When splitting the reverse zone, return the number of subnets needed,
* given the range and netmask.
*
* @param conf the Hadoop configuration.
* @return The number of subnets given the range and netmask.
*/
protected static long getSubnetCountForReverseZones(Configuration conf) {
String subnet = conf.get(KEY_DNS_ZONE_SUBNET);
String mask = conf.get(KEY_DNS_ZONE_MASK);
String range = conf.get(KEY_DNS_SPLIT_REVERSE_ZONE_RANGE);
int parsedRange;
try {
parsedRange = Integer.parseInt(range);
} catch (NumberFormatException e) {
LOG.error("The supplied range is not a valid integer: Supplied range: ",
range);
throw e;
}
if (parsedRange < 0) {
String msg = String
.format("Range cannot be negative: Supplied range: %d", parsedRange);
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
long ipCount;
try {
SubnetUtils subnetUtils = new SubnetUtils(subnet, mask);
subnetUtils.setInclusiveHostCount(true);
ipCount = subnetUtils.getInfo().getAddressCountLong();
} catch (IllegalArgumentException e) {
LOG.error("The subnet or mask is invalid: Subnet: {} Mask: {}", subnet,
mask);
throw e;
}
if (parsedRange == 0) {
return ipCount;
}
return ipCount / parsedRange;
} | 3.68 |
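To make the subnet arithmetic concrete: with inclusive host counting, a /24 network has 256 addresses, so a split range of 2 yields 128 reverse zones. A hedged sketch of the same arithmetic from a CIDR prefix length, without the commons-net SubnetUtils dependency (the ReverseZoneMath class and values are illustrative):

```java
// Illustrative arithmetic only; the real code derives the count via org.apache.commons.net.util.SubnetUtils.
public class ReverseZoneMath {
    static long subnetCount(int prefixLength, long range) {
        long ipCount = 1L << (32 - prefixLength); // inclusive host count of the network
        return range == 0 ? ipCount : ipCount / range;
    }

    public static void main(String[] args) {
        System.out.println(subnetCount(24, 2));   // 256 / 2 = 128
        System.out.println(subnetCount(16, 256)); // 65536 / 256 = 256
    }
}
```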
flink_KeyGroupRange_contains | /**
* Checks whether or not a single key-group is contained in the range.
*
* @param keyGroup Key-group to check for inclusion.
* @return True, only if the key-group is in the range.
*/
@Override
public boolean contains(int keyGroup) {
return keyGroup >= startKeyGroup && keyGroup <= endKeyGroup;
} | 3.68 |
pulsar_PulsarClientImpl_newTableViewBuilder | /**
* @deprecated use {@link #newTableView(Schema)} instead.
*/
@Override
@Deprecated
public <T> TableViewBuilder<T> newTableViewBuilder(Schema<T> schema) {
return new TableViewBuilderImpl<>(this, schema);
} | 3.68 |
hbase_MasterObserver_preEnableTable | /**
* Called prior to enabling a table. Called as part of enable table RPC call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
*/
default void preEnableTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName) throws IOException {
} | 3.68 |
hmily_XaState_valueOf | /**
* Value of xa state.
*
* @param state the state
* @return the xa state
*/
public static XaState valueOf(final int state) {
return Arrays.stream(XaState.values()).filter(e -> e.getState() == state).findFirst().orElse(STATUS_UNKNOWN);
} | 3.68 |
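The stream-based lookup above maps an integer state code back to an enum constant with a safe fallback. A minimal standalone version of the pattern (the Status enum and its codes are made up, not hmily's XaState values):

```java
import java.util.Arrays;

// Illustrative enum; the names and codes are not hmily's.
public enum Status {
    ACTIVE(0), ROLLBACK(1), COMMIT(2), UNKNOWN(-1);

    private final int code;

    Status(int code) {
        this.code = code;
    }

    public int getCode() {
        return code;
    }

    /** Maps an integer code to its constant, falling back to UNKNOWN. */
    public static Status fromCode(int code) {
        return Arrays.stream(values())
                .filter(s -> s.code == code)
                .findFirst()
                .orElse(UNKNOWN);
    }
}
```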
hbase_HBaseServerBase_setupWindows | /**
* If running on Windows, do windows-specific setup.
*/
private static void setupWindows(final Configuration conf, ConfigurationManager cm) {
if (!SystemUtils.IS_OS_WINDOWS) {
HBasePlatformDependent.handle("HUP", (number, name) -> {
conf.reloadConfiguration();
cm.notifyAllObservers(conf);
});
}
} | 3.68 |
hbase_Pair_getSecond | /**
* Return the second element stored in the pair.
*/
public T2 getSecond() {
return second;
} | 3.68 |
hadoop_RpcProgramPortmap_unset | /**
* When a program becomes unavailable, it should unregister itself with the
* port mapper program on the same machine. The parameters and results have
* meanings identical to those of "PMAPPROC_SET". The protocol and port number
* fields of the argument are ignored.
*/
private XDR unset(int xid, XDR in, XDR out) {
PortmapMapping mapping = PortmapRequest.mapping(in);
String key = PortmapMapping.key(mapping);
if (LOG.isDebugEnabled())
LOG.debug("Portmap remove key=" + key);
map.remove(key);
return PortmapResponse.booleanReply(out, xid, true);
} | 3.68 |
framework_TabSheet_writeDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractComponent#writeDesign(org.jsoup.nodes.Element
* , com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void writeDesign(Element design, DesignContext designContext) {
super.writeDesign(design, designContext);
TabSheet def = designContext.getDefaultInstance(this);
design.attributes();
// write tabs
if (!designContext.shouldWriteChildren(this, def)) {
return;
}
for (Component component : this) {
Tab tab = this.getTab(component);
writeTabToDesign(design, designContext, tab);
}
} | 3.68 |
morf_MySqlDialect_getSqlForLastDayOfMonth | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForLastDayOfMonth
*/
@Override
protected String getSqlForLastDayOfMonth(AliasedField date) {
return "LAST_DAY(" + getSqlFrom(date) + ")";
} | 3.68 |
framework_ContainerHierarchicalWrapper_areChildrenAllowed | /*
* Can the specified Item have any children? Don't add a JavaDoc comment
* here, we use the default documentation from implemented interface.
*/
@Override
public boolean areChildrenAllowed(Object itemId) {
// If the wrapped container implements the method directly, use it
if (hierarchical) {
return ((Container.Hierarchical) container)
.areChildrenAllowed(itemId);
}
if (noChildrenAllowed.contains(itemId)) {
return false;
}
return containsId(itemId);
} | 3.68 |
flink_SkipListUtils_removeLevelIndex | /**
* Remove the level index for the node from the skip list.
*
* @param node the node.
* @param spaceAllocator the space allocator.
* @param levelIndexHeader the head level index.
*/
static void removeLevelIndex(
long node, Allocator spaceAllocator, LevelIndexHeader levelIndexHeader) {
Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
int level = getLevel(segment, offsetInByteBuffer);
for (int i = 1; i <= level; i++) {
long prevNode = getPrevIndexNode(segment, offsetInByteBuffer, level, i);
long nextNode = getNextIndexNode(segment, offsetInByteBuffer, i);
helpSetNextNode(prevNode, nextNode, i, levelIndexHeader, spaceAllocator);
helpSetPrevNode(nextNode, prevNode, i, spaceAllocator);
}
} | 3.68 |
framework_AbstractSelect_getItem | /**
* Gets the item from the container with given id. If the container does not
* contain the requested item, null is returned.
*
* @param itemId
* the item id.
* @return the item from the container.
*/
@Override
public Item getItem(Object itemId) {
return items.getItem(itemId);
} | 3.68 |
hadoop_AppPlacementAllocator_initialize | /**
* Initialize this allocator, this will be called by Factory automatically.
*
* @param appSchedulingInfo appSchedulingInfo
* @param schedulerRequestKey schedulerRequestKey
* @param rmContext rmContext
*/
public void initialize(AppSchedulingInfo appSchedulingInfo,
SchedulerRequestKey schedulerRequestKey, RMContext rmContext) {
this.appSchedulingInfo = appSchedulingInfo;
this.rmContext = rmContext;
this.schedulerRequestKey = schedulerRequestKey;
multiNodeSortPolicyName = appSchedulingInfo
.getApplicationSchedulingEnvs().get(
ApplicationSchedulingConfig.ENV_MULTI_NODE_SORTING_POLICY_CLASS);
multiNodeSortingManager = (MultiNodeSortingManager<N>) rmContext
.getMultiNodeSortingManager();
if (LOG.isDebugEnabled()) {
LOG.debug(
"nodeLookupPolicy used for " + appSchedulingInfo.getApplicationId()
+ " is " + ((multiNodeSortPolicyName != null)
? multiNodeSortPolicyName : ""));
}
} | 3.68 |
hibernate-validator_MethodInheritanceTree_getOverriddenMethods | /**
* Returns a set containing all the overridden methods.
*
* @return a set containing all the overridden methods
*/
public Set<ExecutableElement> getOverriddenMethods() {
return overriddenMethods;
} | 3.68 |
flink_ZooKeeperUtils_createFileSystemStateStorage | /**
* Creates a {@link FileSystemStateStorageHelper} instance.
*
* @param configuration {@link Configuration} object
* @param prefix Prefix for the created files
* @param <T> Type of the state objects
* @return {@link FileSystemStateStorageHelper} instance
* @throws IOException if file system state storage cannot be created
*/
public static <T extends Serializable>
FileSystemStateStorageHelper<T> createFileSystemStateStorage(
Configuration configuration, String prefix) throws IOException {
return new FileSystemStateStorageHelper<>(
HighAvailabilityServicesUtils.getClusterHighAvailableStoragePath(configuration),
prefix);
} | 3.68 |
hadoop_ServiceTimelinePublisher_publishMetrics | /**
* Called from ServiceMetricsSink at regular interval of time.
* @param metrics of service or components
* @param entityId Id of entity
* @param entityType Type of entity
* @param timestamp timestamp at which the metric values are recorded
*/
public void publishMetrics(Iterable<AbstractMetric> metrics, String entityId,
String entityType, long timestamp) {
TimelineEntity entity = createTimelineEntity(entityId, entityType);
Set<TimelineMetric> entityMetrics = new HashSet<TimelineMetric>();
for (AbstractMetric metric : metrics) {
TimelineMetric timelineMetric = new TimelineMetric();
timelineMetric.setId(metric.name());
timelineMetric.addValue(timestamp, metric.value());
entityMetrics.add(timelineMetric);
}
entity.setMetrics(entityMetrics);
putEntity(entity);
} | 3.68 |
hadoop_LoggingAuditor_getReferrer | /**
* Get the referrer; visible for tests.
* @return the referrer.
*/
HttpReferrerAuditHeader getReferrer() {
return referrer;
} | 3.68 |
hbase_AssignmentVerificationReport_getUnassignedRegions | /**
* Return the unassigned regions
* @return unassigned regions
*/
List<RegionInfo> getUnassignedRegions() {
return unAssignedRegionsList;
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_compactIfNecessary | /**
* Perform a compaction on the Metadata Table.
* <p>
* Cases to be handled:
* 1. We cannot perform compaction if there are previous inflight operations on the dataset. This is because
* a compacted metadata base file at time Tx should represent all the actions on the dataset till time Tx.
* <p>
* 2. In multi-writer scenario, a parallel operation with a greater instantTime may have completed creating a
* deltacommit.
*/
protected void compactIfNecessary(BaseHoodieWriteClient writeClient, String latestDeltacommitTime) {
// Trigger compaction with suffixes based on the same instant time. This ensures that any future
// delta commits synced over will not have an instant time lesser than the last completed instant on the
// metadata table.
final String compactionInstantTime = writeClient.createNewInstantTime(false);
// we need to avoid checking compaction w/ same instant again.
// let's say we trigger compaction after C5 in MDT and so compaction completes with C4001. but C5 crashed before completing in MDT.
// and again w/ C6, we will re-attempt compaction at which point latest delta commit is C4 in MDT.
// and so we try compaction w/ instant C4001. So, we can avoid compaction if we already have compaction w/ same instant time.
if (metadataMetaClient.getActiveTimeline().filterCompletedInstants().containsInstant(compactionInstantTime)) {
LOG.info(String.format("Compaction with same %s time is already present in the timeline.", compactionInstantTime));
} else if (writeClient.scheduleCompactionAtInstant(compactionInstantTime, Option.empty())) {
LOG.info("Compaction is scheduled for timestamp " + compactionInstantTime);
writeClient.compact(compactionInstantTime);
} else if (metadataWriteConfig.isLogCompactionEnabled()) {
// Schedule and execute log compaction with suffixes based on the same instant time. This ensures that any future
// delta commits synced over will not have an instant time lesser than the last completed instant on the
// metadata table.
final String logCompactionInstantTime = HoodieTableMetadataUtil.createLogCompactionTimestamp(latestDeltacommitTime);
if (metadataMetaClient.getActiveTimeline().filterCompletedInstants().containsInstant(logCompactionInstantTime)) {
LOG.info(String.format("Log compaction with same %s time is already present in the timeline.", logCompactionInstantTime));
} else if (writeClient.scheduleLogCompactionAtInstant(logCompactionInstantTime, Option.empty())) {
LOG.info("Log compaction is scheduled for timestamp " + logCompactionInstantTime);
writeClient.logCompact(logCompactionInstantTime);
}
}
} | 3.68 |
hbase_RSGroupInfoManagerImpl_getDefaultServers | // Called by ServerEventsListenerThread. Presume it has lock on this manager when it runs.
private SortedSet<Address> getDefaultServers(List<RSGroupInfo> rsGroupInfoList) {
// Build a list of servers in other groups than default group, from rsGroupMap
Set<Address> serversInOtherGroup = new HashSet<>();
for (RSGroupInfo group : rsGroupInfoList) {
if (!RSGroupInfo.DEFAULT_GROUP.equals(group.getName())) { // not default group
serversInOtherGroup.addAll(group.getServers());
}
}
// Get all online servers from Zookeeper and find out servers in default group
SortedSet<Address> defaultServers = Sets.newTreeSet();
for (ServerName serverName : masterServices.getServerManager().getOnlineServers().keySet()) {
Address server = Address.fromParts(serverName.getHostname(), serverName.getPort());
if (!serversInOtherGroup.contains(server)) { // not in other groups
defaultServers.add(server);
}
}
return defaultServers;
} | 3.68 |
hbase_RestoreSnapshotHelper_removeHdfsRegions | /**
* Remove specified regions from the file-system, using the archiver.
*/
private void removeHdfsRegions(final ThreadPoolExecutor exec, final List<RegionInfo> regions)
throws IOException {
if (regions == null || regions.isEmpty()) return;
ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
@Override
public void editRegion(final RegionInfo hri) throws IOException {
HFileArchiver.archiveRegion(conf, fs, hri);
}
});
} | 3.68 |
flink_ApiExpressionUtils_objectToExpression | /**
* Converts a given object to an expression.
*
* <p>It converts:
*
* <ul>
* <li>{@code null} to null literal
* <li>{@link Row} to a call to a row constructor expression
* <li>{@link Map} to a call to a map constructor expression
* <li>{@link List} to a call to an array constructor expression
* <li>arrays to a call to an array constructor expression
* <li>Scala's {@code Seq} to an array constructor via reflection
* <li>Scala's {@code Map} to a map constructor via reflection
* <li>Scala's {@code BigDecimal} to a DECIMAL literal
* <li>if none of the above applies, the function tries to convert the object to a value
* literal with {@link #valueLiteral(Object)}
* </ul>
*
* @param expression An object to convert to an expression
*/
public static Expression objectToExpression(Object expression) {
if (expression == null) {
return valueLiteral(null, DataTypes.NULL());
} else if (expression instanceof ApiExpression) {
return ((ApiExpression) expression).toExpr();
} else if (expression instanceof Expression) {
return (Expression) expression;
} else if (expression instanceof Row) {
RowKind kind = ((Row) expression).getKind();
if (kind != RowKind.INSERT) {
throw new ValidationException(
String.format(
"Unsupported kind '%s' of a row [%s]. Only rows with 'INSERT' kind are supported when"
+ " converting to an expression.",
kind, expression));
}
return convertRow((Row) expression);
} else if (expression instanceof Map) {
return convertJavaMap((Map<?, ?>) expression);
} else if (expression instanceof byte[]) {
// BINARY LITERAL
return valueLiteral(expression);
} else if (expression.getClass().isArray()) {
return convertArray(expression);
} else if (expression instanceof List) {
return convertJavaList((List<?>) expression);
} else {
return convertScala(expression).orElseGet(() -> valueLiteral(expression));
}
} | 3.68 |
hbase_ByteBufferUtils_moveBufferToStream | /**
* Copy the data to the output stream and update position in buffer.
* @param out the stream to write bytes to
* @param in the buffer to read bytes from
* @param length the number of bytes to copy
*/
public static void moveBufferToStream(OutputStream out, ByteBuffer in, int length)
throws IOException {
copyBufferToStream(out, in, in.position(), length);
skip(in, length);
} | 3.68 |
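The helper above writes length bytes starting at the buffer's current position and then advances the position, so repeated calls walk through the buffer. A JDK-only sketch of that contract (the BufferToStream class is illustrative, not HBase's implementation of copyBufferToStream):

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

// Illustrative sketch: copy 'length' bytes from the buffer's position, then advance it.
public class BufferToStream {
    static void moveBufferToStream(OutputStream out, ByteBuffer in, int length) throws IOException {
        if (in.hasArray()) {
            out.write(in.array(), in.arrayOffset() + in.position(), length);
        } else {
            byte[] tmp = new byte[length];
            in.duplicate().get(tmp); // read via a duplicate so 'in' is untouched until we skip
            out.write(tmp);
        }
        in.position(in.position() + length); // advance, mirroring skip(in, length)
    }

    public static void main(String[] args) throws IOException {
        ByteBuffer buf = ByteBuffer.wrap("hello world".getBytes());
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        moveBufferToStream(out, buf, 5);
        System.out.println(out + " / remaining=" + buf.remaining()); // hello / remaining=6
    }
}
```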
hbase_Get_getFingerprint | /**
* Compile the table and column family (i.e. schema) information into a String. Useful for parsing
* and aggregation by debugging, logging, and administration tools.
*/
@Override
public Map<String, Object> getFingerprint() {
Map<String, Object> map = new HashMap<>();
List<String> families = new ArrayList<>(this.familyMap.entrySet().size());
map.put("families", families);
for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) {
families.add(Bytes.toStringBinary(entry.getKey()));
}
return map;
} | 3.68 |
flink_FutureCompletingBlockingQueue_size | /** Gets the size of the queue. */
public int size() {
lock.lock();
try {
return queue.size();
} finally {
lock.unlock();
}
} | 3.68 |
hadoop_JobMetaData_setJobFinishTime | /**
* Set job finish time.
*
* @param jobFinishTimeConfig job finish time.
* @return the reference to current {@link JobMetaData}.
*/
public final JobMetaData setJobFinishTime(final long jobFinishTimeConfig) {
resourceSkyline.setJobFinishTime(jobFinishTimeConfig);
return this;
} | 3.68 |
druid_ListDG_print | /*
* Print the graph (adjacency list)
*/
public void print() {
System.out.printf("== List Graph:\n");
for (int i = 0; i < mVexs.size(); i++) {
System.out.printf("%d(%c): ", i, mVexs.get(i).data);
ENode node = mVexs.get(i).firstEdge;
while (node != null) {
System.out.printf("%d(%c) ", node.ivex, mVexs.get(node.ivex).data);
node = node.nextEdge;
}
}
} | 3.68 |
hudi_HoodieMergedLogRecordReader_scanByFullKeys | /**
* Provides incremental scanning capability where only provided keys will be looked
* up in the delta-log files, scanned and subsequently materialized into the internal
* cache
*
* @param keys to be looked up
*/
public void scanByFullKeys(List<String> keys) {
// We can skip scanning in case reader is in full-scan mode, in which case all blocks
// are processed upfront (no additional scanning is necessary)
if (forceFullScan) {
return; // no-op
}
List<String> missingKeys = keys.stream()
.filter(key -> !recordBuffer.containsLogRecord(key))
.collect(Collectors.toList());
if (missingKeys.isEmpty()) {
// All the required records are already fetched, no-op
return;
}
scanInternal(Option.of(KeySpec.fullKeySpec(missingKeys)), false);
} | 3.68 |
flink_ResourceCounter_subtract | /**
* Subtracts decrement from the count of the given resourceProfile and returns the new value.
*
* @param resourceProfile resourceProfile from which to subtract decrement
* @param decrement decrement is the number by which to decrease resourceProfile
* @return new ResourceCounter containing the new value
*/
public ResourceCounter subtract(ResourceProfile resourceProfile, int decrement) {
final Map<ResourceProfile, Integer> newValues = new HashMap<>(resources);
final int newValue = resources.getOrDefault(resourceProfile, 0) - decrement;
updateNewValue(newValues, resourceProfile, newValue);
return new ResourceCounter(newValues);
} | 3.68 |
hadoop_ServiceLauncher_registerFailureHandling | /**
* Override point: register this class as the handler for the control-C
* and SIGINT interrupts.
*
* Subclasses can extend this with extra operations, such as
* an exception handler:
* <pre>
* Thread.setDefaultUncaughtExceptionHandler(
* new YarnUncaughtExceptionHandler());
* </pre>
*/
protected void registerFailureHandling() {
try {
interruptEscalator = new InterruptEscalator(this,
SHUTDOWN_TIME_ON_INTERRUPT);
interruptEscalator.register(IrqHandler.CONTROL_C);
interruptEscalator.register(IrqHandler.SIGTERM);
} catch (IllegalArgumentException e) {
// downgrade interrupt registration to warnings
LOG.warn("{}", e, e);
}
Thread.setDefaultUncaughtExceptionHandler(
new HadoopUncaughtExceptionHandler(this));
} | 3.68 |
hudi_ThreadUtils_collectActiveThreads | /**
* Fetches all active threads currently running in the JVM
*/
public static List<Thread> collectActiveThreads() {
ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
while (threadGroup.getParent() != null) {
threadGroup = threadGroup.getParent();
}
Thread[] activeThreads = new Thread[threadGroup.activeCount()];
threadGroup.enumerate(activeThreads);
return Arrays.asList(activeThreads);
} | 3.68 |
flink_PojoFieldUtils_getField | /**
* Finds a field by name from its declaring class. This also searches for the field in super
* classes.
*
* @param fieldName the name of the field to find.
* @param declaringClass the declaring class of the field.
* @return the field.
*/
@Nullable
static Field getField(String fieldName, Class<?> declaringClass) {
Class<?> clazz = declaringClass;
while (clazz != null) {
try {
Field field = clazz.getDeclaredField(fieldName);
field.setAccessible(true);
return field;
} catch (NoSuchFieldException e) {
clazz = clazz.getSuperclass();
}
}
return null;
} | 3.68 |
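A short usage sketch of the "walk up the class hierarchy" lookup above, showing that a private field declared only on a superclass is still found (the Base/Child classes are illustrative, and the lookup is copied standalone so the example runs without Flink):

```java
import java.lang.reflect.Field;

// Illustrative classes exercising the superclass-walking field lookup.
public class FieldLookupDemo {
    static class Base {
        private String secret = "base-value";
    }

    static class Child extends Base {
    }

    static Field getField(String fieldName, Class<?> declaringClass) {
        Class<?> clazz = declaringClass;
        while (clazz != null) {
            try {
                Field field = clazz.getDeclaredField(fieldName);
                field.setAccessible(true);
                return field;
            } catch (NoSuchFieldException e) {
                clazz = clazz.getSuperclass(); // keep walking up
            }
        }
        return null;
    }

    public static void main(String[] args) throws Exception {
        Field f = getField("secret", Child.class); // not declared on Child, found on Base
        System.out.println(f.getDeclaringClass().getSimpleName() + " -> " + f.get(new Child()));
        // prints: Base -> base-value
    }
}
```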
hudi_FlinkWriteHandleFactory_getFactory | /**
* Returns the write handle factory with given write config.
*/
public static <T, I, K, O> Factory<T, I, K, O> getFactory(
HoodieTableConfig tableConfig,
HoodieWriteConfig writeConfig,
boolean overwrite) {
if (overwrite) {
return CommitWriteHandleFactory.getInstance();
}
if (writeConfig.allowDuplicateInserts()) {
return ClusterWriteHandleFactory.getInstance();
}
if (tableConfig.getTableType().equals(HoodieTableType.MERGE_ON_READ)) {
return DeltaCommitWriteHandleFactory.getInstance();
} else if (tableConfig.isCDCEnabled()) {
return CdcWriteHandleFactory.getInstance();
} else {
return CommitWriteHandleFactory.getInstance();
}
} | 3.68 |
hbase_MasterWalManager_checkFileSystem | /**
* Checks to see if the file system is still accessible. If not, sets closed
* @return false if file system is not available
*/
private boolean checkFileSystem() {
if (this.fsOk) {
try {
FSUtils.checkFileSystemAvailable(this.fs);
FSUtils.checkDfsSafeMode(this.conf);
} catch (IOException e) {
services.abort("Shutting down HBase cluster: file system not available", e);
this.fsOk = false;
}
}
return this.fsOk;
} | 3.68 |
hmily_GrpcInvokeContext_setArgs | /**
* set args.
*
* @param args args
*/
public void setArgs(final Object[] args) {
this.args = args;
} | 3.68 |
hadoop_OBSDataBlocks_firstBlockSize | /**
* Returns the first block size.
*
* @return the first block size
*/
@VisibleForTesting
public int firstBlockSize() {
return this.firstBlockSize;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithQualifiedFieldNames | /**
* Tests a select with fields qualified with table specifiers.
*/
@Test
public void testSelectWithQualifiedFieldNames() {
SelectStatement stmt = new SelectStatement(new FieldReference(new TableReference(TEST_TABLE), STRING_FIELD),
new FieldReference(new TableReference(TEST_TABLE), INT_FIELD),
new FieldReference(new TableReference(TEST_TABLE), DATE_FIELD).as("aliasDate"))
.from(new TableReference(TEST_TABLE));
String expectedSql = "SELECT Test.stringField, Test.intField, Test.dateField AS aliasDate FROM " + tableName(TEST_TABLE);
assertEquals("Select statement with qualified field names", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
morf_GraphBasedUpgradeSchemaChangeVisitor_writeStatement | /**
* Write statement to the current node
*/
private void writeStatement(String statement) {
currentNode.addUpgradeStatements(statement);
} | 3.68 |
hibernate-validator_AnnotationMetaDataProvider_findTypeAnnotationConstraints | /**
* Finds type arguments constraints for method return values.
*/
protected Set<MetaConstraint<?>> findTypeAnnotationConstraints(JavaBeanExecutable<?> javaBeanExecutable) {
return findTypeArgumentsConstraints(
javaBeanExecutable,
new TypeArgumentReturnValueLocation( javaBeanExecutable ),
javaBeanExecutable.getAnnotatedType()
);
} | 3.68 |
hadoop_Cluster_getDelegationToken | /**
* Get a delegation token for the user from the JobTracker.
* @param renewer the user who can renew the token
* @return the new token
* @throws IOException
*/
public Token<DelegationTokenIdentifier>
getDelegationToken(Text renewer) throws IOException, InterruptedException{
// client has already set the service
return client.getDelegationToken(renewer);
} | 3.68 |
streampipes_SplitParagraphBlocksFilter_getInstance | /**
* Returns the singleton instance for SplitParagraphBlocksFilter.
*/
public static SplitParagraphBlocksFilter getInstance() {
return INSTANCE;
} | 3.68 |
flink_BooleanConditions_trueFunction | /** @return An {@link IterativeCondition} that always returns {@code true}. */
public static <T> IterativeCondition<T> trueFunction() {
return SimpleCondition.of(value -> true);
} | 3.68 |
zxing_Mode_forBits | /**
* @param bits four bits encoding a QR Code data mode
* @return Mode encoded by these bits
* @throws IllegalArgumentException if bits do not correspond to a known mode
*/
public static Mode forBits(int bits) {
switch (bits) {
case 0x0:
return TERMINATOR;
case 0x1:
return NUMERIC;
case 0x2:
return ALPHANUMERIC;
case 0x3:
return STRUCTURED_APPEND;
case 0x4:
return BYTE;
case 0x5:
return FNC1_FIRST_POSITION;
case 0x7:
return ECI;
case 0x8:
return KANJI;
case 0x9:
return FNC1_SECOND_POSITION;
case 0xD:
// 0xD is defined in GBT 18284-2000, may not be supported in foreign country
return HANZI;
default:
throw new IllegalArgumentException();
}
} | 3.68 |
hadoop_AbstractS3ACommitter_commitJob | /**
* Commit work.
* This consists of two stages: precommit and commit.
* <p>
* Precommit: identify pending uploads, then allow subclasses
* to validate the state of the destination and the pending uploads.
* Any failure here triggers an abort of all pending uploads.
* <p>
* Commit internal: do the final commit sequence.
* <p>
* The final commit action is to build the {@code _SUCCESS} file entry.
* </p>
* @param context job context
* @throws IOException any failure
*/
@Override
public void commitJob(JobContext context) throws IOException {
String id = jobIdString(context);
// the commit context is created outside a try-with-resources block
// so it can be used in exception handling.
CommitContext commitContext = null;
SuccessData successData = null;
IOException failure = null;
String stage = "preparing";
try (DurationInfo d = new DurationInfo(LOG,
"%s: commitJob(%s)", getRole(), id)) {
commitContext = initiateJobOperation(context);
ActiveCommit pending
= listPendingUploadsToCommit(commitContext);
stage = "precommit";
preCommitJob(commitContext, pending);
stage = "commit";
commitJobInternal(commitContext, pending);
stage = "completed";
jobCompleted(true);
stage = "marker";
successData = maybeCreateSuccessMarkerFromCommits(commitContext, pending);
stage = "cleanup";
cleanup(commitContext, false);
} catch (IOException e) {
// failure. record it for the summary
failure = e;
LOG.warn("Commit failure for job {}", id, e);
jobCompleted(false);
abortJobInternal(commitContext, true);
throw e;
} finally {
// save the report summary, even on failure
if (commitContext != null) {
if (successData == null) {
// if the commit did not get as far as creating success data, create one.
successData = createSuccessData(context, null, null,
getDestFS().getConf());
}
// save quietly, so no exceptions are raised
maybeSaveSummary(stage,
commitContext,
successData,
failure,
true,
true);
// and close that commit context
commitContext.close();
}
}
} | 3.68 |
flink_CompactCoordinator_coordinate | /** Do stable compaction coordination. */
private void coordinate(long checkpointId, Map<String, List<Path>> partFiles) {
Function<Path, Long> sizeFunc =
path -> {
try {
return fileSystem.getFileStatus(path).getLen();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
};
// We need a stable compaction algorithm.
Map<String, List<List<Path>>> compactUnits = new HashMap<>();
partFiles.forEach(
(p, files) -> {
// Sort files for stable compaction algorithm.
files.sort(Comparator.comparing(Path::getPath));
compactUnits.put(p, BinPacking.pack(files, sizeFunc, targetFileSize));
});
// Now, send this stable pack list to compactor.
// NOTE, use broadcast emitting (Because it needs to emit checkpoint barrier),
// operators will pick its units by unit id and task id.
int unitId = 0;
for (Map.Entry<String, List<List<Path>>> unitsEntry : compactUnits.entrySet()) {
String partition = unitsEntry.getKey();
for (List<Path> unit : unitsEntry.getValue()) {
output.collect(new StreamRecord<>(new CompactionUnit(unitId, partition, unit)));
unitId++;
}
}
LOG.debug("Coordinate checkpoint-{}, compaction units are: {}", checkpointId, compactUnits);
// Emit checkpoint barrier
output.collect(new StreamRecord<>(new EndCompaction(checkpointId)));
} | 3.68 |
hudi_StreamerUtil_flinkConf2TypedProperties | /**
* Converts the given {@link Configuration} to {@link TypedProperties}.
* The default values are also set up.
*
* @param conf The flink configuration
* @return a TypedProperties instance
*/
public static TypedProperties flinkConf2TypedProperties(Configuration conf) {
Configuration flatConf = FlinkOptions.flatOptions(conf);
Properties properties = new Properties();
// put all the set options
flatConf.addAllToProperties(properties);
// put all the default options
for (ConfigOption<?> option : FlinkOptions.optionalOptions()) {
if (!flatConf.contains(option) && option.hasDefaultValue()) {
properties.put(option.key(), option.defaultValue());
}
}
properties.put(HoodieTableConfig.TYPE.key(), conf.getString(FlinkOptions.TABLE_TYPE));
return new TypedProperties(properties);
} | 3.68 |
morf_SqlScriptExecutor_executeQuery | /**
* Runs a {@link NamedParameterPreparedStatement} (with parameters), allowing
* its {@link ResultSet} to be processed by the supplied implementation of
* {@link ResultSetProcessor}. {@link ResultSetProcessor#process(ResultSet)}
* can return a value of any type, which will form the return value of this
* method.
*
* @param preparedStatement Prepared statement to run.
* @param parameterMetadata the metadata describing the parameters.
* @param parameterData the values to insert.
* @param processor the code to be run to process the {@link ResultSet}.
* @param maxRows The maximum number of rows to be returned. Will inform the
* JDBC driver to tell the server not to return any more rows than
* this.
* @param queryTimeout the timeout in <b>seconds</b> after which the query
* will time out on the database side
* @return the result from {@link ResultSetProcessor#process(ResultSet)}.
*/
private <T> T executeQuery(NamedParameterPreparedStatement preparedStatement, Iterable<SqlParameter> parameterMetadata,
DataValueLookup parameterData, ResultSetProcessor<T> processor, Optional<Integer> maxRows, Optional<Integer> queryTimeout) {
if (sqlDialect == null) {
throw new IllegalStateException("Must construct with dialect");
}
try {
sqlDialect.prepareStatementParameters(preparedStatement, parameterMetadata, parameterData);
if (maxRows.isPresent()) {
preparedStatement.setMaxRows(maxRows.get());
}
if (queryTimeout.isPresent()) {
preparedStatement.setQueryTimeout(queryTimeout.get());
}
ResultSet resultSet = preparedStatement.executeQuery();
try {
T result = processor.process(resultSet);
visitor.afterExecute(preparedStatement.toString(), 0);
return result;
} finally {
resultSet.close();
}
} catch (SQLException e) {
throw reclassifiedRuntimeException(e, "SQL exception when executing query: [" + preparedStatement + "]");
}
} | 3.68 |
framework_UIDL_getDoubleAttribute | /**
* Gets the named attribute as a double.
*
* @param name
* the name of the attribute to get
* @return the attribute value
*/
public double getDoubleAttribute(String name) {
return attr().getRawNumber(name);
} | 3.68 |
hbase_SnapshotManager_isTakingAnySnapshot | /**
* The snapshot operation processing as following: <br>
* 1. Create a Snapshot Handler, and do some initialization; <br>
* 2. Put the handler into snapshotHandlers <br>
* So when we consider whether any snapshot is being taken, we should consider both the
* takingSnapshotLock and snapshotHandlers;
* @return true to indicate that there're some running snapshots.
*/
public synchronized boolean isTakingAnySnapshot() {
return this.takingSnapshotLock.getReadHoldCount() > 0 || this.snapshotHandlers.size() > 0
|| this.snapshotToProcIdMap.size() > 0;
} | 3.68 |
pulsar_TimeWindow_current | /**
* return current time window data.
*
* @param function function used to generate the window data.
* @return the current time window.
*/
public synchronized WindowWrap<T> current(Function<T, T> function) {
long millis = System.currentTimeMillis();
if (millis < 0) {
return null;
}
int idx = calculateTimeIdx(millis);
long windowStart = calculateWindowStart(millis);
while (true) {
WindowWrap<T> old = array.get(idx);
if (old == null) {
WindowWrap<T> window = new WindowWrap<>(interval, windowStart, null);
if (array.compareAndSet(idx, null, window)) {
T value = null == function ? null : function.apply(null);
window.value(value);
return window;
} else {
Thread.yield();
}
} else if (windowStart == old.start()) {
return old;
} else if (windowStart > old.start()) {
T value = null == function ? null : function.apply(old.value());
old.value(value);
old.resetWindowStart(windowStart);
return old;
} else {
// it should never go here
throw new IllegalStateException();
}
}
} | 3.68 |
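The window bookkeeping above reduces to two pieces of arithmetic: which bucket a timestamp falls into and where its window starts. A minimal sketch of just that arithmetic (interval and bucket count are assumed values; the real class additionally handles concurrent bucket creation and reset via compareAndSet):

```java
// Illustrative bucket arithmetic for a fixed-interval time window.
public class TimeWindowMath {
    static final long INTERVAL_MS = 1_000L; // assumed window interval
    static final int BUCKETS = 8;           // assumed number of buckets

    static int bucketIndex(long millis) {
        return (int) ((millis / INTERVAL_MS) % BUCKETS);
    }

    static long windowStart(long millis) {
        return millis - millis % INTERVAL_MS;
    }

    public static void main(String[] args) {
        long t = 1_700_000_003_456L;
        System.out.println(bucketIndex(t)); // which bucket this timestamp lands in
        System.out.println(windowStart(t)); // 1700000003000 -> start of its window
    }
}
```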
morf_RemoveIndex_reverse | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema reverse(Schema schema) {
Table original = schema.getTable(tableName);
List<String> indexes = new ArrayList<>();
for (Index index : original.indexes()) {
indexes.add(index.getName());
}
indexes.add(indexToBeRemoved.getName());
return new TableOverrideSchema(schema, new AlteredTable(original, null, null, indexes, Arrays.asList(new Index[] {indexToBeRemoved})));
} | 3.68 |
flink_DeclarativeSlotPoolService_onReleaseTaskManager | /**
* This method is called when a TaskManager is released. It can be overridden by subclasses.
*
* @param previouslyFulfilledRequirement previouslyFulfilledRequirement by the released
* TaskManager
*/
protected void onReleaseTaskManager(ResourceCounter previouslyFulfilledRequirement) {} | 3.68 |
hadoop_TimelineEvents_getEntityId | /**
* Get the entity Id
*
* @return the entity Id
*/
@XmlElement(name = "entity")
public String getEntityId() {
return entityId;
} | 3.68 |
framework_DateCell_updateGroup | /* Update top and bottom date range values. Add new index to the group. */
private void updateGroup(DateCellGroup targetGroup, DateCellGroup byGroup) {
Date newStart = targetGroup.getStart();
Date newEnd = targetGroup.getEnd();
if (byGroup.getStart().before(targetGroup.getStart())) {
newStart = byGroup.getEnd();
}
if (byGroup.getStart().after(targetGroup.getEnd())) {
newStart = byGroup.getStart();
}
targetGroup.setDateRange(new WeekGridMinuteTimeRange(newStart, newEnd));
for (Integer index : byGroup.getItems()) {
if (!targetGroup.getItems().contains(index)) {
targetGroup.add(index);
}
}
} | 3.68 |
hbase_ReplicationPeerManager_preTransitPeerSyncReplicationState | /** Returns the old description of the peer */
ReplicationPeerDescription preTransitPeerSyncReplicationState(String peerId,
SyncReplicationState state) throws DoNotRetryIOException {
ReplicationPeerDescription desc = checkPeerExists(peerId);
SyncReplicationState fromState = desc.getSyncReplicationState();
EnumSet<SyncReplicationState> allowedToStates = allowedTransition.get(fromState);
if (allowedToStates == null || !allowedToStates.contains(state)) {
throw new DoNotRetryIOException("Can not transit current cluster state from " + fromState
+ " to " + state + " for peer id=" + peerId);
}
return desc;
} | 3.68 |
querydsl_JDOQueryFactory_select | /**
* Create a new {@link JDOQuery} instance with the given projection
*
* @param exprs projection
* @return select(exprs)
*/
public JDOQuery<Tuple> select(Expression<?>... exprs) {
return query().select(exprs);
} | 3.68 |
flink_BatchTask_getLastOutputCollector | /** @return the last output collector in the collector chain */
@SuppressWarnings("unchecked")
protected Collector<OT> getLastOutputCollector() {
int numChained = this.chainedTasks.size();
return (numChained == 0)
? output
: (Collector<OT>) chainedTasks.get(numChained - 1).getOutputCollector();
} | 3.68 |
flink_Router_aggregateRoutes | /** Helper for toString. */
private static <T> void aggregateRoutes(
String method,
Map<PathPattern, T> routes,
List<String> accMethods,
List<String> accPatterns,
List<String> accTargets) {
for (Map.Entry<PathPattern, T> entry : routes.entrySet()) {
accMethods.add(method);
accPatterns.add("/" + entry.getKey().pattern());
accTargets.add(targetToString(entry.getValue()));
}
} | 3.68 |
flink_BaseMappingExtractor_putUniqueResultMappings | /** Result only template with explicit or extracted signatures. */
private void putUniqueResultMappings(
Map<FunctionSignatureTemplate, FunctionResultTemplate> collectedMappings,
@Nullable FunctionResultTemplate uniqueResult,
Set<FunctionSignatureTemplate> signatureOnly,
Method method) {
if (uniqueResult == null) {
return;
}
// input-only templates are valid everywhere; if they don't exist, fall back to extraction
if (!signatureOnly.isEmpty()) {
signatureOnly.forEach(s -> putMapping(collectedMappings, s, uniqueResult));
} else {
putMapping(collectedMappings, signatureExtraction.extract(this, method), uniqueResult);
}
} | 3.68 |
hmily_OrderServiceImpl_mockInventoryWithConfirmTimeout | /**
* Simulates an inventory timeout in the Confirm phase of the order payment operation.
*
* @param count purchase quantity
* @param amount payment amount
* @return string
*/
@Override
public String mockInventoryWithConfirmTimeout(Integer count, BigDecimal amount) {
Order order = saveOrder(count, amount);
paymentService.mockPaymentInventoryWithConfirmTimeout(order);
return "success";
} | 3.68 |
hudi_TimelineDiffHelper_getPendingLogCompactionTransitions | /**
* Getting pending log compaction transitions.
*/
private static List<Pair<HoodieInstant, HoodieInstant>> getPendingLogCompactionTransitions(HoodieTimeline oldTimeline,
HoodieTimeline newTimeline) {
Set<HoodieInstant> newTimelineInstants = newTimeline.getInstantsAsStream().collect(Collectors.toSet());
return oldTimeline.filterPendingLogCompactionTimeline().getInstantsAsStream().map(instant -> {
if (newTimelineInstants.contains(instant)) {
return Pair.of(instant, instant);
} else {
HoodieInstant logCompacted =
new HoodieInstant(State.COMPLETED, HoodieTimeline.DELTA_COMMIT_ACTION, instant.getTimestamp());
if (newTimelineInstants.contains(logCompacted)) {
return Pair.of(instant, logCompacted);
}
HoodieInstant inflightLogCompacted =
new HoodieInstant(State.INFLIGHT, HoodieTimeline.LOG_COMPACTION_ACTION, instant.getTimestamp());
if (newTimelineInstants.contains(inflightLogCompacted)) {
return Pair.of(instant, inflightLogCompacted);
}
return Pair.<HoodieInstant, HoodieInstant>of(instant, null);
}
}).collect(Collectors.toList());
} | 3.68 |
hudi_AvroInternalSchemaConverter_computeMinBytesForPrecision | /**
* Return the minimum number of bytes needed to store a decimal with a given 'precision'.
* reference from Spark release 3.1 .
*/
private static int computeMinBytesForPrecision(int precision) {
int numBytes = 1;
while (Math.pow(2.0, 8 * numBytes - 1) < Math.pow(10.0, precision)) {
numBytes += 1;
}
return numBytes;
} | 3.68 |
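A quick way to sanity-check the loop above is to print a few mappings: precision 1-2 fits in 1 byte, precision 10 needs 5 bytes, and precision 18 needs 8 bytes (i.e. a signed 64-bit long). A standalone sketch:

```java
// Standalone copy of the same loop, just to print a few precision -> bytes mappings.
public class DecimalBytes {
    static int minBytesForPrecision(int precision) {
        int numBytes = 1;
        while (Math.pow(2.0, 8 * numBytes - 1) < Math.pow(10.0, precision)) {
            numBytes += 1;
        }
        return numBytes;
    }

    public static void main(String[] args) {
        for (int p : new int[] {1, 2, 3, 9, 10, 18, 19}) {
            System.out.println("precision " + p + " -> " + minBytesForPrecision(p) + " bytes");
        }
        // precision 1 -> 1, 3 -> 2, 10 -> 5, 18 -> 8, 19 -> 9
    }
}
```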
framework_VCalendarAction_execute | /*
* (non-Javadoc)
*
* @see com.vaadin.terminal.gwt.client.ui.Action#execute()
*/
@Override
public void execute() {
String startDate = DATE_FORMAT_DATE_TIME.format(actionStartDate);
String endDate = DATE_FORMAT_DATE_TIME.format(actionEndDate);
if (event == null) {
rpc.actionOnEmptyCell(actionKey.split("-")[0], startDate, endDate);
} else {
rpc.actionOnEvent(actionKey.split("-")[0], startDate, endDate,
event.getIndex());
}
owner.getClient().getContextMenu().hide();
} | 3.68 |
framework_NativeSelect_setEmptySelectionCaption | /**
* Sets the empty selection caption.
* <p>
* The empty string {@code ""} is the default empty selection caption.
* <p>
* If empty selection is allowed via the
* {@link #setEmptySelectionAllowed(boolean)} method (it is by default) then
* the empty item will be shown with the given caption.
*
* @param caption
* the caption to set, not {@code null}
* @see #isSelected(Object)
* @since 8.0
*/
public void setEmptySelectionCaption(String caption) {
Objects.requireNonNull(caption, "Caption cannot be null");
getState().emptySelectionCaption = caption;
} | 3.68 |
framework_BrowserWindowOpener_getUrl | /**
* Returns the URL for this BrowserWindowOpener instance. Returns
* {@code null} if this instance is not URL resource based (a non URL based
* resource has been set for it).
*
* @since 7.4
*
* @return URL to open in the new browser window/tab when the extended
* component is clicked
*/
public String getUrl() {
Resource resource = getResource();
if (resource instanceof ExternalResource) {
return ((ExternalResource) resource).getURL();
}
return null;
} | 3.68 |
morf_SqlDialect_getSqlForAnalyseTable | /**
* Generate the SQL to run analysis on a table.
*
* @param table The table to run the analysis on.
* @return The SQL statements to analyse the table.
*/
public Collection<String> getSqlForAnalyseTable(@SuppressWarnings("unused") Table table) {
return SqlDialect.NO_STATEMENTS;
} | 3.68 |
hadoop_ClusterSummarizer_getNumBlacklistedTrackers | // Getters
protected int getNumBlacklistedTrackers() {
return numBlacklistedTrackers;
} | 3.68 |
hbase_HFileArchiveTableMonitor_shouldArchiveTable | /**
* Determine if the given table should or should not allow its hfiles to be deleted in the archive
* @param tableName name of the table to check
* @return <tt>true</tt> if its store files should be retained, <tt>false</tt> otherwise
*/
public synchronized boolean shouldArchiveTable(String tableName) {
return archivedTables.contains(tableName);
} | 3.68 |
dubbo_InternalThread_threadLocalMap | /**
* Returns the internal data structure that keeps the threadLocal variables bound to this thread.
* Note that this method is for internal use only, and thus is subject to change at any time.
*/
public final InternalThreadLocalMap threadLocalMap() {
return threadLocalMap;
} | 3.68 |
hudi_HoodieTableConfig_getPartitionFieldProp | /**
* @return the partition field prop.
* @deprecated please use {@link #getPartitionFields()} instead
*/
@Deprecated
public String getPartitionFieldProp() {
// NOTE: We're adding a stub returning empty string to stay compatible w/ pre-existing
// behavior until this method is fully deprecated
return Option.ofNullable(getString(PARTITION_FIELDS)).orElse("");
} | 3.68 |
hadoop_Find_createOptions | /** Create a new set of find options. */
private FindOptions createOptions() {
FindOptions options = new FindOptions();
options.setOut(out);
options.setErr(err);
options.setIn(System.in);
options.setCommandFactory(getCommandFactory());
options.setConfiguration(getConf());
return options;
} | 3.68 |
zilla_ManyToOneRingBuffer_read | /**
* {@inheritDoc}
*/
public int read(final MessageHandler handler, final int messageCountLimit)
{
int messagesRead = 0;
final AtomicBuffer buffer = this.buffer;
final long head = buffer.getLong(headPositionIndex);
final int capacity = this.capacity;
final int headIndex = (int)head & (capacity - 1);
final int maxBlockLength = Math.min(capacity - headIndex, capacity >> 1);
int bytesRead = 0;
try
{
while (bytesRead < maxBlockLength && messagesRead < messageCountLimit)
{
final int recordIndex = headIndex + bytesRead;
final int recordLength = buffer.getIntVolatile(lengthOffset(recordIndex));
if (recordLength <= 0)
{
break;
}
bytesRead += align(recordLength, ALIGNMENT);
final int messageTypeId = buffer.getInt(typeOffset(recordIndex));
if (PADDING_MSG_TYPE_ID == messageTypeId)
{
continue;
}
++messagesRead;
handler.onMessage(messageTypeId, buffer, recordIndex + HEADER_LENGTH, recordLength - HEADER_LENGTH);
}
}
finally
{
if (bytesRead != 0)
{
buffer.putLongOrdered(headPositionIndex, head + bytesRead);
}
}
return messagesRead;
} | 3.68 |
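The read loop above advances by align(recordLength, ALIGNMENT), i.e. the record length rounded up to the next multiple of a power-of-two alignment. A tiny sketch of that rounding trick (the alignment value of 8 is illustrative):

```java
// Round 'value' up to the next multiple of a power-of-two 'alignment'.
public class AlignDemo {
    static int align(int value, int alignment) {
        return (value + (alignment - 1)) & ~(alignment - 1);
    }

    public static void main(String[] args) {
        System.out.println(align(13, 8)); // 16
        System.out.println(align(16, 8)); // 16
        System.out.println(align(17, 8)); // 24
    }
}
```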
flink_SourceCoordinator_toBytes | /**
* Serialize the coordinator state. The current implementation may not be super efficient, but
* it should not matter that much because most of the state should be rather small. Large states
* themselves may already be a problem regardless of how the serialization is implemented.
*
* @return A byte array containing the serialized state of the source coordinator.
* @throws Exception When something goes wrong in serialization.
*/
private byte[] toBytes(long checkpointId) throws Exception {
return writeCheckpointBytes(
enumerator.snapshotState(checkpointId), enumCheckpointSerializer);
} | 3.68 |
hbase_CreateStoreFileWriterParams_isCompaction | /**
* Whether we are creating a new file in a compaction
*/
public CreateStoreFileWriterParams isCompaction(boolean isCompaction) {
this.isCompaction = isCompaction;
return this;
} | 3.68 |
flink_MetricListener_getMetric | /**
* Get registered {@link Metric} with identifier relative to the root metric group.
*
* <p>For example, identifier of metric "myMetric" registered in group "myGroup" under root
* metric group can be reached by identifier ("myGroup", "myMetric")
*
* @param identifier identifier relative to the root metric group
* @return Optional registered metric
*/
public <T extends Metric> Optional<T> getMetric(Class<T> metricType, String... identifier) {
if (!metrics.containsKey(getActualIdentifier(identifier))) {
return Optional.empty();
}
return Optional.of(metricType.cast(metrics.get(getActualIdentifier(identifier))));
} | 3.68 |
framework_BasicEvent_setStart | /*
* (non-Javadoc)
*
* @see
* com.vaadin.addon.calendar.event.CalendarEventEditor#setStart(java.util
* .Date)
*/
@Override
public void setStart(Date start) {
this.start = start;
fireEventChange();
} | 3.68 |
framework_Window_addCloseShortcut | /**
* Adds a close shortcut - pressing this key while holding down all (if any)
* modifiers specified while this Window is in focus will close the Window.
*
* @since 7.6
* @param keyCode
* the keycode for invoking the shortcut
* @param modifiers
* the (optional) modifiers for invoking the shortcut. Can be set
* to null to be explicit about not having modifiers.
*/
public void addCloseShortcut(int keyCode, int... modifiers) {
// Ignore attempts to re-add existing shortcuts
if (hasCloseShortcut(keyCode, modifiers)) {
return;
}
// Actually add the shortcut
CloseShortcut shortcut = new CloseShortcut(this, keyCode, modifiers);
addAction(shortcut);
closeShortcuts.add(shortcut);
} | 3.68 |
hbase_RSGroupAdminClient_getRSGroupInfoOfTable | /**
* Gets {@code RSGroupInfo} for the given table's group.
*/
public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException {
GetRSGroupInfoOfTableRequest request = GetRSGroupInfoOfTableRequest.newBuilder()
.setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
try {
GetRSGroupInfoOfTableResponse resp = stub.getRSGroupInfoOfTable(null, request);
if (resp.hasRSGroupInfo()) {
return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
}
return null;
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.68 |
hudi_DynamoTableUtils_createTableIfNotExists | /**
* Creates the table and ignores any errors if it already exists.
* @param dynamo The Dynamo client to use.
* @param createTableRequest The create table request.
* @return True if created, false otherwise.
*/
public static boolean createTableIfNotExists(final DynamoDbClient dynamo, final CreateTableRequest createTableRequest) {
try {
dynamo.createTable(createTableRequest);
return true;
} catch (final ResourceInUseException e) {
if (LOGGER.isTraceEnabled()) {
LOGGER.trace("Table " + createTableRequest.tableName() + " already exists", e);
}
}
return false;
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSqlForMathOperationsForExistingDataFix2 | /**
* @param sqlForRandom SQL to create a random number
* @return expected SQL for math operation for existing data fix 2
*/
protected String expectedSqlForMathOperationsForExistingDataFix2(String sqlForRandom) {
return "FLOOR(" + sqlForRandom + " * 999999.0)";
} | 3.68 |
hmily_EtcdClient_put | /**
* put config content.
*
* @param key config key
* @param content config content
*/
public void put(final String key, final String content) {
try {
client.getKVClient().put(ByteSequence.fromString(key), ByteSequence.fromString(content)).get();
} catch (InterruptedException | ExecutionException e) {
throw new ConfigException(e);
}
} | 3.68 |
hmily_InLineServiceImpl_cancel | /**
* Cancel.
*/
public void cancel() {
System.out.println("执行inline cancel......");
} | 3.68 |
flink_StreamExecutionEnvironment_fromSequence | /**
* Creates a new data stream that contains a sequence of numbers (longs) and is useful for
* testing and for cases that just need a stream of N events of any kind.
*
* <p>The generated source splits the sequence into as many parallel sub-sequences as there are
* parallel source readers. Each sub-sequence will be produced in order. If the parallelism is
* limited to one, the source will produce one sequence in order.
*
* <p>This source is always bounded. For very long sequences (for example over the entire domain
* of long integer values), you may consider executing the application in a streaming manner
* because of the end bound that is pretty far away.
*
* <p>Use {@link #fromSource(Source, WatermarkStrategy, String)} together with {@link
* NumberSequenceSource} if you required more control over the created sources. For example, if
* you want to set a {@link WatermarkStrategy}.
*
* @param from The number to start at (inclusive)
* @param to The number to stop at (inclusive)
*/
public DataStreamSource<Long> fromSequence(long from, long to) {
if (from > to) {
throw new IllegalArgumentException(
"Start of sequence must not be greater than the end");
}
return fromSource(
new NumberSequenceSource(from, to),
WatermarkStrategy.noWatermarks(),
"Sequence Source");
} | 3.68 |
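A hedged usage sketch of the API above in a bare Flink job (the SequenceJob class name is made up; it assumes the standard flink-streaming-java dependency and only the documented fromSequence/print/execute calls):

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

// Illustrative job: print the numbers 1..100 from the bounded sequence source.
public class SequenceJob {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromSequence(1L, 100L)
           .print();
        env.execute("fromSequence demo");
    }
}
```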
hadoop_DataStatistics_meanCI | /**
* Calculates the mean value within the 95% confidence interval.
* 1.96 is the standard z-value for 95%.
*
* @return the mean value plus the 95% confidence interval
*/
public synchronized double meanCI() {
if (count <= 1) {
return 0.0;
}
double currMean = mean();
double currStd = std();
return currMean + (DEFAULT_CI_FACTOR * currStd / Math.sqrt(count));
} | 3.68 |
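As a worked check of the formula above (mean + 1.96 * sigma / sqrt(n)): for the sample {8, 10, 12} the mean is 10, the sample standard deviation is 2, and the upper 95% bound is 10 + 1.96 * 2 / sqrt(3), roughly 12.26. A standalone sketch (the sample values are made up, and using the sample rather than population standard deviation is an assumption here):

```java
// Illustrative: mean plus a 95% confidence half-width, using the sample standard deviation.
public class MeanCI {
    static double meanCI(double[] xs) {
        int n = xs.length;
        double mean = 0;
        for (double x : xs) {
            mean += x / n;
        }
        double var = 0;
        for (double x : xs) {
            var += (x - mean) * (x - mean) / (n - 1); // sample variance (assumption)
        }
        return mean + 1.96 * Math.sqrt(var) / Math.sqrt(n);
    }

    public static void main(String[] args) {
        System.out.println(meanCI(new double[] {8, 10, 12})); // ~12.263
    }
}
```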