name | code_snippet | score |
---|---|---|
hudi_ClientIds_getClientId | /**
* Returns the client id from the heartbeat file path, the path name follows
* the naming convention: _, _1, _2, ... _N.
*/
private static String getClientId(Path path) {
String[] splits = path.getName().split(HEARTBEAT_FILE_NAME_PREFIX);
return splits.length > 1 ? splits[1] : INIT_CLIENT_ID;
} | 3.68 |
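
As an aside, the split-on-prefix parsing above can be illustrated with a tiny standalone sketch; the prefix and initial client id constants below are made-up stand-ins, since their real values are not shown in the snippet.

```java
public class ClientIdDemo {
    // hypothetical values; the real constants live in Hudi's ClientIds class
    private static final String HEARTBEAT_FILE_NAME_PREFIX = "heartbeat_";
    private static final String INIT_CLIENT_ID = "";

    // mirrors getClientId: everything after the prefix is the client id
    static String getClientId(String fileName) {
        String[] splits = fileName.split(HEARTBEAT_FILE_NAME_PREFIX);
        return splits.length > 1 ? splits[1] : INIT_CLIENT_ID;
    }

    public static void main(String[] args) {
        System.out.println(getClientId("heartbeat_3")); // "3"
        System.out.println(getClientId("heartbeat_"));  // "" (initial client id)
    }
}
```
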
morf_MergeStatement_shallowCopy | /**
* Performs a shallow copy to a builder, allowing a duplicate
* to be created and modified.
*
* @return A builder, initialised as a duplicate of this statement.
*/
@Override
public MergeStatementBuilder shallowCopy() {
return new MergeStatementBuilder(this);
} | 3.68 |
morf_InsertStatementBuilder_avoidDirectPath | /**
 * If supported by the dialect, hints to the database that direct-path loading (the {@code APPEND} hint) should be avoided in the insert statement.
*
* <p>In general, as with all query plan modification, <strong>do not use this unless you know
* exactly what you are doing</strong>.</p>
*
* <p>These directives are applied in the SQL in the order they are called on {@link InsertStatement}. This usually
* affects their precedence or relative importance, depending on the platform.</p>
*
* @return this, for method chaining.
*/
public InsertStatementBuilder avoidDirectPath() {
getHints().add(NoDirectPathQueryHint.INSTANCE);
return this;
} | 3.68 |
rocketmq-connect_RecordOffsetManagement_remove | /**
 * Removes this record from the submitted queue of its partition.
 *
 * @return true if the record was removed, false otherwise
*/
public boolean remove() {
Deque<SubmittedPosition> deque = records.get(position.getPartition());
if (deque == null) {
return false;
}
boolean result = deque.removeLastOccurrence(this);
if (deque.isEmpty()) {
records.remove(position.getPartition());
}
if (result) {
messageAcked();
} else {
log.warn("Attempted to remove record from submitted queue for partition {}, but the record has not been submitted or has already been removed", position.getPartition());
}
return result;
} | 3.68 |
pulsar_SchemaDefinitionImpl_getJsonDef | /**
 * Get the JSON schema definition.
 *
 * @return the JSON schema definition string
*/
public String getJsonDef() {
return jsonDef;
} | 3.68 |
hadoop_ConnectionContext_hasAvailableConcurrency | /**
* Return true if this connection context still has available concurrency,
* else return false.
*/
private synchronized boolean hasAvailableConcurrency() {
return this.numThreads < maxConcurrencyPerConn;
} | 3.68 |
hbase_StorageClusterStatusModel_getWriteRequestsCount | /** Returns the current total write requests made to region */
@XmlAttribute
public long getWriteRequestsCount() {
return writeRequestsCount;
} | 3.68 |
hbase_ServerManager_findServerWithSameHostnamePortWithLock | /**
* Assumes onlineServers is locked.
* @return ServerName with matching hostname and port.
*/
public ServerName findServerWithSameHostnamePortWithLock(final ServerName serverName) {
ServerName end =
ServerName.valueOf(serverName.getHostname(), serverName.getPort(), Long.MAX_VALUE);
ServerName r = onlineServers.lowerKey(end);
if (r != null) {
if (ServerName.isSameAddress(r, serverName)) {
return r;
}
}
return null;
} | 3.68 |
rocketmq-connect_WorkerConnector_getKeyValue | /**
 * Returns the connector configuration.
 *
 * @return the connector's key-value configuration
*/
public ConnectKeyValue getKeyValue() {
return keyValue;
} | 3.68 |
framework_ContainerHierarchicalWrapper_addToHierarchyWrapper | /**
* Adds the specified Item specified to the internal hierarchy structure.
* The new item is added as a root Item. The underlying container is not
* modified.
*
* @param itemId
* the ID of the item to add to the hierarchy.
*/
private void addToHierarchyWrapper(Object itemId) {
roots.add(itemId);
} | 3.68 |
hbase_CleanerChore_calculatePoolSize | /**
* Calculate size for cleaner pool.
* @param poolSize size from configuration
* @return size of pool after calculation
*/
static int calculatePoolSize(String poolSize) {
if (poolSize.matches("[1-9][0-9]*")) {
// If poolSize is an integer, return it directly,
      // but at most the number of available processors.
int size = Math.min(Integer.parseInt(poolSize), AVAIL_PROCESSORS);
if (size == AVAIL_PROCESSORS) {
LOG.warn("Use full core processors to scan dir, size={}", size);
}
return size;
} else if (poolSize.matches("0.[0-9]+|1.0")) {
// if poolSize is a double, return poolSize * availableProcessors;
// Ensure that we always return at least one.
int computedThreads = (int) (AVAIL_PROCESSORS * Double.parseDouble(poolSize));
if (computedThreads < 1) {
LOG.debug("Computed {} threads for CleanerChore, using 1 instead", computedThreads);
return 1;
}
return computedThreads;
} else {
LOG.error("Unrecognized value: " + poolSize + " for " + CHORE_POOL_SIZE
+ ", use default config: " + DEFAULT_CHORE_POOL_SIZE + " instead.");
return calculatePoolSize(DEFAULT_CHORE_POOL_SIZE);
}
} | 3.68 |
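
A minimal, dependency-free sketch of the same pool-size parsing (a plain integer capped at the core count, or a fraction of the cores, with a fallback default); the default value and class name here are assumptions for the example, not HBase's actual configuration.

```java
import java.util.regex.Pattern;

public class PoolSizeCalculator {
    private static final int AVAIL_PROCESSORS = Runtime.getRuntime().availableProcessors();
    private static final String DEFAULT_POOL_SIZE = "0.25"; // hypothetical default

    static int calculatePoolSize(String poolSize) {
        if (Pattern.matches("[1-9][0-9]*", poolSize)) {
            // plain integer: cap at the number of available processors
            return Math.min(Integer.parseInt(poolSize), AVAIL_PROCESSORS);
        } else if (Pattern.matches("0\\.[0-9]+|1\\.0", poolSize)) {
            // fraction: scale by available processors, but never drop below one thread
            return Math.max(1, (int) (AVAIL_PROCESSORS * Double.parseDouble(poolSize)));
        }
        // unrecognized value: fall back to the default
        return calculatePoolSize(DEFAULT_POOL_SIZE);
    }

    public static void main(String[] args) {
        System.out.println(calculatePoolSize("4"));   // min(4, cores)
        System.out.println(calculatePoolSize("0.5")); // half the cores, at least 1
    }
}
```
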
hbase_FileArchiverNotifierImpl_getSizeOfStoreFile | /**
* Computes the size of the store file given its name, region and family name in the archive
* directory.
*/
long getSizeOfStoreFile(TableName tn, String regionName, String family, String storeFile) {
Path familyArchivePath;
try {
familyArchivePath = HFileArchiveUtil.getStoreArchivePath(conf, tn, regionName, family);
} catch (IOException e) {
LOG.warn("Could not compute path for the archive directory for the region", e);
return 0L;
}
Path fileArchivePath = new Path(familyArchivePath, storeFile);
try {
if (fs.exists(fileArchivePath)) {
FileStatus[] status = fs.listStatus(fileArchivePath);
if (1 != status.length) {
LOG.warn("Expected " + fileArchivePath
+ " to be a file but was a directory, ignoring reference");
return 0L;
}
return status[0].getLen();
}
} catch (IOException e) {
LOG.warn("Could not obtain the status of " + fileArchivePath, e);
return 0L;
}
LOG.warn("Expected " + fileArchivePath + " to exist but does not, ignoring reference.");
return 0L;
} | 3.68 |
flink_AbstractUdfOperator_getBroadcastInputs | /**
 * Returns the broadcast inputs of this operator, mapped by name.
 *
 * @return The broadcast input root operators, keyed by name.
*/
public Map<String, Operator<?>> getBroadcastInputs() {
return this.broadcastInputs;
} | 3.68 |
hadoop_ApplicationEntity_getApplicationEvent | /**
* @param te TimelineEntity object.
* @param eventId event with this id needs to be fetched
* @return TimelineEvent if TimelineEntity contains the desired event.
*/
public static TimelineEvent getApplicationEvent(TimelineEntity te,
String eventId) {
if (isApplicationEntity(te)) {
for (TimelineEvent event : te.getEvents()) {
if (event.getId().equals(eventId)) {
return event;
}
}
}
return null;
} | 3.68 |
hudi_RowDataKeyGens_instance | /**
* Creates a {@link RowDataKeyGen} with given configuration.
*/
public static RowDataKeyGen instance(Configuration conf, RowType rowType, int taskId, String instantTime) {
String recordKeys = conf.getString(FlinkOptions.RECORD_KEY_FIELD);
if (hasRecordKey(recordKeys, rowType.getFieldNames())) {
return RowDataKeyGen.instance(conf, rowType);
} else {
return AutoRowDataKeyGen.instance(conf, rowType, taskId, instantTime);
}
} | 3.68 |
dubbo_ServiceDiscoveryRegistryDirectory_isNotificationReceived | /**
* This implementation makes sure all application names related to serviceListener received address notification.
* <p>
* FIXME, make sure deprecated "interface-application" mapping item be cleared in time.
*/
@Override
public boolean isNotificationReceived() {
return serviceListener == null
|| serviceListener.isDestroyed()
|| serviceListener.getAllInstances().size()
== serviceListener.getServiceNames().size();
} | 3.68 |
flink_ClassLeakCleaner_cleanUpLeakingClasses | /** Clean up the soft references of the classes under the specified class loader. */
public static synchronized void cleanUpLeakingClasses(ClassLoader classLoader)
throws ReflectiveOperationException, SecurityException, ClassCastException {
if (!leakedClassesCleanedUp) {
// clear the soft references
// see https://bugs.openjdk.java.net/browse/JDK-8199589 for more details
Class<?> clazz = Class.forName("java.io.ObjectStreamClass$Caches");
clearCache(clazz, "localDescs", classLoader);
clearCache(clazz, "reflectors", classLoader);
            // Netty uses finalizers heavily, and they still hold references to the user
            // class loader even after the job has finished.
// so, trigger garbage collection explicitly to:
// 1) trigger the execution of the `Finalizer`s of objects created by the finished jobs
// of this TaskManager
// 2) the references to the class loader will then be released and so the user class
// loader could be garbage collected finally
System.gc();
leakedClassesCleanedUp = true;
}
} | 3.68 |
framework_StaticSection_getRows | /**
* Returns an unmodifiable list of the rows in this section.
*
* @return the rows in this section
*/
protected List<ROW> getRows() {
return Collections.unmodifiableList(rows);
} | 3.68 |
pulsar_AuthorizationService_isValidOriginalPrincipal | /**
* Validates that the authenticatedPrincipal and the originalPrincipal are a valid combination.
* Valid combinations fulfill one of the following two rules:
* <p>
* 1. The authenticatedPrincipal is in {@link ServiceConfiguration#getProxyRoles()}, if, and only if,
* the originalPrincipal is set to a role that is not also in {@link ServiceConfiguration#getProxyRoles()}.
* <p>
* 2. The authenticatedPrincipal and the originalPrincipal are the same, but are not a proxyRole, when
* allowNonProxyPrincipalsToBeEqual is true.
*
* @return true when roles are a valid combination and false when roles are an invalid combination
*/
public boolean isValidOriginalPrincipal(String authenticatedPrincipal,
String originalPrincipal,
SocketAddress remoteAddress,
boolean allowNonProxyPrincipalsToBeEqual) {
String errorMsg = null;
if (conf.getProxyRoles().contains(authenticatedPrincipal)) {
if (StringUtils.isBlank(originalPrincipal)) {
errorMsg = "originalPrincipal must be provided when connecting with a proxy role.";
} else if (conf.getProxyRoles().contains(originalPrincipal)) {
errorMsg = "originalPrincipal cannot be a proxy role.";
}
} else if (StringUtils.isNotBlank(originalPrincipal)
&& !(allowNonProxyPrincipalsToBeEqual && originalPrincipal.equals(authenticatedPrincipal))) {
errorMsg = "cannot specify originalPrincipal when connecting without valid proxy role.";
}
if (errorMsg != null) {
log.warn("[{}] Illegal combination of role [{}] and originalPrincipal [{}]: {}", remoteAddress,
authenticatedPrincipal, originalPrincipal, errorMsg);
return false;
} else {
return true;
}
} | 3.68 |
framework_VAbstractPopupCalendar_closeCalendarPanel | /**
* Closes the open popup panel.
*/
public void closeCalendarPanel() {
if (open) {
toggleButtonClosesWithGuarantee = true;
popup.hide(true);
}
} | 3.68 |
hbase_HRegionServer_startServices | /**
* Start maintenance Threads, Server, Worker and lease checker threads. Start all threads we need
* to run. This is called after we've successfully registered with the Master. Install an
* UncaughtExceptionHandler that calls abort of RegionServer if we get an unhandled exception. We
* cannot set the handler on all threads. Server's internal Listener thread is off limits. For
* Server, if an OOME, it waits a while then retries. Meantime, a flush or a compaction that tries
* to run should trigger same critical condition and the shutdown will run. On its way out, this
* server will shut down Server. Leases are sort of inbetween. It has an internal thread that
* while it inherits from Chore, it keeps its own internal stop mechanism so needs to be stopped
* by this hosting server. Worker logs the exception and exits.
*/
private void startServices() throws IOException {
if (!isStopped() && !isAborted()) {
initializeThreads();
}
this.secureBulkLoadManager = new SecureBulkLoadManager(this.conf, asyncClusterConnection);
this.secureBulkLoadManager.start();
// Health checker thread.
if (isHealthCheckerConfigured()) {
int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ,
HConstants.DEFAULT_THREAD_WAKE_FREQUENCY);
healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration());
}
// Executor status collect thread.
if (
this.conf.getBoolean(HConstants.EXECUTOR_STATUS_COLLECT_ENABLED,
HConstants.DEFAULT_EXECUTOR_STATUS_COLLECT_ENABLED)
) {
int sleepTime =
this.conf.getInt(ExecutorStatusChore.WAKE_FREQ, ExecutorStatusChore.DEFAULT_WAKE_FREQ);
executorStatusChore = new ExecutorStatusChore(sleepTime, this, this.getExecutorService(),
this.metricsRegionServer.getMetricsSource());
}
this.walRoller = new LogRoller(this);
this.flushThroughputController = FlushThroughputControllerFactory.create(this, conf);
this.procedureResultReporter = new RemoteProcedureResultReporter(this);
// Create the CompactedFileDischarger chore executorService. This chore helps to
// remove the compacted files that will no longer be used in reads.
// Default is 2 mins. The default value for TTLCleaner is 5 mins so we set this to
// 2 mins so that compacted files can be archived before the TTLCleaner runs
int cleanerInterval = conf.getInt("hbase.hfile.compaction.discharger.interval", 2 * 60 * 1000);
this.compactedFileDischarger = new CompactedHFilesDischarger(cleanerInterval, this, this);
choreService.scheduleChore(compactedFileDischarger);
// Start executor services
final int openRegionThreads = conf.getInt("hbase.regionserver.executor.openregion.threads", 3);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.RS_OPEN_REGION).setCorePoolSize(openRegionThreads));
final int openMetaThreads = conf.getInt("hbase.regionserver.executor.openmeta.threads", 1);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.RS_OPEN_META).setCorePoolSize(openMetaThreads));
final int openPriorityRegionThreads =
conf.getInt("hbase.regionserver.executor.openpriorityregion.threads", 3);
executorService.startExecutorService(
executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_OPEN_PRIORITY_REGION)
.setCorePoolSize(openPriorityRegionThreads));
final int closeRegionThreads =
conf.getInt("hbase.regionserver.executor.closeregion.threads", 3);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.RS_CLOSE_REGION).setCorePoolSize(closeRegionThreads));
final int closeMetaThreads = conf.getInt("hbase.regionserver.executor.closemeta.threads", 1);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.RS_CLOSE_META).setCorePoolSize(closeMetaThreads));
if (conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false)) {
final int storeScannerParallelSeekThreads =
conf.getInt("hbase.storescanner.parallel.seek.threads", 10);
executorService.startExecutorService(
executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_PARALLEL_SEEK)
.setCorePoolSize(storeScannerParallelSeekThreads).setAllowCoreThreadTimeout(true));
}
final int logReplayOpsThreads =
conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER);
executorService.startExecutorService(
executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_LOG_REPLAY_OPS)
.setCorePoolSize(logReplayOpsThreads).setAllowCoreThreadTimeout(true));
// Start the threads for compacted files discharger
final int compactionDischargerThreads =
conf.getInt(CompactionConfiguration.HBASE_HFILE_COMPACTION_DISCHARGER_THREAD_COUNT, 10);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.RS_COMPACTED_FILES_DISCHARGER)
.setCorePoolSize(compactionDischargerThreads));
if (ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(conf)) {
final int regionReplicaFlushThreads =
conf.getInt("hbase.regionserver.region.replica.flusher.threads",
conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.RS_REGION_REPLICA_FLUSH_OPS)
.setCorePoolSize(regionReplicaFlushThreads));
}
final int refreshPeerThreads =
conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.RS_REFRESH_PEER).setCorePoolSize(refreshPeerThreads));
final int replaySyncReplicationWALThreads =
conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 1);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL)
.setCorePoolSize(replaySyncReplicationWALThreads));
final int switchRpcThrottleThreads =
conf.getInt("hbase.regionserver.executor.switch.rpc.throttle.threads", 1);
executorService.startExecutorService(
executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_SWITCH_RPC_THROTTLE)
.setCorePoolSize(switchRpcThrottleThreads));
final int claimReplicationQueueThreads =
conf.getInt("hbase.regionserver.executor.claim.replication.queue.threads", 1);
executorService.startExecutorService(
executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_CLAIM_REPLICATION_QUEUE)
.setCorePoolSize(claimReplicationQueueThreads));
final int rsSnapshotOperationThreads =
conf.getInt("hbase.regionserver.executor.snapshot.operations.threads", 3);
executorService.startExecutorService(
executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_SNAPSHOT_OPERATIONS)
.setCorePoolSize(rsSnapshotOperationThreads));
final int rsFlushOperationThreads =
conf.getInt("hbase.regionserver.executor.flush.operations.threads", 3);
executorService.startExecutorService(executorService.new ExecutorConfig()
.setExecutorType(ExecutorType.RS_FLUSH_OPERATIONS).setCorePoolSize(rsFlushOperationThreads));
Threads.setDaemonThreadRunning(this.walRoller, getName() + ".logRoller",
uncaughtExceptionHandler);
if (this.cacheFlusher != null) {
this.cacheFlusher.start(uncaughtExceptionHandler);
}
Threads.setDaemonThreadRunning(this.procedureResultReporter,
getName() + ".procedureResultReporter", uncaughtExceptionHandler);
if (this.compactionChecker != null) {
choreService.scheduleChore(compactionChecker);
}
if (this.periodicFlusher != null) {
choreService.scheduleChore(periodicFlusher);
}
if (this.healthCheckChore != null) {
choreService.scheduleChore(healthCheckChore);
}
if (this.executorStatusChore != null) {
choreService.scheduleChore(executorStatusChore);
}
if (this.nonceManagerChore != null) {
choreService.scheduleChore(nonceManagerChore);
}
if (this.storefileRefresher != null) {
choreService.scheduleChore(storefileRefresher);
}
if (this.fsUtilizationChore != null) {
choreService.scheduleChore(fsUtilizationChore);
}
if (this.namedQueueServiceChore != null) {
choreService.scheduleChore(namedQueueServiceChore);
}
if (this.brokenStoreFileCleaner != null) {
choreService.scheduleChore(brokenStoreFileCleaner);
}
if (this.rsMobFileCleanerChore != null) {
choreService.scheduleChore(rsMobFileCleanerChore);
}
if (replicationMarkerChore != null) {
LOG.info("Starting replication marker chore");
choreService.scheduleChore(replicationMarkerChore);
}
// Leases is not a Thread. Internally it runs a daemon thread. If it gets
// an unhandled exception, it will just exit.
Threads.setDaemonThreadRunning(this.leaseManager, getName() + ".leaseChecker",
uncaughtExceptionHandler);
// Create the log splitting worker and start it
// set a smaller retries to fast fail otherwise splitlogworker could be blocked for
// quite a while inside Connection layer. The worker won't be available for other
// tasks even after current task is preempted after a split task times out.
Configuration sinkConf = HBaseConfiguration.create(conf);
sinkConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
conf.getInt("hbase.log.replay.retries.number", 8)); // 8 retries take about 23 seconds
sinkConf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
conf.getInt("hbase.log.replay.rpc.timeout", 30000)); // default 30 seconds
sinkConf.setInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER, 1);
if (
this.csm != null
&& conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)
) {
// SplitLogWorker needs csm. If none, don't start this.
this.splitLogWorker = new SplitLogWorker(sinkConf, this, this, walFactory);
splitLogWorker.start();
LOG.debug("SplitLogWorker started");
}
// Memstore services.
startHeapMemoryManager();
// Call it after starting HeapMemoryManager.
initializeMemStoreChunkCreator(hMemManager);
} | 3.68 |
flink_BinaryArrayWriter_createNullSetter | /**
     * Creates an accessor for setting the elements of an array writer to {@code null} during
* runtime.
*
* @param elementType the element type of the array
*/
public static NullSetter createNullSetter(LogicalType elementType) {
// ordered by type root definition
switch (elementType.getTypeRoot()) {
case CHAR:
case VARCHAR:
case BINARY:
case VARBINARY:
case DECIMAL:
case BIGINT:
case TIMESTAMP_WITHOUT_TIME_ZONE:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
case INTERVAL_DAY_TIME:
case ARRAY:
case MULTISET:
case MAP:
case ROW:
case STRUCTURED_TYPE:
case RAW:
return BinaryArrayWriter::setNullLong;
case BOOLEAN:
return BinaryArrayWriter::setNullBoolean;
case TINYINT:
return BinaryArrayWriter::setNullByte;
case SMALLINT:
return BinaryArrayWriter::setNullShort;
case INTEGER:
case DATE:
case TIME_WITHOUT_TIME_ZONE:
case INTERVAL_YEAR_MONTH:
return BinaryArrayWriter::setNullInt;
case FLOAT:
return BinaryArrayWriter::setNullFloat;
case DOUBLE:
return BinaryArrayWriter::setNullDouble;
case TIMESTAMP_WITH_TIME_ZONE:
throw new UnsupportedOperationException();
case DISTINCT_TYPE:
return createNullSetter(((DistinctType) elementType).getSourceType());
case NULL:
case SYMBOL:
case UNRESOLVED:
default:
throw new IllegalArgumentException();
}
} | 3.68 |
hadoop_BlockManagerParameters_withPrefetchingStatistics | /**
* Sets the prefetching statistics for the stream.
*
* @param statistics The prefetching statistics.
* @return The builder.
*/
public BlockManagerParameters withPrefetchingStatistics(
final PrefetchingStatistics statistics) {
this.prefetchingStatistics = statistics;
return this;
} | 3.68 |
flink_AbstractServerBase_attemptToBind | /**
* Tries to start the server at the provided port.
*
     * <p>This, in conjunction with {@link #start()}, tries to start the server on a free port among
* the port range provided at the constructor.
*
* @param port the port to try to bind the server to.
* @throws Exception If something goes wrong during the bind operation.
*/
private boolean attemptToBind(final int port) throws Throwable {
log.debug("Attempting to start {} on port {}.", serverName, port);
this.queryExecutor = createQueryExecutor();
this.handler = initializeHandler();
final NettyBufferPool bufferPool = new NettyBufferPool(numEventLoopThreads);
final ThreadFactory threadFactory =
new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("Flink " + serverName + " EventLoop Thread %d")
.build();
final NioEventLoopGroup nioGroup =
new NioEventLoopGroup(numEventLoopThreads, threadFactory);
this.bootstrap =
new ServerBootstrap()
.localAddress(bindAddress, port)
.group(nioGroup)
.channel(NioServerSocketChannel.class)
.option(ChannelOption.ALLOCATOR, bufferPool)
.childOption(ChannelOption.ALLOCATOR, bufferPool)
.childHandler(new ServerChannelInitializer<>(handler));
final int defaultHighWaterMark = 64 * 1024; // from DefaultChannelConfig (not exposed)
//noinspection ConstantConditions
// (ignore warning here to make this flexible in case the configuration values change)
if (LOW_WATER_MARK > defaultHighWaterMark) {
bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK);
bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK);
} else { // including (newHighWaterMark < defaultLowWaterMark)
bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK);
bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK);
}
try {
final ChannelFuture future = bootstrap.bind().sync();
if (future.isSuccess()) {
final InetSocketAddress localAddress =
(InetSocketAddress) future.channel().localAddress();
serverAddress =
new InetSocketAddress(localAddress.getAddress(), localAddress.getPort());
return true;
}
// the following throw is to bypass Netty's "optimization magic"
// and catch the bind exception.
// the exception is thrown by the sync() call above.
throw future.cause();
} catch (BindException e) {
log.debug("Failed to start {} on port {}: {}.", serverName, port, e.getMessage());
try {
// we shutdown the server but we reset the future every time because in
// case of failure to bind, we will call attemptToBind() here, and not resetting
// the flag will interfere with future shutdown attempts.
shutdownServer()
.whenComplete((ignoredV, ignoredT) -> serverShutdownFuture.getAndSet(null))
.get();
} catch (Exception r) {
// Here we were seeing this problem:
// https://github.com/netty/netty/issues/4357 if we do a get().
// this is why we now simply wait a bit so that everything is shut down.
log.warn("Problem while shutting down {}: {}", serverName, r.getMessage());
}
}
// any other type of exception we let it bubble up.
return false;
} | 3.68 |
flink_SharedBufferAccessor_lockNode | /**
* Increases the reference counter for the given entry so that it is not accidentally removed.
*
* @param node id of the entry
* @param version dewey number of the (potential) edge that locks the given node
*/
public void lockNode(final NodeId node, final DeweyNumber version) {
Lockable<SharedBufferNode> sharedBufferNode = sharedBuffer.getEntry(node);
if (sharedBufferNode != null) {
sharedBufferNode.lock();
for (Lockable<SharedBufferEdge> edge : sharedBufferNode.getElement().getEdges()) {
if (version.isCompatibleWith(edge.getElement().getDeweyNumber())) {
edge.lock();
}
}
sharedBuffer.upsertEntry(node, sharedBufferNode);
}
} | 3.68 |
hadoop_LeveldbIterator_peekNext | /**
* Returns the next element in the iteration, without advancing the
* iteration.
*
* @return the next element in the iteration.
* @throws DBException db Exception.
*/
public Map.Entry<byte[], byte[]> peekNext() throws DBException {
try {
return iter.peekNext();
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.68 |
flink_ExecutionEnvironment_executeAsync | /**
* Triggers the program execution asynchronously. The environment will execute all parts of the
* program that have resulted in a "sink" operation. Sink operations are for example printing
* results ({@link DataSet#print()}, writing results (e.g. {@link DataSet#writeAsText(String)},
* {@link DataSet#write(org.apache.flink.api.common.io.FileOutputFormat, String)}, or other
* generic data sinks created with {@link
* DataSet#output(org.apache.flink.api.common.io.OutputFormat)}.
*
* <p>The program execution will be logged and displayed with the given job name.
*
* @return A {@link JobClient} that can be used to communicate with the submitted job, completed
* on submission succeeded.
* @throws Exception Thrown, if the program submission fails.
*/
@PublicEvolving
public JobClient executeAsync(String jobName) throws Exception {
checkNotNull(
configuration.get(DeploymentOptions.TARGET),
"No execution.target specified in your configuration file.");
final Plan plan = createProgramPlan(jobName);
final PipelineExecutorFactory executorFactory =
executorServiceLoader.getExecutorFactory(configuration);
checkNotNull(
executorFactory,
"Cannot find compatible factory for specified execution.target (=%s)",
configuration.get(DeploymentOptions.TARGET));
CompletableFuture<JobClient> jobClientFuture =
executorFactory
.getExecutor(configuration)
.execute(plan, configuration, userClassloader);
try {
JobClient jobClient = jobClientFuture.get();
jobListeners.forEach(jobListener -> jobListener.onJobSubmitted(jobClient, null));
return jobClient;
} catch (Throwable t) {
jobListeners.forEach(jobListener -> jobListener.onJobSubmitted(null, t));
ExceptionUtils.rethrow(t);
// make javac happy, this code path will not be reached
return null;
}
} | 3.68 |
framework_TypeDataStore_isNoLayoutRpcMethod | /**
* Checks whether the provided method is annotated with {@link NoLayout}.
*
* @param method
* the rpc method to check
*
* @since 7.4
*
* @return <code>true</code> if the method has a NoLayout annotation;
* otherwise <code>false</code>
*/
public static boolean isNoLayoutRpcMethod(Method method) {
return hasMethodAttribute(method, MethodAttribute.NO_LAYOUT);
} | 3.68 |
hadoop_TaskPool_suppressExceptions | /**
* Suppress exceptions from tasks.
* RemoteIterator exceptions are not suppressable.
* @param suppress new value
* @return the builder.
*/
public Builder<I> suppressExceptions(boolean suppress) {
this.suppressExceptions = suppress;
return this;
} | 3.68 |
zxing_PDF417HighLevelEncoder_determineConsecutiveTextCount | /**
* Determines the number of consecutive characters that are encodable using text compaction.
*
* @param input the input
* @param startpos the start position within the input
* @return the requested character count
*/
private static int determineConsecutiveTextCount(ECIInput input, int startpos) {
final int len = input.length();
int idx = startpos;
while (idx < len) {
int numericCount = 0;
while (numericCount < 13 && idx < len && !input.isECI(idx) && isDigit(input.charAt(idx))) {
numericCount++;
idx++;
}
if (numericCount >= 13) {
return idx - startpos - numericCount;
}
if (numericCount > 0) {
//Heuristic: All text-encodable chars or digits are binary encodable
continue;
}
//Check if character is encodable
if (input.isECI(idx) || !isText(input.charAt(idx))) {
break;
}
idx++;
}
return idx - startpos;
} | 3.68 |
hbase_RegionCoprocessorHost_postCompact | /**
* Called after the store compaction has completed.
* @param store the store being compacted
* @param resultFile the new store file written during compaction
* @param tracker used to track the life cycle of a compaction
* @param request the compaction request
* @param user the user
*/
public void postCompact(final HStore store, final HStoreFile resultFile,
final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user)
throws IOException {
execOperation(
coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult(user) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postCompact(this, store, resultFile, tracker, request);
}
});
} | 3.68 |
hadoop_RollingFileSystemSink_extractId | /**
* Extract the ID from the suffix of the given file name.
*
* @param file the file name
* @return the ID or -1 if no ID could be extracted
*/
private int extractId(String file) {
int index = file.lastIndexOf(".");
int id = -1;
// A hostname has to have at least 1 character
if (index > 0) {
try {
id = Integer.parseInt(file.substring(index + 1));
} catch (NumberFormatException ex) {
// This can happen if there's no suffix, but there is a dot in the
// hostname. Just ignore it.
}
}
return id;
} | 3.68 |
flink_CollectionUtil_map | /** Returns an immutable {@link Map} from the provided entries. */
@SafeVarargs
public static <K, V> Map<K, V> map(Map.Entry<K, V>... entries) {
if (entries == null) {
return Collections.emptyMap();
}
Map<K, V> map = new HashMap<>();
for (Map.Entry<K, V> entry : entries) {
map.put(entry.getKey(), entry.getValue());
}
return Collections.unmodifiableMap(map);
} | 3.68 |
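
A brief usage sketch of the helper above; the package name is assumed to be Flink's org.apache.flink.util, and Java 9+ is assumed for Map.entry.

```java
import java.util.Map;
import org.apache.flink.util.CollectionUtil; // assumed package for the helper shown above

public class MapHelperDemo {
    public static void main(String[] args) {
        // builds an unmodifiable map from the given entries
        Map<String, Integer> ports =
                CollectionUtil.map(Map.entry("http", 80), Map.entry("https", 443));
        System.out.println(ports);   // {http=80, https=443} (iteration order not guaranteed)
        // ports.put("ftp", 21);     // would throw UnsupportedOperationException
    }
}
```
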
hbase_MoveWithAck_isSameServer | /**
* Returns true if passed region is still on serverName when we look at hbase:meta.
* @return true if region is hosted on serverName otherwise false
*/
private boolean isSameServer(RegionInfo region, ServerName serverName) throws IOException {
ServerName serverForRegion = getServerNameForRegion(region, admin, conn);
return serverForRegion != null && serverForRegion.equals(serverName);
} | 3.68 |
flink_ThreadInfoSamplesRequest_getNumSamples | /**
* Returns the number of samples that are requested to be collected.
*
* @return the number of requested samples.
*/
public int getNumSamples() {
return numSubSamples;
} | 3.68 |
hmily_HmilyRepositoryNode_getHmilyLockRealPath | /**
* Get hmily lock real path.
*
* @param lockId lock id
* @return hmily lock real path
*/
public String getHmilyLockRealPath(final String lockId) {
return Joiner.on("/").join(getHmilyLockRootPath(), lockId);
} | 3.68 |
framework_BrowserWindowOpener_setFeatures | // Avoid breaking url to multiple lines
// @formatter:off
/**
* Sets the features for opening the window. See e.g.
* {@link https://developer.mozilla.org/en-US/docs/DOM/window.open#Position_and_size_features}
* for a description of the commonly supported features.
*
* @param features a string with window features, or <code>null</code> to use the default features.
*/
// @formatter:on
public void setFeatures(String features) {
getState().features = features;
} | 3.68 |
hadoop_TypedBytesInput_readRawString | /**
* Reads the raw bytes following a <code>Type.STRING</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawString() throws IOException {
int length = in.readInt();
byte[] bytes = new byte[5 + length];
bytes[0] = (byte) Type.STRING.code;
bytes[1] = (byte) (0xff & (length >> 24));
bytes[2] = (byte) (0xff & (length >> 16));
bytes[3] = (byte) (0xff & (length >> 8));
bytes[4] = (byte) (0xff & length);
in.readFully(bytes, 5, length);
return bytes;
} | 3.68 |
flink_HiveParserSqlSumAggFunction_isDistinct | // ~ Methods ----------------------------------------------------------------
@Override
public boolean isDistinct() {
return isDistinct;
} | 3.68 |
hadoop_TimelineDomain_setDescription | /**
* Set the domain description
*
* @param description the domain description
*/
public void setDescription(String description) {
this.description = description;
} | 3.68 |
pulsar_HttpLookupService_getBroker | /**
* Calls http-lookup api to find broker-service address which can serve a given topic.
*
* @param topicName topic-name
* @return broker-socket-address that serves given topic
*/
@Override
@SuppressWarnings("deprecation")
public CompletableFuture<Pair<InetSocketAddress, InetSocketAddress>> getBroker(TopicName topicName) {
String basePath = topicName.isV2() ? BasePathV2 : BasePathV1;
String path = basePath + topicName.getLookupName();
path = StringUtils.isBlank(listenerName) ? path : path + "?listenerName=" + Codec.encode(listenerName);
return httpClient.get(path, LookupData.class)
.thenCompose(lookupData -> {
                    // Convert LookupData into a SocketAddress, handling exceptions
URI uri = null;
try {
if (useTls) {
uri = new URI(lookupData.getBrokerUrlTls());
} else {
String serviceUrl = lookupData.getBrokerUrl();
if (serviceUrl == null) {
serviceUrl = lookupData.getNativeUrl();
}
uri = new URI(serviceUrl);
}
InetSocketAddress brokerAddress = InetSocketAddress.createUnresolved(uri.getHost(), uri.getPort());
return CompletableFuture.completedFuture(Pair.of(brokerAddress, brokerAddress));
} catch (Exception e) {
// Failed to parse url
log.warn("[{}] Lookup Failed due to invalid url {}, {}", topicName, uri, e.getMessage());
return FutureUtil.failedFuture(e);
}
});
} | 3.68 |
flink_CachingLookupFunction_lookupByDelegate | // -------------------------------- Helper functions ------------------------------
private Collection<RowData> lookupByDelegate(RowData keyRow) throws IOException {
try {
Preconditions.checkState(
delegate != null,
"User's lookup function can't be null, if there are possible cache misses.");
long loadStart = System.currentTimeMillis();
Collection<RowData> lookupValues = delegate.lookup(keyRow);
updateLatestLoadTime(System.currentTimeMillis() - loadStart);
loadCounter.inc();
return lookupValues;
} catch (Exception e) {
// TODO: Should implement retry on failure logic as proposed in FLIP-234
numLoadFailuresCounter.inc();
throw new IOException(String.format("Failed to lookup with key '%s'", keyRow), e);
}
} | 3.68 |
querydsl_GeometryExpressions_polygonOperation | /**
* Create a new Polygon operation expression
*
* @param op operator
* @param args arguments
* @return operation expression
*/
public static PolygonExpression<Polygon> polygonOperation(Operator op, Expression<?>... args) {
return new PolygonOperation<Polygon>(Polygon.class, op, args);
} | 3.68 |
hbase_CompactingMemStore_flushInMemory | // externally visible only for tests
// when invoked directly from tests it must be verified that the caller doesn't hold updatesLock,
// otherwise there is a deadlock
void flushInMemory() {
MutableSegment currActive = getActive();
if (currActive.setInMemoryFlushed()) {
flushInMemory(currActive);
}
inMemoryCompaction();
} | 3.68 |
hadoop_OperationAuditor_noteSpanReferenceLost | /**
* Span reference lost from GC operations.
* This is only called when an attempt is made to retrieve on
* the active thread or when a prune operation is cleaning up.
*
* @param threadId thread ID.
*/
default void noteSpanReferenceLost(long threadId) {
} | 3.68 |
flink_AllocatedSlot_equals | /** This always checks based on reference equality. */
@Override
public final boolean equals(Object obj) {
return this == obj;
} | 3.68 |
hudi_AvroSchemaCompatibility_getReader | /**
* Gets the reader schema that was validated.
*
* @return reader schema that was validated.
*/
public Schema getReader() {
return mReader;
} | 3.68 |
flink_ExecutionTimeBasedSlowTaskDetector_scheduleTask | /** Schedule periodical slow task detection. */
private void scheduleTask(
final ExecutionGraph executionGraph,
final SlowTaskDetectorListener listener,
final ComponentMainThreadExecutor mainThreadExecutor) {
this.scheduledDetectionFuture =
mainThreadExecutor.schedule(
() -> {
try {
listener.notifySlowTasks(findSlowTasks(executionGraph));
} catch (Throwable throwable) {
fatalErrorHandler.onFatalError(throwable);
}
scheduleTask(executionGraph, listener, mainThreadExecutor);
},
checkIntervalMillis,
TimeUnit.MILLISECONDS);
} | 3.68 |
hadoop_LogAggregationWebUtils_verifyAndGetNodeId | /**
* Verify and parse NodeId.
* @param html the html
* @param nodeIdStr the nodeId string
* @return the {@link NodeId}
*/
public static NodeId verifyAndGetNodeId(Block html, String nodeIdStr) {
if (nodeIdStr == null || nodeIdStr.isEmpty()) {
html.h1().__("Cannot get container logs without a NodeId").__();
return null;
}
NodeId nodeId = null;
try {
nodeId = NodeId.fromString(nodeIdStr);
} catch (IllegalArgumentException e) {
html.h1().__("Cannot get container logs. Invalid nodeId: " + nodeIdStr)
.__();
return null;
}
return nodeId;
} | 3.68 |
pulsar_PersistentSubscription_deleteForcefully | /**
 * Forcefully closes all consumers and deletes the subscription.
 *
 * @return a future that completes once the subscription has been deleted
*/
@Override
public CompletableFuture<Void> deleteForcefully() {
return delete(true);
} | 3.68 |
flink_HiveParserUtils_extractLateralViewInfo | // extracts useful information for a given lateral view node
public static LateralViewInfo extractLateralViewInfo(
HiveParserASTNode lateralView,
HiveParserRowResolver inputRR,
HiveParserSemanticAnalyzer hiveAnalyzer,
FrameworkConfig frameworkConfig,
RelOptCluster cluster)
throws SemanticException {
// checks the left sub-tree
HiveParserASTNode sel = (HiveParserASTNode) lateralView.getChild(0);
Preconditions.checkArgument(sel.getToken().getType() == HiveASTParser.TOK_SELECT);
Preconditions.checkArgument(sel.getChildCount() == 1);
HiveParserASTNode selExpr = (HiveParserASTNode) sel.getChild(0);
Preconditions.checkArgument(selExpr.getToken().getType() == HiveASTParser.TOK_SELEXPR);
// decide function name and function
HiveParserASTNode func = (HiveParserASTNode) selExpr.getChild(0);
Preconditions.checkArgument(func.getToken().getType() == HiveASTParser.TOK_FUNCTION);
String funcName = getFunctionText(func, true);
SqlOperator sqlOperator =
getSqlOperator(
funcName,
frameworkConfig.getOperatorTable(),
SqlFunctionCategory.USER_DEFINED_TABLE_FUNCTION);
Preconditions.checkArgument(isUDTF(sqlOperator), funcName + " is not a valid UDTF");
// decide operands
List<ExprNodeDesc> operands = new ArrayList<>(func.getChildCount() - 1);
List<ColumnInfo> operandColInfos = new ArrayList<>(func.getChildCount() - 1);
HiveParserTypeCheckCtx typeCheckCtx =
new HiveParserTypeCheckCtx(inputRR, frameworkConfig, cluster);
for (int i = 1; i < func.getChildCount(); i++) {
ExprNodeDesc exprDesc =
hiveAnalyzer.genExprNodeDesc(
(HiveParserASTNode) func.getChild(i), inputRR, typeCheckCtx);
operands.add(exprDesc);
operandColInfos.add(
new ColumnInfo(
getColumnInternalName(i - 1),
exprDesc.getWritableObjectInspector(),
null,
false));
}
// decide table alias -- there must be a table alias
HiveParserASTNode tabAliasNode =
(HiveParserASTNode) selExpr.getChild(selExpr.getChildCount() - 1);
Preconditions.checkArgument(
tabAliasNode.getToken().getType() == HiveASTParser.TOK_TABALIAS);
String tabAlias = unescapeIdentifier(tabAliasNode.getChild(0).getText().toLowerCase());
// decide column aliases -- column aliases are optional
List<String> colAliases = new ArrayList<>();
for (int i = 1; i < selExpr.getChildCount() - 1; i++) {
HiveParserASTNode child = (HiveParserASTNode) selExpr.getChild(i);
Preconditions.checkArgument(child.getToken().getType() == HiveASTParser.Identifier);
colAliases.add(unescapeIdentifier(child.getText().toLowerCase()));
}
return new LateralViewInfo(
funcName, sqlOperator, operands, operandColInfos, colAliases, tabAlias);
} | 3.68 |
hbase_ZkSplitLogWorkerCoordination_submitTask | /**
* Submit a log split task to executor service
* @param curTask task to submit
* @param curTaskZKVersion current version of task
*/
void submitTask(final String curTask, final int curTaskZKVersion, final int reportPeriod) {
final MutableInt zkVersion = new MutableInt(curTaskZKVersion);
CancelableProgressable reporter = new CancelableProgressable() {
private long last_report_at = 0;
@Override
public boolean progress() {
long t = EnvironmentEdgeManager.currentTime();
if ((t - last_report_at) > reportPeriod) {
last_report_at = t;
int latestZKVersion =
attemptToOwnTask(false, watcher, server.getServerName(), curTask, zkVersion.intValue());
if (latestZKVersion < 0) {
LOG.warn("Failed to heartbeat the task" + curTask);
return false;
}
zkVersion.setValue(latestZKVersion);
}
return true;
}
};
ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails =
new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails();
splitTaskDetails.setTaskNode(curTask);
splitTaskDetails.setCurTaskZKVersion(zkVersion);
WALSplitterHandler hsh = new WALSplitterHandler(server, this, splitTaskDetails, reporter,
this.tasksInProgress, splitTaskExecutor);
server.getExecutorService().submit(hsh);
} | 3.68 |
hbase_HRegion_getLongValue | /** Returns Get the long out of the passed in Cell */
private static long getLongValue(final Cell cell) throws DoNotRetryIOException {
int len = cell.getValueLength();
if (len != Bytes.SIZEOF_LONG) {
// throw DoNotRetryIOException instead of IllegalArgumentException
throw new DoNotRetryIOException("Field is not a long, it's " + len + " bytes wide");
}
return PrivateCellUtil.getValueAsLong(cell);
} | 3.68 |
hbase_NamespaceStateManager_getState | /**
* Gets an instance of NamespaceTableAndRegionInfo associated with namespace.
* @param name The name of the namespace
* @return An instance of NamespaceTableAndRegionInfo.
*/
public NamespaceTableAndRegionInfo getState(String name) {
return nsStateCache.get(name);
} | 3.68 |
framework_VUnknownComponent_setCaption | /**
* Sets the content text for this placeholder. Can contain HTML.
*
* @param c
* the content text to set
*/
public void setCaption(String c) {
caption.getElement().setInnerHTML(c);
} | 3.68 |
framework_Table_removeHeaderClickListener | /**
* Removes a header click listener.
*
* @param listener
* The listener to remove.
*/
public void removeHeaderClickListener(HeaderClickListener listener) {
removeListener(TableConstants.HEADER_CLICK_EVENT_ID,
HeaderClickEvent.class, listener);
} | 3.68 |
framework_VAbstractCalendarPanel_setFocusChangeListener | /**
* The given FocusChangeListener is notified when the focused date changes
* by user either clicking on a new date or by using the keyboard.
*
* @param listener
* The FocusChangeListener to be notified
*/
public void setFocusChangeListener(FocusChangeListener listener) {
focusChangeListener = listener;
} | 3.68 |
druid_SQLASTVisitor_visit | /**
* support procedure
*/
default boolean visit(SQLWhileStatement x) {
return true;
} | 3.68 |
flink_GenericDataSourceBase_setSplitDataProperties | /**
* Sets properties of input splits for this data source. Split properties can help to generate
* more efficient execution plans. <br>
* <b> IMPORTANT: Providing wrong split data properties can cause wrong results! </b>
*
* @param splitDataProperties The data properties of this data source's splits.
*/
public void setSplitDataProperties(SplitDataProperties<OUT> splitDataProperties) {
this.splitProperties = splitDataProperties;
} | 3.68 |
hbase_FixedIntervalRateLimiter_setNextRefillTime | // This method is for strictly testing purpose only
@Override
public void setNextRefillTime(long nextRefillTime) {
this.nextRefillTime = nextRefillTime;
} | 3.68 |
hbase_Tag_getValueAsByte | /**
* Converts the value bytes of the given tag into a byte value
* @param tag The Tag
* @return value as byte
*/
public static byte getValueAsByte(Tag tag) {
if (tag.hasArray()) {
return tag.getValueArray()[tag.getValueOffset()];
}
return ByteBufferUtils.toByte(tag.getValueByteBuffer(), tag.getValueOffset());
} | 3.68 |
framework_DropTargetExtensionConnector_removeDragOverStyle | /**
* Remove the drag over indicator class name from the target element.
* <p>
* This is triggered on {@link #onDrop(Event) drop},
* {@link #onDragLeave(Event) dragleave} and {@link #onDragOver(Event)
* dragover} events pending on whether the drop has happened or if it is not
* possible. The drop is not possible if the drop effect for the source and
* target don't match or if there is a drop criteria script that evaluates
* to false.
*
* @param event
* the event that triggered the removal of the indicator
*/
protected void removeDragOverStyle(NativeEvent event) {
getDropTargetElement().removeClassName(styleDragCenter);
} | 3.68 |
flink_ExecutionConfig_setLatencyTrackingInterval | /**
* Interval for sending latency tracking marks from the sources to the sinks. Flink will send
* latency tracking marks from the sources at the specified interval.
*
* <p>Setting a tracking interval <= 0 disables the latency tracking.
*
* @param interval Interval in milliseconds.
*/
@PublicEvolving
public ExecutionConfig setLatencyTrackingInterval(long interval) {
configuration.set(MetricOptions.LATENCY_INTERVAL, interval);
return this;
} | 3.68 |
hbase_MutableRegionInfo_getEndKey | /** Returns the endKey */
@Override
public byte[] getEndKey() {
return endKey;
} | 3.68 |
hadoop_RouterQuotaManager_isQuotaSet | /**
* Check if the quota was set.
* @param quota the quota usage.
* @return True if the quota is set.
*/
public static boolean isQuotaSet(QuotaUsage quota) {
if (quota != null) {
long nsQuota = quota.getQuota();
long ssQuota = quota.getSpaceQuota();
// once nsQuota or ssQuota was set, this mount table is quota set
if (nsQuota != HdfsConstants.QUOTA_RESET
|| ssQuota != HdfsConstants.QUOTA_RESET || Quota.orByStorageType(
t -> quota.getTypeQuota(t) != HdfsConstants.QUOTA_RESET)) {
return true;
}
}
return false;
} | 3.68 |
flink_ThroughputCalculator_resumeMeasurement | /** Mark when the time should be included to the throughput calculation. */
public void resumeMeasurement() {
if (measurementStartTime == NOT_TRACKED) {
measurementStartTime = clock.absoluteTimeMillis();
}
} | 3.68 |
hadoop_RequestFactoryImpl_uploadPartEncryptionParameters | /**
* Sets server side encryption parameters to the part upload
* request when encryption is enabled.
* @param builder upload part request builder
*/
protected void uploadPartEncryptionParameters(
UploadPartRequest.Builder builder) {
// need to set key to get objects encrypted with SSE_C
EncryptionSecretOperations.getSSECustomerKey(encryptionSecrets).ifPresent(base64customerKey -> {
builder.sseCustomerAlgorithm(ServerSideEncryption.AES256.name())
.sseCustomerKey(base64customerKey)
.sseCustomerKeyMD5(Md5Utils.md5AsBase64(Base64.getDecoder().decode(base64customerKey)));
});
} | 3.68 |
zxing_ITFReader_skipWhiteSpace | /**
* Skip all whitespace until we get to the first black line.
*
* @param row row of black/white values to search
* @return index of the first black line.
* @throws NotFoundException Throws exception if no black lines are found in the row
*/
private static int skipWhiteSpace(BitArray row) throws NotFoundException {
int width = row.getSize();
int endStart = row.getNextSet(0);
if (endStart == width) {
throw NotFoundException.getNotFoundInstance();
}
return endStart;
} | 3.68 |
hadoop_Cluster_getJobTrackerStatus | /**
* Get the JobTracker's status.
*
* @return {@link JobTrackerStatus} of the JobTracker
* @throws IOException
* @throws InterruptedException
*/
public JobTrackerStatus getJobTrackerStatus() throws IOException,
InterruptedException {
return client.getJobTrackerStatus();
} | 3.68 |
hadoop_FilterFileSystem_completeLocalOutput | /**
* Called when we're all done writing to the target. A local FS will
* do nothing, because we've written to exactly the right place. A remote
* FS will copy the contents of tmpLocalFile to the correct target at
* fsOutputFile.
*/
@Override
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
fs.completeLocalOutput(fsOutputFile, tmpLocalFile);
} | 3.68 |
zxing_MinimalEncoder_encodeHighLevel | /**
* Performs message encoding of a DataMatrix message
*
* @param msg the message
* @param priorityCharset The preferred {@link Charset}. When the value of the argument is null, the algorithm
* chooses charsets that leads to a minimal representation. Otherwise the algorithm will use the priority
* charset to encode any character in the input that can be encoded by it if the charset is among the
* supported charsets.
* @param fnc1 denotes the character in the input that represents the FNC1 character or -1 if this is not a GS1
* bar code. If the value is not -1 then a FNC1 is also prepended.
* @param shape requested shape.
* @return the encoded message (the char values range from 0 to 255)
*/
public static String encodeHighLevel(String msg, Charset priorityCharset, int fnc1, SymbolShapeHint shape) {
int macroId = 0;
if (msg.startsWith(HighLevelEncoder.MACRO_05_HEADER) && msg.endsWith(HighLevelEncoder.MACRO_TRAILER)) {
macroId = 5;
msg = msg.substring(HighLevelEncoder.MACRO_05_HEADER.length(), msg.length() - 2);
} else if (msg.startsWith(HighLevelEncoder.MACRO_06_HEADER) && msg.endsWith(HighLevelEncoder.MACRO_TRAILER)) {
macroId = 6;
msg = msg.substring(HighLevelEncoder.MACRO_06_HEADER.length(), msg.length() - 2);
}
return new String(encode(msg, priorityCharset, fnc1, shape, macroId), StandardCharsets.ISO_8859_1);
} | 3.68 |
morf_MySqlDialect_buildPrimaryKeyConstraint | /**
* CONSTRAINT TABLENAME_PK PRIMARY KEY (`X`, `Y`, `Z`)
*/
private String buildPrimaryKeyConstraint(String tableName, List<String> primaryKeyColumns) {
return new StringBuilder()
.append("CONSTRAINT `")
.append(tableName)
.append("_PK` ")
.append("PRIMARY KEY (`")
.append(Joiner.on("`, `").join(primaryKeyColumns))
.append("`)").toString();
} | 3.68 |
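
The same string construction can be sketched without the Guava Joiner dependency; the class name and sample table below are illustrative only.

```java
import java.util.List;

public class PrimaryKeyConstraintDemo {
    // builds: CONSTRAINT `TABLE_PK` PRIMARY KEY (`X`, `Y`, `Z`)
    static String buildPrimaryKeyConstraint(String tableName, List<String> primaryKeyColumns) {
        return "CONSTRAINT `" + tableName + "_PK` PRIMARY KEY (`"
                + String.join("`, `", primaryKeyColumns) + "`)";
    }

    public static void main(String[] args) {
        System.out.println(buildPrimaryKeyConstraint("Account", List.of("id", "version")));
        // CONSTRAINT `Account_PK` PRIMARY KEY (`id`, `version`)
    }
}
```
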
hbase_SyncReplicationReplayWALRemoteProcedure_truncateWALs | /**
 * Only truncate WALs one by one when the task succeeds. The parent procedure will check the first WAL
 * length to know whether this task succeeded.
*/
private void truncateWALs(MasterProcedureEnv env) {
String firstWal = wals.get(0);
try {
env.getMasterServices().getSyncReplicationReplayWALManager().finishReplayWAL(firstWal);
} catch (IOException e) {
// As it is idempotent to rerun this task. Just ignore this exception and return.
LOG.warn("Failed to truncate wal {} for peer id={}", firstWal, peerId, e);
return;
}
for (int i = 1; i < wals.size(); i++) {
String wal = wals.get(i);
try {
env.getMasterServices().getSyncReplicationReplayWALManager().finishReplayWAL(wal);
} catch (IOException e1) {
try {
// retry
env.getMasterServices().getSyncReplicationReplayWALManager().finishReplayWAL(wal);
} catch (IOException e2) {
// As the parent procedure only check the first wal length. Just ignore this exception.
LOG.warn("Failed to truncate wal {} for peer id={}", wal, peerId, e2);
}
}
}
} | 3.68 |
hadoop_UriUtils_extractAccountNameFromHostName | /**
* Extracts the account name from the host name.
* @param hostName the fully-qualified domain name of the storage service
* endpoint (e.g. {account}.dfs.core.windows.net.
* @return the storage service account name.
*/
public static String extractAccountNameFromHostName(final String hostName) {
if (hostName == null || hostName.isEmpty()) {
return null;
}
if (!containsAbfsUrl(hostName)) {
return null;
}
String[] splitByDot = hostName.split("\\.");
if (splitByDot.length == 0) {
return null;
}
return splitByDot[0];
} | 3.68 |
flink_Configuration_getDouble | /**
* Returns the value associated with the given config option as a {@code double}. If no value is
* mapped under any key of the option, it returns the specified default instead of the option's
* default value.
*
* @param configOption The configuration option
     * @param overrideDefault The value to return if no value was mapped for any key of the option
* @return the configured value associated with the given config option, or the overrideDefault
*/
@PublicEvolving
public double getDouble(ConfigOption<Double> configOption, double overrideDefault) {
return getOptional(configOption).orElse(overrideDefault);
} | 3.68 |
hbase_ChaosAgent_setStatusOfTaskZNode | /**
* sets given Status for Task Znode
* @param taskZNode ZNode to set status
* @param status Status value
*/
public void setStatusOfTaskZNode(String taskZNode, String status) {
LOG.info("Setting status of Task ZNode: " + taskZNode + " status : " + status);
zk.setData(taskZNode, status.getBytes(StandardCharsets.UTF_8), -1, setStatusOfTaskZNodeCallback,
null);
} | 3.68 |
hbase_FavoredNodesManager_isFavoredNodeApplicable | /**
* Favored nodes are not applicable for system tables. We will use this to check before we apply
* any favored nodes logic on a region.
*/
public static boolean isFavoredNodeApplicable(RegionInfo regionInfo) {
return !regionInfo.getTable().isSystemTable();
} | 3.68 |
flink_AbstractID_byteArrayToLong | /**
* Converts the given byte array to a long.
*
* @param ba the byte array to be converted
* @param offset the offset indicating at which byte inside the array the conversion shall begin
* @return the long variable
*/
private static long byteArrayToLong(byte[] ba, int offset) {
long l = 0;
for (int i = 0; i < SIZE_OF_LONG; ++i) {
l |= (ba[offset + SIZE_OF_LONG - 1 - i] & 0xffL) << (i << 3);
}
return l;
} | 3.68 |
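
A self-contained sketch of the same big-endian reassembly, paired with the inverse conversion so the byte layout is visible end to end (the class and method names are illustrative).

```java
public class ByteArrayLongDemo {
    private static final int SIZE_OF_LONG = 8;

    // big-endian: the most significant byte sits at ba[offset]
    static long byteArrayToLong(byte[] ba, int offset) {
        long l = 0;
        for (int i = 0; i < SIZE_OF_LONG; ++i) {
            l |= (ba[offset + SIZE_OF_LONG - 1 - i] & 0xffL) << (i << 3);
        }
        return l;
    }

    // inverse: writes the long back out most significant byte first
    static void longToByteArray(long l, byte[] ba, int offset) {
        for (int i = 0; i < SIZE_OF_LONG; ++i) {
            ba[offset + SIZE_OF_LONG - 1 - i] = (byte) (l >>> (i << 3));
        }
    }

    public static void main(String[] args) {
        byte[] buf = new byte[8];
        longToByteArray(0x1122334455667788L, buf, 0);
        System.out.println(Long.toHexString(byteArrayToLong(buf, 0))); // 1122334455667788
    }
}
```
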
flink_TableSinkFactory_createTableSink | /**
* Creates and configures a {@link TableSink} based on the given {@link Context}.
*
* @param context context of this table sink.
* @return the configured table sink.
*/
default TableSink<T> createTableSink(Context context) {
return createTableSink(context.getObjectIdentifier().toObjectPath(), context.getTable());
} | 3.68 |
flink_CheckpointConfig_toConfiguration | /**
* @return A copy of internal {@link #configuration}. Note it is missing all options that are
* stored as plain java fields in {@link CheckpointConfig}, for example {@link #storage}.
*/
@Internal
public Configuration toConfiguration() {
return new Configuration(configuration);
} | 3.68 |
flink_DataSet_mapPartition | /**
* Applies a Map-style operation to the entire partition of the data. The function is called
* once per parallel partition of the data, and the entire partition is available through the
* given Iterator. The number of elements that each instance of the MapPartition function sees
* is non deterministic and depends on the parallelism of the operation.
*
* <p>This function is intended for operations that cannot transform individual elements,
     * and require no grouping of elements. To transform individual elements, the use of {@code map()}
* and {@code flatMap()} is preferable.
*
* @param mapPartition The MapPartitionFunction that is called for the full DataSet.
* @return A MapPartitionOperator that represents the transformed DataSet.
* @see MapPartitionFunction
* @see MapPartitionOperator
*/
public <R> MapPartitionOperator<T, R> mapPartition(MapPartitionFunction<T, R> mapPartition) {
if (mapPartition == null) {
throw new NullPointerException("MapPartition function must not be null.");
}
String callLocation = Utils.getCallLocationName();
TypeInformation<R> resultType =
TypeExtractor.getMapPartitionReturnTypes(
mapPartition, getType(), callLocation, true);
return new MapPartitionOperator<>(this, resultType, clean(mapPartition), callLocation);
} | 3.68 |
morf_SqlDateUtils_castAsDateReplaceValueIfNullOrZero | /**
* Returns the replacement value if the evaluating expression is null or zero,
* otherwise returns the value casted as date.
*
* @param expression the expression to evaluate
* @param replace the replacement value
* @return expression or replacement value casted as date
*/
public static AliasedField castAsDateReplaceValueIfNullOrZero(AliasedField expression, AliasedField replace) {
return castAsDateCaseStatement(expression, or(isNull(expression), expression.eq(0)), replace);
} | 3.68 |
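A minimal usage sketch of the Morf SQL DSL; the column name and fallback literal are hypothetical, and static imports of SqlUtils.field and SqlUtils.literal are assumed.

AliasedField effectiveDate = SqlDateUtils.castAsDateReplaceValueIfNullOrZero(
    field("dateOfBirth"),       // numeric yyyyMMdd column to evaluate
    literal(19000101));         // fallback used when the column is null or zero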
rocketmq-connect_JsonConverter_fromConnectData | /**
 * Convert a RocketMQ Connect data object to a native object for serialization.
*
* @param topic the topic associated with the data
* @param schema the schema for the value
* @param value the value to convert
* @return the serialized value
*/
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
if (schema == null && value == null) {
return null;
}
Object jsonValue = converterConfig.schemasEnabled() ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value);
try {
return serializer.serialize(topic, jsonValue);
} catch (Exception e) {
            throw new ConnectException("Converting RocketMQ Connect data to byte[] failed due to serialization error: ", e);
}
} | 3.68 |
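A rough usage sketch; the configure call and schema builder are assumptions about the connector API rather than verified signatures.

JsonConverter converter = new JsonConverter();
converter.configure(new HashMap<>());               // assumed: default config with schemas enabled
byte[] payload = converter.fromConnectData(
    "orders-topic",                                 // hypothetical topic
    SchemaBuilder.string().build(),                 // assumed schema builder from the connector API
    "hello");                                       // value to serialize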
graphhopper_BBox_intersects | /**
 * Returns whether this BBox intersects the specified BBox.
*/
public boolean intersects(BBox o) {
// return (o.minLon < minLon && o.maxLon > minLon || o.minLon < maxLon && o.minLon >= minLon)
// && (o.maxLat < maxLat && o.maxLat >= minLat || o.maxLat >= maxLat && o.minLat < maxLat);
return this.minLon < o.maxLon && this.minLat < o.maxLat && o.minLon < this.maxLon && o.minLat < this.maxLat;
} | 3.68 |
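A minimal sketch; the BBox constructor argument order is assumed to be (minLon, maxLon, minLat, maxLat).

BBox berlin = new BBox(13.0, 13.8, 52.3, 52.7);
BBox southWest = new BBox(12.9, 13.2, 52.2, 52.5);
boolean overlaps = berlin.intersects(southWest);    // true: the boxes share the 13.0..13.2 x 52.3..52.5 strip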
hbase_DynamicMetricsRegistry_info | /** Returns the info object of the metrics registry */
public MetricsInfo info() {
return metricsInfo;
} | 3.68 |
flink_VertexThreadInfoTrackerBuilder_setExecutionVertexStatsCache | /**
* Sets {@code executionVertexStatsCache}. This is currently only used for testing.
*
* @param executionVertexStatsCache The Cache instance to use for caching statistics. Will use
* the default defined in {@link VertexThreadInfoTrackerBuilder#defaultCache()} if not set.
* @return Builder.
*/
@VisibleForTesting
VertexThreadInfoTrackerBuilder setExecutionVertexStatsCache(
Cache<VertexThreadInfoTracker.ExecutionVertexKey, VertexThreadInfoStats>
executionVertexStatsCache) {
this.executionVertexStatsCache = executionVertexStatsCache;
return this;
} | 3.68 |
hadoop_BaseNMTokenSecretManager_createIdentifier | /**
* It is required for RPC
*/
@Override
public NMTokenIdentifier createIdentifier() {
return new NMTokenIdentifier();
} | 3.68 |
hadoop_LeveldbIterator_seek | /**
 * Repositions the iterator so that the key of the next BlockElement
 * returned is greater than or equal to the specified target key.
 *
 * @param key the target key to seek to.
 * @throws DBException if the underlying iterator fails.
*/
public void seek(byte[] key) throws DBException {
try {
iter.seek(key);
} catch (DBException e) {
throw e;
} catch (RuntimeException e) {
throw new DBException(e.getMessage(), e);
}
} | 3.68 |
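A minimal usage sketch, assuming an already-constructed LeveldbIterator named iter and UTF-8 encoded keys.

iter.seek("app_0001".getBytes(StandardCharsets.UTF_8));   // jump to the first key >= "app_0001"
while (iter.hasNext()) {
    Map.Entry<byte[], byte[]> entry = iter.next();
    // process entry.getKey() / entry.getValue()
}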
hadoop_FsCreateModes_create | /**
* Create from masked and unmasked modes.
*
 * @param masked the masked (effective) permission mode.
 * @param unmasked the unmasked (requested) permission mode.
* @return FsCreateModes.
*/
public static FsCreateModes create(FsPermission masked,
FsPermission unmasked) {
assert masked.getUnmasked() == null;
assert unmasked.getUnmasked() == null;
return new FsCreateModes(masked, unmasked);
} | 3.68 |
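A minimal sketch of deriving both modes from a umask; the octal values are illustrative and applyUMask is assumed to behave as documented on FsPermission.

FsPermission unmasked = new FsPermission((short) 0666);                    // what the client requested
FsPermission masked = unmasked.applyUMask(new FsPermission((short) 0022)); // 0644 after applying the umask
FsCreateModes modes = FsCreateModes.create(masked, unmasked);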
dubbo_IOUtils_appendLines | /**
 * Appends lines to the given file.
 *
 * @param file the file to append to.
 * @param lines the lines to append.
* @throws IOException If an I/O error occurs
*/
public static void appendLines(File file, String[] lines) throws IOException {
if (file == null) {
throw new IOException("File is null.");
}
writeLines(new FileOutputStream(file, true), lines);
} | 3.68 |
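A minimal usage sketch with a hypothetical path.

File auditLog = new File("/tmp/audit.log");
IOUtils.appendLines(auditLog, new String[] {"first entry", "second entry"});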
flink_BlobServerConnection_close | /** Closes the connection socket and lets the thread exit. */
public void close() {
closeSilently(clientSocket, LOG);
interrupt();
} | 3.68 |
hadoop_BlockStorageMovementAttemptedItems_notifyReportedBlock | /**
* Notify the storage movement attempt finished block.
*
* @param reportedDn
* reported datanode
* @param type
* storage type
* @param reportedBlock
* reported block
*/
public void notifyReportedBlock(DatanodeInfo reportedDn, StorageType type,
Block reportedBlock) {
synchronized (scheduledBlkLocs) {
if (scheduledBlkLocs.size() <= 0) {
return;
}
matchesReportedBlock(reportedDn, type, reportedBlock);
}
} | 3.68 |
hbase_MasterObserver_preListNamespaceDescriptors | /**
* Called before a listNamespaceDescriptors request has been processed.
* @param ctx the environment to interact with the framework and master
* @param descriptors an empty list, can be filled with what to return by coprocessor
*/
default void preListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors) throws IOException {
} | 3.68 |
morf_TableSetSchema_viewNames | /**
* @see org.alfasoftware.morf.metadata.Schema#viewNames()
*/
@Override
public Collection<String> viewNames() {
return Collections.emptySet();
} | 3.68 |
hadoop_StoragePolicySatisfyManager_start | /**
* This function will do following logic based on the configured sps mode:
*
* <p>
* If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
* it won't do anything. Administrator requires to start external sps service
* explicitly.
*
* <p>
* If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the
* service is disabled and won't do any action.
*/
public void start() {
if (!storagePolicyEnabled) {
LOG.info("Disabling StoragePolicySatisfier service as {} set to {}.",
DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, storagePolicyEnabled);
return;
}
switch (mode) {
case EXTERNAL:
LOG.info("Storage policy satisfier is configured as external, "
+ "please start external sps service explicitly to satisfy policy");
break;
case NONE:
LOG.info("Storage policy satisfier is disabled");
break;
default:
LOG.info("Given mode: {} is invalid", mode);
break;
}
} | 3.68 |
zxing_CalendarParsedResult_parseDate | /**
* Parses a string as a date. RFC 2445 allows the start and end fields to be of type DATE (e.g. 20081021)
* or DATE-TIME (e.g. 20081021T123000 for local time, or 20081021T123000Z for UTC).
*
* @param when The string to parse
* @throws ParseException if not able to parse as a date
*/
private static long parseDate(String when) throws ParseException {
if (!DATE_TIME.matcher(when).matches()) {
throw new ParseException(when, 0);
}
if (when.length() == 8) {
// Show only year/month/day
DateFormat format = new SimpleDateFormat("yyyyMMdd", Locale.ENGLISH);
// For dates without a time, for purposes of interacting with Android, the resulting timestamp
// needs to be midnight of that day in GMT. See:
// http://code.google.com/p/android/issues/detail?id=8330
format.setTimeZone(TimeZone.getTimeZone("GMT"));
return format.parse(when).getTime();
}
// The when string can be local time, or UTC if it ends with a Z
if (when.length() == 16 && when.charAt(15) == 'Z') {
long milliseconds = parseDateTimeString(when.substring(0, 15));
Calendar calendar = new GregorianCalendar();
// Account for time zone difference
milliseconds += calendar.get(Calendar.ZONE_OFFSET);
// Might need to correct for daylight savings time, but use target time since
// now might be in DST but not then, or vice versa
calendar.setTime(new Date(milliseconds));
return milliseconds + calendar.get(Calendar.DST_OFFSET);
}
return parseDateTimeString(when);
} | 3.68 |
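For reference, the three input shapes the branches above distinguish (example values only):

// "20081021"          -> DATE (8 chars): interpreted as midnight GMT of that day
// "20081021T123000"   -> DATE-TIME without a trailing 'Z' (15 chars): interpreted as local time
// "20081021T123000Z"  -> DATE-TIME with a trailing 'Z' (16 chars): interpreted as UTC and
//                        compensated by the local zone and DST offsets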
framework_View_beforeLeave | /**
* Called when the user is requesting navigation away from the view.
* <p>
 * This method allows the view to accept or prevent navigation away from the
 * view, or optionally to delay it until a later stage. For
* navigation to take place, the {@link ViewBeforeLeaveEvent#navigate()}
* method must be called either directly when handling this event or later
* to perform delayed navigation.
* <p>
* The default implementation calls {@link ViewBeforeLeaveEvent#navigate()}
* directly. If you override this and do nothing, the user will never be
* able to leave the view.
* <p>
* This method is triggered before any methods in any added
* {@link ViewChangeListener ViewChangeListeners}. Whenever you call
* {@link ViewBeforeLeaveEvent#navigate()}, any {@link ViewChangeListener}s
* will be triggered. They will be handled normally and might also prevent
* navigation.
*
* @since 8.1
* @param event
* an event object providing information about the event and
* containing the {@link ViewBeforeLeaveEvent#navigate()} method
* needed to perform navigation
*/
public default void beforeLeave(ViewBeforeLeaveEvent event) {
event.navigate();
} | 3.68 |
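A minimal sketch of delayed navigation in a Vaadin 8 view; the ConfirmDialog helper is hypothetical.

public class EditorView extends VerticalLayout implements View {
    private boolean hasUnsavedChanges;

    @Override
    public void beforeLeave(ViewBeforeLeaveEvent event) {
        if (!hasUnsavedChanges) {
            event.navigate();                         // leave immediately
            return;
        }
        // hypothetical dialog helper; navigate() is called later to perform the delayed navigation
        ConfirmDialog.show("Discard unsaved changes?", confirmed -> {
            if (confirmed) {
                event.navigate();
            }
        });
    }
}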
hbase_CompactingMemStore_getNextRow | /**
 * @param cell the cell whose row to search after; if null, the first row is returned.
* @return Next row or null if none found.
*/
Cell getNextRow(final Cell cell) {
Cell lowest = null;
List<Segment> segments = getSegments();
for (Segment segment : segments) {
if (lowest == null) {
lowest = getNextRow(cell, segment.getCellSet());
} else {
lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));
}
}
return lowest;
} | 3.68 |
flink_Transformation_getManagedMemoryOperatorScopeUseCaseWeights | /**
* Get operator scope use cases that this transformation needs managed memory for, and the
* use-case-specific weights for this transformation. The weights are used for sharing managed
* memory across transformations for the use cases. Check the individual {@link
* ManagedMemoryUseCase} for the specific weight definition.
*/
public Map<ManagedMemoryUseCase, Integer> getManagedMemoryOperatorScopeUseCaseWeights() {
return Collections.unmodifiableMap(managedMemoryOperatorScopeUseCaseWeights);
} | 3.68 |
flink_SourceEventWrapper_getSourceEvent | /** @return The {@link SourceEvent} in this SourceEventWrapper. */
public SourceEvent getSourceEvent() {
return sourceEvent;
} | 3.68 |