Dataset schema — name: string, length 12–178 chars; code_snippet: string, length 8–36.5k chars; score: float64, range 3.26–3.68. Each record below lists name, code_snippet, score in that order.
framework_Criterion_setOperator
/** * Sets the comparison operator. * * @param operator * comparison operator */ public void setOperator(ComparisonOperator operator) { this.operator = operator; }
3.68
hbase_CommonFSUtils_getDefaultBlockSize
/** * Return the number of bytes that large input files should optimally be split into to minimize * i/o time. * @param fs filesystem object * @param path path whose filesystem is queried for its default block size * @return the default block size for the path's filesystem */ public static long getDefaultBlockSize(final FileSystem fs, final Path path) { return fs.getDefaultBlockSize(path); }
3.68
flink_BlobWriter_serializeAndTryOffload
/** * Serializes the given value and offloads it to the BlobServer if its size exceeds the minimum * offloading size of the BlobServer. * * @param value to serialize * @param jobId to which the value belongs. * @param blobWriter to use to offload the serialized value * @param <T> type of the value to serialize * @return Either the serialized value or the stored blob key * @throws IOException if the data cannot be serialized */ static <T> Either<SerializedValue<T>, PermanentBlobKey> serializeAndTryOffload( T value, JobID jobId, BlobWriter blobWriter) throws IOException { Preconditions.checkNotNull(value); final SerializedValue<T> serializedValue = new SerializedValue<>(value); return tryOffload(serializedValue, jobId, blobWriter); }
3.68
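A minimal sketch of how a caller might consume the Either returned by serializeAndTryOffload above. Either, SerializedValue, PermanentBlobKey and BlobWriter are the Flink types named in the snippet; the shipValue helper and the println handling are illustrative assumptions, not Flink code.

import java.io.IOException;
import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.blob.BlobWriter;
import org.apache.flink.runtime.blob.PermanentBlobKey;
import org.apache.flink.types.Either;
import org.apache.flink.util.SerializedValue;

// Hypothetical caller: small payloads travel inline, large ones by blob key.
static <T> void shipValue(T value, JobID jobId, BlobWriter blobWriter) throws IOException {
    Either<SerializedValue<T>, PermanentBlobKey> result =
        BlobWriter.serializeAndTryOffload(value, jobId, blobWriter);
    if (result.isLeft()) {
        System.out.println("shipped inline: " + result.left());
    } else {
        System.out.println("offloaded to BlobServer under key: " + result.right());
    }
}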
hbase_WALSplitUtil_finishSplitLogFile
/** * Completes the work done by splitLogFile by archiving logs. * <p> * It is invoked by SplitLogManager once it knows that one of the SplitLogWorkers has completed * the splitLogFile() part. If the master crashes, this function might get called multiple * times. */ public static void finishSplitLogFile(String logfile, Configuration conf) throws IOException { Path walDir = CommonFSUtils.getWALRootDir(conf); Path oldLogDir = new Path(walDir, HConstants.HREGION_OLDLOGDIR_NAME); Path walPath; if (CommonFSUtils.isStartingWithPath(walDir, logfile)) { walPath = new Path(logfile); } else { walPath = new Path(walDir, logfile); } FileSystem walFS = walDir.getFileSystem(conf); boolean corrupt = ZKSplitLog.isCorrupted(walDir, walPath.getName(), walFS); archive(walPath, corrupt, oldLogDir, walFS, conf); Path stagingDir = ZKSplitLog.getSplitLogDir(walDir, walPath.getName()); walFS.delete(stagingDir, true); }
3.68
pulsar_ManagedLedgerConfig_getMaximumRolloverTimeMs
/** * @return the maximum rollover time. */ public long getMaximumRolloverTimeMs() { return maximumRolloverTimeMs; }
3.68
querydsl_ProjectableSQLQuery_getSQL
/** * Get the query as an SQL query string and bindings * * @return SQL string and bindings */ public SQLBindings getSQL() { return getSQL(serialize(false)); }
3.68
zxing_Result_getResultMetadata
/** * @return {@link Map} mapping {@link ResultMetadataType} keys to values. May be * {@code null}. This contains optional metadata about what was detected about the barcode, * like orientation. */ public Map<ResultMetadataType,Object> getResultMetadata() { return resultMetadata; }
3.68
hbase_CacheConfig_shouldCacheDataCompressed
/** Returns true if data blocks should be compressed in the cache, false if not */ public boolean shouldCacheDataCompressed() { return this.cacheDataOnRead && this.cacheDataCompressed; }
3.68
hbase_StoreFileInfo_validateStoreFileName
/** * Validate the store file name. * @param fileName name of the file to validate * @return <tt>true</tt> if the file could be a valid store file, <tt>false</tt> otherwise */ public static boolean validateStoreFileName(final String fileName) { if (HFileLink.isHFileLink(fileName) || isReference(fileName)) { return true; } return !fileName.contains("-"); }
3.68
flink_ArrowWriter_reset
/** Resets the state of the writer to write the next batch of rows. */ public void reset() { root.setRowCount(0); for (ArrowFieldWriter fieldWriter : fieldWriters) { fieldWriter.reset(); } }
3.68
hadoop_SampleQuantiles_insert
/** * Add a new value from the stream. * * @param v the value to add */ synchronized public void insert(long v) { buffer[bufferCount] = v; bufferCount++; count++; if (bufferCount == buffer.length) { insertBatch(); compress(); } }
3.68
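The insert above follows a buffer-then-batch pattern: values land in a fixed buffer and are merged into the quantile summary only when the buffer fills, keeping the per-insert cost O(1) amortized. A self-contained simplified analogue; the class name, buffer size and summary step are illustrative, not Hadoop's actual internals.

import java.util.Arrays;

class BufferedSketch {
    private final long[] buffer = new long[500];
    private int bufferCount;
    private long count;

    // O(1) amortized: most inserts just append to the buffer.
    synchronized void insert(long v) {
        buffer[bufferCount++] = v;
        count++;
        if (bufferCount == buffer.length) {
            insertBatch();
        }
    }

    // Sort the buffered run once, then fold it into the summary.
    private void insertBatch() {
        Arrays.sort(buffer, 0, bufferCount);
        // ... merge sorted values into the quantile summary, then compress ...
        bufferCount = 0;
    }
}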
pulsar_PulsarClient_create
/** * Create a new PulsarClient object. * * @param serviceUrl * the url of the Pulsar endpoint to be used * @param conf * the client configuration * @return a new pulsar client object * @throws PulsarClientException.InvalidServiceURL * if the serviceUrl is invalid * @deprecated use {@link #builder()} to construct a client instance */ @Deprecated static PulsarClient create(String serviceUrl, ClientConfiguration conf) throws PulsarClientException { return new PulsarClientV1Impl(serviceUrl, conf); }
3.68
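For reference, the builder path that the @deprecated tag in the previous record points to looks roughly like this; the service URL is a placeholder.

import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.PulsarClientException;

static PulsarClient newClient() throws PulsarClientException {
    return PulsarClient.builder()
        .serviceUrl("pulsar://localhost:6650") // placeholder endpoint
        .build();
}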
hbase_SnapshotInfo_getSnapshotList
/** * Returns the list of available snapshots in the specified location * @param conf the {@link Configuration} to use * @return the list of snapshots */ public static List<SnapshotDescription> getSnapshotList(final Configuration conf) throws IOException { Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = FileSystem.get(rootDir.toUri(), conf); Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir); FileStatus[] snapshots = fs.listStatus(snapshotDir, new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); List<SnapshotDescription> snapshotLists = new ArrayList<>(snapshots.length); for (FileStatus snapshotDirStat : snapshots) { SnapshotProtos.SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()); snapshotLists.add(ProtobufUtil.createSnapshotDesc(snapshotDesc)); } return snapshotLists; }
3.68
hbase_LocalHBaseCluster_getActiveMaster
/** * Gets the current active master, if available. If no active master, returns null. * @return the HMaster for the active master */ public HMaster getActiveMaster() { for (JVMClusterUtil.MasterThread mt : masterThreads) { // Ensure that the current active master is not stopped. // We don't want to return a stopping master as an active master. if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) { return mt.getMaster(); } } return null; }
3.68
framework_MarginInfo_hasAll
/** * Checks if this MarginInfo object has margins on all edges enabled. * * @since 7.5.0 * * @return true if all edges have margins enabled */ public boolean hasAll() { return (bitMask & ALL) == ALL; }
3.68
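hasAll() above is the standard all-bits-set check. A tiny self-contained illustration with hypothetical edge constants (not Vaadin's actual bit values):

class Margins {
    static final int TOP = 1, RIGHT = 2, BOTTOM = 4, LEFT = 8;
    static final int ALL = TOP | RIGHT | BOTTOM | LEFT; // 0b1111

    static boolean hasAll(int bitMask) {
        // Passes only when every bit of ALL is present in bitMask.
        return (bitMask & ALL) == ALL;
    }
}
// Margins.hasAll(Margins.TOP | Margins.LEFT) -> false
// Margins.hasAll(Margins.ALL)                -> true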
hadoop_ResourceRequestSet_setNumContainers
/** * Force the number of containers to ask for in this requestSet to a given value. * * @param newValue the new # of containers value * @throws YarnException indicates exceptions from yarn servers. */ public void setNumContainers(int newValue) throws YarnException { if (this.numContainers == 0) { throw new YarnException("should not set numContainers to " + newValue + " for a cancel requestSet: " + toString()); } // Clone the ResourceRequest object whenever we need to change it int oldValue = this.numContainers; this.numContainers = newValue; if (this.key.getExeType().equals(ExecutionType.OPPORTUNISTIC)) { // The assumption we made about O asks is that all RRs in a requestSet have // the same numContainers value Map<String, ResourceRequest> newAsks = new HashMap<>(); for (ResourceRequest rr : this.asks.values()) { ResourceRequest clone = ResourceRequest.clone(rr); clone.setNumContainers(newValue); newAsks.put(clone.getResourceName(), clone); } this.asks = newAsks; } else { ResourceRequest rr = this.asks.get(ResourceRequest.ANY); if (rr == null) { throw new YarnException( "No ANY RR found in requestSet with numContainers=" + oldValue); } ResourceRequest clone = ResourceRequest.clone(rr); clone.setNumContainers(newValue); this.asks.put(ResourceRequest.ANY, clone); } }
3.68
hbase_BlockingRpcConnection_readResponse
/* * Receive a response. Because there is only one receiver, no synchronization on 'in' is needed. */ private void readResponse() { Call call = null; boolean expectedCall = false; try { // See HBaseServer.Call.setResponse for where we write out the response. // Total size of the response. Unused. But have to read it in anyways. int totalSize = in.readInt(); // Read the header ResponseHeader responseHeader = ResponseHeader.parseDelimitedFrom(in); int id = responseHeader.getCallId(); call = calls.remove(id); // call.done has to be set before leaving this method expectedCall = (call != null && !call.isDone()); if (!expectedCall) { // So we got a response for which we have no corresponding 'call' here on the client-side. // We probably timed out waiting, cleaned up all references, and now the server decides // to return a response. There is nothing we can do w/ the response at this stage. Clean // out the wire of the response so it's out of the way and we can get other responses on // this connection. int readSoFar = getTotalSizeWhenWrittenDelimited(responseHeader); int whatIsLeftToRead = totalSize - readSoFar; IOUtils.skipFully(in, whatIsLeftToRead); if (call != null) { call.callStats.setResponseSizeBytes(totalSize); call.callStats .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); } return; } if (responseHeader.hasException()) { ExceptionResponse exceptionResponse = responseHeader.getException(); RemoteException re = createRemoteException(exceptionResponse); call.setException(re); call.callStats.setResponseSizeBytes(totalSize); call.callStats .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); if (isFatalConnectionException(exceptionResponse)) { synchronized (this) { closeConn(re); } } } else { Message value = null; if (call.responseDefaultType != null) { Message.Builder builder = call.responseDefaultType.newBuilderForType(); ProtobufUtil.mergeDelimitedFrom(builder, in); value = builder.build(); } CellScanner cellBlockScanner = null; if (responseHeader.hasCellBlockMeta()) { int size = responseHeader.getCellBlockMeta().getLength(); byte[] cellBlock = new byte[size]; IOUtils.readFully(this.in, cellBlock, 0, cellBlock.length); cellBlockScanner = this.rpcClient.cellBlockBuilder.createCellScanner(this.codec, this.compressor, cellBlock); } call.setResponse(value, cellBlockScanner); call.callStats.setResponseSizeBytes(totalSize); call.callStats .setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.callStats.getStartTime()); } } catch (IOException e) { if (expectedCall) { call.setException(e); } if (e instanceof SocketTimeoutException) { // Clean up open calls but don't treat this as a fatal condition, // since we expect certain responses to not make it by the specified // {@link ConnectionId#rpcTimeout}. if (LOG.isTraceEnabled()) { LOG.trace("ignored", e); } } else { synchronized (this) { closeConn(e); } } } }
3.68
hbase_ColumnSchemaModel___setTTL
/** * @param value the desired value of the TTL attribute */ public void __setTTL(int value) { attrs.put(TTL, Integer.toString(value)); }
3.68
hibernate-validator_AnnotationDescriptor_run
/** * Runs the given privileged action, using a privileged block if required. * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary * privileged actions within HV's protection domain. */ @IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17") private static <V> V run(PrivilegedAction<V> action) { return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run(); }
3.68
hbase_LocalHBaseCluster_isLocal
/** * @param c Configuration to check. * @return True if the configuration specifies local (non-distributed) cluster mode. */ public static boolean isLocal(final Configuration c) { boolean mode = c.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); return (mode == HConstants.CLUSTER_IS_LOCAL); }
3.68
flink_HeartbeatServices_fromConfiguration
/** * Creates a HeartbeatServices instance from a {@link Configuration}. * * @param configuration Configuration to be used for the HeartbeatServices creation * @return A HeartbeatServices instance created from the given configuration */ static HeartbeatServices fromConfiguration(Configuration configuration) { long heartbeatInterval = configuration.getLong(HeartbeatManagerOptions.HEARTBEAT_INTERVAL); long heartbeatTimeout = configuration.getLong(HeartbeatManagerOptions.HEARTBEAT_TIMEOUT); int failedRpcRequestsUntilUnreachable = configuration.get(HeartbeatManagerOptions.HEARTBEAT_RPC_FAILURE_THRESHOLD); return new HeartbeatServicesImpl( heartbeatInterval, heartbeatTimeout, failedRpcRequestsUntilUnreachable); }
3.68
pulsar_TxnMetaImpl_updateTxnStatus
/** * Update the transaction status to the <tt>newStatus</tt> only when * the current status is the expected <tt>expectedStatus</tt>. * * @param newStatus the new transaction status * @param expectedStatus the expected transaction status * @return the transaction itself. * @throws InvalidTxnStatusException if the current status does not allow the transition */ @Override public synchronized TxnMetaImpl updateTxnStatus(TxnStatus newStatus, TxnStatus expectedStatus) throws InvalidTxnStatusException { checkTxnStatus(expectedStatus); if (!TransactionUtil.canTransitionTo(txnStatus, newStatus)) { throw new InvalidTxnStatusException( "Transaction `" + txnID + "` CANNOT transition from status " + txnStatus + " to " + newStatus); } this.txnStatus = newStatus; return this; }
3.68
hbase_HBaseRpcController_getRegionInfo
/** Returns Target Region's RegionInfo or null if not available or pertinent. */ default RegionInfo getRegionInfo() { return null; }
3.68
flink_ExecEdge_getOutputType
/** Returns the output {@link LogicalType} of the data passing this edge. */ public LogicalType getOutputType() { return source.getOutputType(); }
3.68
hbase_CoprocessorHost_execOperation
/** * @return True if we are to bypass (can only be <code>true</code> if ObserverOperation#isBypassable()). */ protected <O> boolean execOperation(final ObserverOperation<O> observerOperation) throws IOException { boolean bypass = false; if (observerOperation == null) { return bypass; } List<E> envs = coprocEnvironments.get(); for (E env : envs) { observerOperation.prepare(env); Thread currentThread = Thread.currentThread(); ClassLoader cl = currentThread.getContextClassLoader(); try { currentThread.setContextClassLoader(env.getClassLoader()); observerOperation.callObserver(); } catch (Throwable e) { handleCoprocessorThrowable(env, e); } finally { currentThread.setContextClassLoader(cl); } // Internal to shouldBypass, it checks if observerOperation#isBypassable(). bypass |= observerOperation.shouldBypass(); observerOperation.postEnvCall(); if (bypass) { // If CP says bypass, skip out w/o calling any following CPs; they might ruin our response. // In hbase1, this used to be called 'complete'. In hbase2, we unite bypass and 'complete'. break; } } return bypass; }
3.68
flink_StreamProjection_projectTuple23
/** * Projects a {@link Tuple} {@link DataStream} to the previously selected fields. * * @return The projected DataStream. * @see Tuple * @see DataStream */ public < T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> SingleOutputStreamOperator< Tuple23< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> projectTuple23() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo< Tuple23< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>> tType = new TupleTypeInfo< Tuple23< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>(fTypes); return dataStream.transform( "Projection", tType, new StreamProject< IN, Tuple23< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>>( fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
3.68
flink_Expander_expanded
/** Expands identifiers in a given SQL string, returning a {@link Expanded}. */ public Expanded expanded(String ori) { final Map<SqlParserPos, SqlIdentifier> identifiers = new HashMap<>(); final Map<String, SqlIdentifier> funcNameToId = new HashMap<>(); final SqlNode oriNode = planner.parser().parse(ori); // parse again because validation is stateful, that means the node tree was probably // mutated. final SqlNode validated = planner.validate(planner.parser().parse(ori)); validated.accept( new SqlBasicVisitor<Void>() { @Override public Void visit(SqlCall call) { SqlOperator operator = call.getOperator(); if (operator instanceof BridgingSqlFunction) { final SqlIdentifier functionID = ((BridgingSqlFunction) operator).getSqlIdentifier(); if (!functionID.isSimple()) { funcNameToId.put(Util.last(functionID.names), functionID); } } return super.visit(call); } @Override public Void visit(SqlIdentifier identifier) { // See SqlUtil#deriveAliasFromOrdinal, there is no good solution // to distinguish between system aliases (EXPR${number}) and user-defined ones, // so we stop expanding all of them. if (!identifier.names.get(0).startsWith("EXPR$")) { identifiers.putIfAbsent(identifier.getParserPosition(), identifier); } return null; } }); return new Expanded(oriNode, identifiers, funcNameToId); }
3.68
hadoop_TFile_getComparatorName
/** * Get the string representation of the comparator. * * @return If the TFile is not sorted by keys, an empty string will be * returned. Otherwise, the actual comparator string that is * provided during the TFile creation time will be returned. */ public String getComparatorName() { return tfileMeta.getComparatorString(); }
3.68
flink_Channel_setShipStrategyComparator
/** * Sets the ship strategy comparator for this Channel. * * @param shipStrategyComparator The ship strategy comparator to set. */ public void setShipStrategyComparator(TypeComparatorFactory<?> shipStrategyComparator) { this.shipStrategyComparator = shipStrategyComparator; }
3.68
hudi_OptionsResolver_getIndexKeys
/** * Returns the index key field values. */ public static String[] getIndexKeys(Configuration conf) { return getIndexKeyField(conf).split(","); }
3.68
hbase_MultiResponse_add
/** Add the pair to the container, grouped by the regionName. */ public void add(byte[] regionName, int originalIndex, Object resOrEx) { getResult(regionName).addResult(originalIndex, resOrEx); }
3.68
hbase_MetricsMasterQuotaSourceImpl_generateJsonQuotaSummary
/** * Summarizes the usage and limit for many targets (table or namespace) into JSON. */ private String generateJsonQuotaSummary(Iterable<Entry<String, Entry<Long, Long>>> data, String target) { StringBuilder sb = new StringBuilder(); for (Entry<String, Entry<Long, Long>> tableUsage : data) { String tableName = tableUsage.getKey(); long usage = tableUsage.getValue().getKey(); long limit = tableUsage.getValue().getValue(); if (sb.length() > 0) { sb.append(", "); } sb.append("{").append(target).append("=").append(tableName).append(", usage=").append(usage) .append(", limit=").append(limit).append("}"); } sb.insert(0, "[").append("]"); return sb.toString(); }
3.68
hbase_MasterObserver_preEnableReplicationPeer
/** * Called before enabling a replication peer. * @param peerId a short name that identifies the peer */ default void preEnableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId) throws IOException { }
3.68
hbase_SplitLogTask_parseFrom
/** * @param data Serialized data to parse. * @return A SplitLogTask instance made of the passed <code>data</code> * @see #toByteArray() */ public static SplitLogTask parseFrom(final byte[] data) throws DeserializationException { ProtobufUtil.expectPBMagicPrefix(data); try { int prefixLen = ProtobufUtil.lengthOfPBMagic(); ZooKeeperProtos.SplitLogTask.Builder builder = ZooKeeperProtos.SplitLogTask.newBuilder(); ProtobufUtil.mergeFrom(builder, data, prefixLen, data.length - prefixLen); return new SplitLogTask(builder.build()); } catch (IOException e) { throw new DeserializationException(Bytes.toStringBinary(data, 0, 64), e); } }
3.68
hadoop_S3ARemoteObject_getStatistics
/** * Gets an instance of {@code S3AInputStreamStatistics} used for reporting access metrics. * * @return an instance of {@code S3AInputStreamStatistics} used for reporting access metrics. */ public S3AInputStreamStatistics getStatistics() { return streamStatistics; }
3.68
hadoop_FixedLengthInputFormat_getRecordLength
/** * Get record length value * @param conf configuration * @return the record length, zero means none was set */ public static int getRecordLength(Configuration conf) { return conf.getInt(FIXED_RECORD_LENGTH, 0); }
3.68
framework_AbstractClientConnector_getSession
/** * Finds the {@link VaadinSession} to which this connector belongs. If the * connector has not been attached, <code>null</code> is returned. * * @return The connector's session, or <code>null</code> if not attached */ protected VaadinSession getSession() { UI uI = getUI(); if (uI == null) { return null; } else { return uI.getSession(); } }
3.68
hbase_SnapshotOfRegionAssignmentFromMeta_getTableSet
/** * Get the table set * @return the table set */ public Set<TableName> getTableSet() { return this.tableToRegionMap.keySet(); }
3.68
flink_DeclarativeSlotManager_start
/** * Starts the slot manager with the given leader id and resource manager actions. * * @param newResourceManagerId to use for communication with the task managers * @param newMainThreadExecutor to use to run code in the ResourceManager's main thread * @param newResourceAllocator to use for resource (de-)allocations * @param newResourceEventListener to use for notifications about resource events * @param newBlockedTaskManagerChecker to query whether a task manager is blocked */ @Override public void start( ResourceManagerId newResourceManagerId, Executor newMainThreadExecutor, ResourceAllocator newResourceAllocator, ResourceEventListener newResourceEventListener, BlockedTaskManagerChecker newBlockedTaskManagerChecker) { LOG.debug("Starting the slot manager."); this.resourceManagerId = Preconditions.checkNotNull(newResourceManagerId); mainThreadExecutor = Preconditions.checkNotNull(newMainThreadExecutor); resourceEventListener = Preconditions.checkNotNull(newResourceEventListener); taskExecutorManager = taskExecutorManagerFactory.apply(newMainThreadExecutor, newResourceAllocator); blockedTaskManagerChecker = Preconditions.checkNotNull(newBlockedTaskManagerChecker); started = true; registerSlotManagerMetrics(); }
3.68
flink_BuiltInSqlFunction_newBuilder
/** Builder for configuring and creating instances of {@link BuiltInSqlFunction}. */ public static Builder newBuilder() { return new Builder(); }
3.68
pulsar_TransactionMetadataStore_getTxnStatus
/** * Query the {@link TxnStatus} of a given transaction <tt>txnid</tt>. * * @param txnid transaction id * @return a future represents the result of this operation. * it returns {@link TxnStatus} of the given transaction. */ default CompletableFuture<TxnStatus> getTxnStatus(TxnID txnid) { return getTxnMeta(txnid).thenApply(TxnMeta::status); }
3.68
flink_Types_POJO
/** * Returns type information for a POJO (Plain Old Java Object) and allows specifying all fields * manually. * * <p>A type is considered a Flink POJO type, if it fulfills the conditions below. * * <ul> * <li>It is a public class, and standalone (not a non-static inner class) * <li>It has a public no-argument constructor. * <li>All non-static, non-transient fields in the class (and all superclasses) are either * public (and non-final) or have a public getter and a setter method that follows the * Java beans naming conventions for getters and setters. * <li>It is a fixed-length, null-aware composite type with non-deterministic field order. * Every field can be null independent of the field's type. * </ul> * * <p>The generic types for all fields of the POJO can be defined in a hierarchy of subclasses. * * <p>If Flink's type analyzer is unable to extract a POJO field, an {@link * org.apache.flink.api.common.functions.InvalidTypesException} is thrown. * * <p><strong>Note:</strong> In most cases the type information of fields can be determined * automatically; we recommend using {@link Types#POJO(Class)}. * * @param pojoClass POJO class * @param fields map of fields that map a name to type information. The map key is the name of * the field and the value is its type. */ public static <T> TypeInformation<T> POJO( Class<T> pojoClass, Map<String, TypeInformation<?>> fields) { final List<PojoField> pojoFields = new ArrayList<>(fields.size()); for (Map.Entry<String, TypeInformation<?>> field : fields.entrySet()) { final Field f = TypeExtractor.getDeclaredField(pojoClass, field.getKey()); if (f == null) { throw new InvalidTypesException( "Field '" + field.getKey() + "' could not be accessed."); } pojoFields.add(new PojoField(f, field.getValue())); } return new PojoTypeInfo<>(pojoClass, pojoFields); }
3.68
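A minimal sketch of calling Types.POJO with an explicit field map. WordCount and wordCountTypeInfo are illustrative names; Types.STRING and Types.INT are Flink's built-in type constants.

import java.util.HashMap;
import java.util.Map;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;

// Hypothetical POJO meeting the listed conditions: public, standalone,
// no-arg constructor, public non-final fields.
public class WordCount {
    public String word;
    public int count;
    public WordCount() {}
}

static TypeInformation<WordCount> wordCountTypeInfo() {
    Map<String, TypeInformation<?>> fields = new HashMap<>();
    fields.put("word", Types.STRING);
    fields.put("count", Types.INT);
    return Types.POJO(WordCount.class, fields);
}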
flink_ThreadInfoRequestCoordinator_triggerThreadInfoRequest
/** * Triggers collection of thread info stats of a job vertex by combining thread info responses * from given subtasks. A thread info response of a subtask in turn consists of {@code * numSamples}, collected with {@code delayBetweenSamples} milliseconds delay between them. * * @param executionsWithGateways Execution attempts together with TaskExecutors running them. * @param numSamples Number of thread info samples to collect from each subtask. * @param delayBetweenSamples Delay between consecutive samples (ms). * @param maxStackTraceDepth Maximum depth of the stack traces collected within thread info * samples. * @return A future of the completed thread info stats. */ public CompletableFuture<VertexThreadInfoStats> triggerThreadInfoRequest( Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>> executionsWithGateways, int numSamples, Duration delayBetweenSamples, int maxStackTraceDepth) { checkNotNull(executionsWithGateways, "Tasks to sample"); checkArgument(executionsWithGateways.size() > 0, "No tasks to sample"); checkArgument(numSamples >= 1, "No number of samples"); checkArgument(maxStackTraceDepth >= 0, "Negative maximum stack trace depth"); // Execution IDs of running tasks grouped by the task manager Collection<ImmutableSet<ExecutionAttemptID>> runningSubtasksIds = executionsWithGateways.keySet(); synchronized (lock) { if (isShutDown) { return FutureUtils.completedExceptionally(new IllegalStateException("Shut down")); } final int requestId = requestIdCounter++; log.debug("Triggering thread info request {}", requestId); final PendingThreadInfoRequest pending = new PendingThreadInfoRequest(requestId, runningSubtasksIds); // requestTimeout is treated as the time on top of the expected sampling duration. // Discard the request if it takes too long. We don't send cancel // messages to the task managers, but only wait for the responses // and then ignore them. long expectedDuration = numSamples * delayBetweenSamples.toMillis(); Time timeout = Time.milliseconds(expectedDuration + requestTimeout.toMillis()); // Add the pending request before scheduling the discard task to // prevent races with removing it again. pendingRequests.put(requestId, pending); ThreadInfoSamplesRequest requestParams = new ThreadInfoSamplesRequest( requestId, numSamples, delayBetweenSamples, maxStackTraceDepth); requestThreadInfo(executionsWithGateways, requestParams, timeout); return pending.getStatsFuture(); } }
3.68
hbase_HBaseFsckRepair_fixUnassigned
/** * Fix unassigned by creating/transition the unassigned ZK node for this region to OFFLINE state * with a special flag to tell the master that this is a forced operation by HBCK. This assumes * that info is in META. */ public static void fixUnassigned(Admin admin, RegionInfo region) throws IOException, KeeperException, InterruptedException { // Force ZK node to OFFLINE so master assigns forceOfflineInZK(admin, region); }
3.68
querydsl_JTSGeometryExpressions_dwithin
/** * Returns true if the geometries are within the specified distance of one another. * For geometry, units are those of the spatial reference; for geography, units are meters. * * @param expr1 geometry * @param expr2 other geometry * @param distance distance * @return true if within the given distance of each other */ public static BooleanExpression dwithin(Expression<? extends Geometry> expr1, Expression<? extends Geometry> expr2, double distance) { return Expressions.booleanOperation(SpatialOps.DWITHIN, expr1, expr2, ConstantImpl.create(distance)); }
3.68
hbase_RequestConverter_buildGetQuotaStatesRequest
/** * Returns a {@link GetQuotaStatesRequest} object. */ public static GetQuotaStatesRequest buildGetQuotaStatesRequest() { return GetQuotaStatesRequest.getDefaultInstance(); }
3.68
flink_OperationExecutorFactory_getColumnSize
/** * The column size for this type. For numeric data this is the maximum precision. For character * data this is the length in characters. For datetime types this is the length in characters of * the String representation (assuming the maximum allowed precision of the fractional seconds * component). For binary data this is the length in bytes. Null is returned for data types * where the column size is not applicable. */ private static @Nullable Integer getColumnSize(LogicalType columnType) { switch (columnType.getTypeRoot()) { case TINYINT: return 3; case SMALLINT: return 5; case INTEGER: case DATE: return 10; case BIGINT: return 19; case FLOAT: return 7; case DOUBLE: return 15; case DECIMAL: return ((DecimalType) columnType).getPrecision(); case VARCHAR: case BINARY: return Integer.MAX_VALUE; case TIMESTAMP_WITHOUT_TIME_ZONE: return 29; default: return null; } }
3.68
flink_ThreadBase_internalHandleException
/** * Internally handles an exception and makes sure that this method returns without a problem. * * @param ioex The exception to handle. */ protected final void internalHandleException(IOException ioex) { if (!isRunning()) { // discard any exception that occurs after the thread was killed. return; } if (this.exceptionHandler != null) { try { this.exceptionHandler.handleException(ioex); } catch (Throwable ignored) { } } }
3.68
framework_AbstractConnector_onUnregister
/* * (non-Javadoc) * * @see com.vaadin.client.ServerConnector#onUnregister() */ @Override public void onUnregister() { if (debugLogging) { getLogger().info( "Unregistered connector " + Util.getConnectorString(this)); } }
3.68
flink_ParquetAvroWriters_forReflectRecord
/** * Creates a ParquetWriterFactory for the given type. The Parquet writers will use Avro to * reflectively create a schema for the type and use that schema to write the columnar data. * * @param type The class of the type to write. */ public static <T> ParquetWriterFactory<T> forReflectRecord(Class<T> type) { return AvroParquetWriters.forReflectRecord(type); }
3.68
pulsar_ConfigValidation_validateConfig
/** * Validate the config object with default annotation class. * @param config config object */ public static void validateConfig(Object config) { validateConfig(config, DEFAULT_ANNOTATION_CLASS); }
3.68
flink_DynamicSinkUtils_fixCollectDataType
/** Temporary solution until we drop legacy types. */ private static DataType fixCollectDataType( DataTypeFactory dataTypeFactory, ResolvedSchema schema) { final DataType fixedDataType = DataTypeUtils.transform( dataTypeFactory, schema.toSourceRowDataType(), TypeTransformations.legacyRawToTypeInfoRaw(), TypeTransformations.legacyToNonLegacy()); // TODO erase the conversion class earlier when dropping legacy code, esp. FLINK-22321 return TypeConversions.fromLogicalToDataType(fixedDataType.getLogicalType()); }
3.68
hbase_BaseReplicationEndpoint_getNamespaceTableCfWALEntryFilter
/** * Returns a WALEntryFilter for checking replication per table and CF. Subclasses can return null * if they don't want this filter */ protected WALEntryFilter getNamespaceTableCfWALEntryFilter() { return new NamespaceTableCfWALEntryFilter(ctx.getReplicationPeer()); }
3.68
flink_BufferConsumer_build
/** * @return sliced {@link Buffer} containing the not yet consumed data. Returned {@link Buffer} * shares the reference counter with the parent {@link BufferConsumer} - in order to recycle * memory both of them must be recycled/closed. */ public Buffer build() { writerPosition.update(); int cachedWriterPosition = writerPosition.getCached(); Buffer slice = buffer.readOnlySlice( currentReaderPosition, cachedWriterPosition - currentReaderPosition); currentReaderPosition = cachedWriterPosition; return slice.retainBuffer(); }
3.68
framework_OptionGroup_setItemEnabled
/** * Sets an item disabled or enabled. In the multiselect mode, a disabled * item cannot be selected or deselected by the user. In the single * selection mode, a disabled item cannot be selected. * * However, programmatic selection or deselection of a disabled item is * possible. By default, items are enabled. * * @param itemId * the id of the item to be disabled or enabled * @param enabled * if true the item is enabled, otherwise the item is disabled */ public void setItemEnabled(Object itemId, boolean enabled) { if (itemId != null) { if (enabled) { disabledItemIds.remove(itemId); } else { disabledItemIds.add(itemId); } markAsDirty(); } }
3.68
hbase_Struct_decode
/** * Read the field at {@code index}. {@code src}'s position is not affected. */ public Object decode(PositionedByteRange src, int index) { assert index >= 0; StructIterator it = iterator(src.shallowCopy()); for (; index > 0; index--) { it.skip(); } return it.next(); }
3.68
pulsar_ManagedCursorImpl_persistPositionWhenClosing
/** * Persist given markDelete position to cursor-ledger or zk-metaStore based on the max number of unack-ranges * that can be persisted in the zk-metastore. If the current number of unack-ranges is higher than the configured * threshold, the broker persists the mark-delete into the cursor-ledger, else into the zk-metastore. * * @param position * @param properties * @param callback * @param ctx */ void persistPositionWhenClosing(PositionImpl position, Map<String, Long> properties, final AsyncCallbacks.CloseCallback callback, final Object ctx) { if (shouldPersistUnackRangesToLedger()) { persistPositionToLedger(cursorLedger, new MarkDeleteEntry(position, properties, null, null), new VoidCallback() { @Override public void operationComplete() { log.info("[{}][{}] Updated md-position={} into cursor-ledger {}", ledger.getName(), name, markDeletePosition, cursorLedger.getId()); asyncCloseCursorLedger(callback, ctx); } @Override public void operationFailed(ManagedLedgerException e) { log.warn("[{}][{}] Failed to persist mark-delete position into cursor-ledger {}: {}", ledger.getName(), name, cursorLedger.getId(), e.getMessage()); callback.closeFailed(e, ctx); } }); } else { persistPositionMetaStore(-1, position, properties, new MetaStoreCallback<Void>() { @Override public void operationComplete(Void result, Stat stat) { log.info("[{}][{}] Closed cursor at md-position={}", ledger.getName(), name, markDeletePosition); // At this point the position had already been safely stored in the cursor z-node callback.closeComplete(ctx); asyncDeleteLedger(cursorLedger); } @Override public void operationFailed(MetaStoreException e) { log.warn("[{}][{}] Failed to update cursor info when closing: {}", ledger.getName(), name, e.getMessage()); callback.closeFailed(e, ctx); } }, true); } }
3.68
querydsl_JTSGeometryExpressions_asJTSGeometry
/** * Create a new JTSGeometryExpression * * @param value Geometry * @return new JTSGeometryExpression */ public static <T extends Geometry> JTSGeometryExpression<T> asJTSGeometry(T value) { return asJTSGeometry(Expressions.constant(value)); }
3.68
framework_TouchScrollDelegate_moveTransformationToScrolloffset
/** * Called at the end of scrolling. Moves possible translate values to * scrolltop, causing onscroll event. */ private void moveTransformationToScrolloffset() { if (ANDROID_WITH_BROKEN_SCROLL_TOP) { scrolledElement.setPropertyInt("_vScrollTop", finalScrollTop); if (scrollHandler != null) { scrollHandler.onScroll(null); } } else { for (Element el : layers) { Style style = el.getStyle(); style.setProperty("webkitTransform", "translate3d(0,0,0)"); } scrolledElement.setScrollTop(finalScrollTop); } activeScrollDelegate = null; handlerRegistration.removeHandler(); handlerRegistration = null; }
3.68
framework_StateChangeEvent_hasPropertyChanged
/** * Checks whether the given property has changed. * * @param property * the name of the property to check * @return <code>true</code> if the property has changed, else * <code>false</code> */ public boolean hasPropertyChanged(String property) { if (isInitialStateChange()) { // Everything has changed for a new connector return true; } else if (stateJson != null) { // Check whether it's in the json object return isInJson(property, Util.json2jso(stateJson)); } else { // Legacy cases if (changedProperties != null) { // Check legacy stuff return changedProperties.contains(property); } else if (changedPropertiesSet != null) { // Check legacy stuff return changedPropertiesSet.contains(property); } else { throw new IllegalStateException( "StateChangeEvent should have either stateJson, changedProperties or changePropertiesSet"); } } }
3.68
dubbo_ReferenceAnnotationBeanPostProcessor_postProcessProperties
/** * Alternative to {@link #postProcessPropertyValues(PropertyValues, PropertyDescriptor[], Object, String)}. * @see #postProcessPropertyValues */ @Override public PropertyValues postProcessProperties(PropertyValues pvs, Object bean, String beanName) throws BeansException { try { AnnotatedInjectionMetadata metadata = findInjectionMetadata(beanName, bean.getClass(), pvs); prepareInjection(metadata); metadata.inject(bean, beanName, pvs); } catch (BeansException ex) { throw ex; } catch (Throwable ex) { throw new BeanCreationException( beanName, "Injection of @" + getAnnotationType().getSimpleName() + " dependencies failed", ex); } return pvs; }
3.68
hadoop_IOStatisticsSupport_snapshotIOStatistics
/** * Create a snapshot statistics instance ready to aggregate data. * * The instance can be serialized, and its * {@code toString()} method lists all the values. * @return an empty snapshot */ public static IOStatisticsSnapshot snapshotIOStatistics() { return new IOStatisticsSnapshot(); } /** * Get the IOStatistics of the source, casting it * if it is of the relevant type, otherwise, * if it implements {@link IOStatisticsSource} * extracting the value. * * Returns null if the source isn't of the right type * or the return value of * {@link IOStatisticsSource#getIOStatistics()}
3.68
hadoop_S3ClientFactory_withPathUri
/** * Set the full s3a path. * Added in HADOOP-18330. * @param value new value * @return the builder */ public S3ClientCreationParameters withPathUri( final URI value) { pathUri = value; return this; }
3.68
morf_SpreadsheetDataSetProducer_getDestinationWorksheet
/** * Gets the name of the destination worksheet for the given hyperlink. * * @param hyperlink Hyperlink to determine worksheet name for * @return the name of the worksheet that the hyperlink points to */ private String getDestinationWorksheet(HyperlinkRecord hyperlink) { /* * Hyperlinks will be either to a specific cell or to a worksheet as a * whole. If the regular expression for the sheet name part of a link * doesn't match then the hyperlink must be to a worksheet as a whole. */ final Matcher matcher = sheetName.matcher(hyperlink.getLocation()); if (matcher.matches()) { return matcher.group(1); } else { return hyperlink.getLocation(); } }
3.68
framework_Link_getTargetHeight
/** * Returns the target window height or -1 if not set. * * @return the target window height. */ public int getTargetHeight() { return getState(false).targetHeight < 0 ? -1 : getState(false).targetHeight; }
3.68
framework_AbstractListingConnector_isRowSelected
/** * Returns whether the given row is selected. * * @param row * the row * @return {@code true} if the row is selected, {@code false} otherwise */ protected boolean isRowSelected(JsonObject row) { return row.hasKey(DataCommunicatorConstants.SELECTED); }
3.68
graphhopper_TranslationMap_doImport
/** * This loads the translation files from classpath. */ public TranslationMap doImport() { try { for (String locale : LOCALES) { TranslationHashMap trMap = new TranslationHashMap(getLocale(locale)); trMap.doImport(TranslationMap.class.getResourceAsStream(locale + ".txt")); add(trMap); } postImportHook(); return this; } catch (Exception ex) { throw new RuntimeException(ex); } }
3.68
hbase_HFileReaderImpl_getEntries
/** Returns number of KV entries in this HFile */ @Override public long getEntries() { return trailer.getEntryCount(); }
3.68
flink_DeltaEvictor_of
/** * Creates a {@code DeltaEvictor} from the given threshold, {@code DeltaFunction}. Eviction is * done before/after the window function based on the value of doEvictAfter. * * @param threshold The threshold * @param deltaFunction The {@code DeltaFunction} * @param doEvictAfter Whether eviction should be done after window function */ public static <T, W extends Window> DeltaEvictor<T, W> of( double threshold, DeltaFunction<T> deltaFunction, boolean doEvictAfter) { return new DeltaEvictor<>(threshold, deltaFunction, doEvictAfter); }
3.68
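A sketch of constructing the evictor above, assuming a hypothetical SensorReading POJO with a public double value field; DeltaFunction and DeltaEvictor are the Flink types the snippet names.

import org.apache.flink.streaming.api.functions.windowing.delta.DeltaFunction;
import org.apache.flink.streaming.api.windowing.evictors.DeltaEvictor;

// Evict elements whose value drifts more than 10.0 from the newest one;
// 'true' applies eviction after the window function runs.
DeltaFunction<SensorReading> delta =
    (oldPoint, newPoint) -> Math.abs(newPoint.value - oldPoint.value);
DeltaEvictor<SensorReading, ?> evictor = DeltaEvictor.of(10.0, delta, true);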
framework_VRichTextArea_removeBlurHandler
/** * Removes a blur handler. * * @param blurHandler * the handler to remove */ public void removeBlurHandler(BlurHandler blurHandler) { HandlerRegistration registration = blurHandlers.remove(blurHandler); if (registration != null) { registration.removeHandler(); } }
3.68
flink_ZooKeeperStateHandleStore_getAndLock
/** * Gets the {@link RetrievableStateHandle} stored in the given ZooKeeper node and locks it. A * locked node cannot be removed by another {@link ZooKeeperStateHandleStore} instance as long * as this instance remains connected to ZooKeeper. * * @param pathInZooKeeper Path to the ZooKeeper node which contains the state handle * @return The retrieved state handle from the specified ZooKeeper node * @throws IOException Thrown if the method failed to deserialize the stored state handle * @throws Exception Thrown if a ZooKeeper operation failed */ @Override public RetrievableStateHandle<T> getAndLock(String pathInZooKeeper) throws Exception { return get(pathInZooKeeper, true); }
3.68
hadoop_TaskTrackerInfo_getTaskTrackerName
/** * Gets the tasktracker's name. * * @return tracker's name. */ public String getTaskTrackerName() { return name; }
3.68
hbase_ResultScanner_hasNext
// return true if there is another item pending, false if there isn't. // this method is where the actual advancing takes place, but you need // to call next() to consume it. hasNext() will only advance if there // isn't a pending next(). @Override public boolean hasNext() { if (next != null) { return true; } try { return (next = ResultScanner.this.next()) != null; } catch (IOException e) { throw new UncheckedIOException(e); } }
3.68
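The same look-ahead idiom as the hasNext() above, extracted into a self-contained generic iterator; all names here are illustrative. hasNext() fetches and caches one element, next() hands the cached element out.

import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.function.Supplier;

class LookAheadIterator<T> implements Iterator<T> {
    private final Supplier<T> fetch; // returns null when exhausted
    private T next;

    LookAheadIterator(Supplier<T> fetch) { this.fetch = fetch; }

    @Override public boolean hasNext() {
        if (next != null) return true;
        return (next = fetch.get()) != null;
    }

    @Override public T next() {
        if (!hasNext()) throw new NoSuchElementException();
        T out = next;
        next = null; // consume the cached element
        return out;
    }
}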
hadoop_AzureFileSystemInstrumentation_serverErrorEncountered
/** * Indicate that we just encountered a server-caused error. */ public void serverErrorEncountered() { serverErrors.incr(); }
3.68
framework_GridDragSource_getDraggedItems
/** * Collects the dragged items of a Grid given the list of item keys. */ private List<T> getDraggedItems(Grid<T> grid, List<String> draggedItemKeys) { if (draggedItemKeys == null || draggedItemKeys.isEmpty()) { throw new IllegalStateException( "The drag event does not contain dragged items"); } return draggedItemKeys.stream() .map(key -> grid.getDataCommunicator().getKeyMapper().get(key)) .collect(Collectors.toList()); }
3.68
flink_NFACompiler_canProduceEmptyMatches
/** * Verifies if the provided pattern can possibly generate empty match. Example of patterns that * can possibly generate empty matches are: A*, A?, A* B? etc. * * @param pattern pattern to check * @return true if empty match could potentially match the pattern, false otherwise */ public static boolean canProduceEmptyMatches(final Pattern<?, ?> pattern) { NFAFactoryCompiler<?> compiler = new NFAFactoryCompiler<>(checkNotNull(pattern)); compiler.compileFactory(); State<?> startState = compiler.getStates().stream() .filter(State::isStart) .findFirst() .orElseThrow( () -> new IllegalStateException( "Compiler produced no start state. It is a bug. File a jira.")); Set<State<?>> visitedStates = new HashSet<>(); final Stack<State<?>> statesToCheck = new Stack<>(); statesToCheck.push(startState); while (!statesToCheck.isEmpty()) { final State<?> currentState = statesToCheck.pop(); if (visitedStates.contains(currentState)) { continue; } else { visitedStates.add(currentState); } for (StateTransition<?> transition : currentState.getStateTransitions()) { if (transition.getAction() == StateTransitionAction.PROCEED) { if (transition.getTargetState().isFinal()) { return true; } else { statesToCheck.push(transition.getTargetState()); } } } } return false; }
3.68
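The traversal in canProduceEmptyMatches is a plain iterative DFS with a visited set and early exit. Stripped of the NFA types (Node and proceedTargets are hypothetical stand-ins for State and its PROCEED transitions), the skeleton is:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.Set;

static boolean reachesFinal(Node start) {
    Set<Node> visited = new HashSet<>();
    Deque<Node> toCheck = new ArrayDeque<>();
    toCheck.push(start);
    while (!toCheck.isEmpty()) {
        Node current = toCheck.pop();
        if (!visited.add(current)) {
            continue; // already explored
        }
        for (Node target : current.proceedTargets()) {
            if (target.isFinal()) {
                return true; // an empty match can reach a final state
            }
            toCheck.push(target);
        }
    }
    return false;
}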
hbase_MasterObserver_postListReplicationPeers
/** * Called after list replication peers. * @param ctx the environment to interact with the framework and master * @param regex The regular expression to match peer id */ default void postListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx, String regex) throws IOException { }
3.68
graphhopper_Path_getFinalEdge
/** * Yields the final edge of the path */ public EdgeIteratorState getFinalEdge() { return graph.getEdgeIteratorState(edgeIds.get(edgeIds.size() - 1), endNode); }
3.68
hbase_CoprocessorHost_execOperationWithResult
/** * Do not call with an observerOperation that is null! Have the caller check. */ protected <O, R> R execOperationWithResult( final ObserverOperationWithResult<O, R> observerOperation) throws IOException { boolean bypass = execOperation(observerOperation); R result = observerOperation.getResult(); return bypass == observerOperation.isBypassable() ? result : null; }
3.68
flink_DefaultFailureEnricherContext_forTaskManagerFailure
/** Factory method returning a TaskManager failure Context for the given params. */ public static Context forTaskManagerFailure( JobID jobID, String jobName, MetricGroup metricGroup, Executor ioExecutor, ClassLoader classLoader) { return new DefaultFailureEnricherContext( jobID, jobName, metricGroup, FailureType.TASK_MANAGER, ioExecutor, classLoader); }
3.68
framework_DragAndDropWrapper_getDraggedComponent
/** * The component in the wrapper that is being dragged, or null if the * transferable is not a component (most likely an html5 drag). * * @return the dragged component, or null */ public Component getDraggedComponent() { Component object = (Component) getData("component"); return object; }
3.68
hbase_CommonFSUtils_listLocatedStatus
/** * Calls fs.listFiles() to get FileStatus and BlockLocations together for reducing rpc call * @param fs file system * @param dir directory * @return LocatedFileStatus list */ public static List<LocatedFileStatus> listLocatedStatus(final FileSystem fs, final Path dir) throws IOException { List<LocatedFileStatus> status = null; try { RemoteIterator<LocatedFileStatus> locatedFileStatusRemoteIterator = fs.listFiles(dir, false); while (locatedFileStatusRemoteIterator.hasNext()) { if (status == null) { status = Lists.newArrayList(); } status.add(locatedFileStatusRemoteIterator.next()); } } catch (FileNotFoundException fnfe) { // if directory doesn't exist, return null if (LOG.isTraceEnabled()) { LOG.trace("{} doesn't exist", dir); } } return status; }
3.68
graphhopper_OSMReader_calcDistance
/** * @return the distance of the given way or NaN if some nodes were missing */ private double calcDistance(ReaderWay way, WaySegmentParser.CoordinateSupplier coordinateSupplier) { LongArrayList nodes = way.getNodes(); // every way has at least two nodes according to our acceptWay function GHPoint3D prevPoint = coordinateSupplier.getCoordinate(nodes.get(0)); if (prevPoint == null) return Double.NaN; boolean is3D = !Double.isNaN(prevPoint.ele); double distance = 0; for (int i = 1; i < nodes.size(); i++) { GHPoint3D point = coordinateSupplier.getCoordinate(nodes.get(i)); if (point == null) return Double.NaN; if (Double.isNaN(point.ele) == is3D) throw new IllegalStateException("There should be elevation data for either all points or no points at all. OSM way: " + way.getId()); distance += is3D ? distCalc.calcDist3D(prevPoint.lat, prevPoint.lon, prevPoint.ele, point.lat, point.lon, point.ele) : distCalc.calcDist(prevPoint.lat, prevPoint.lon, point.lat, point.lon); prevPoint = point; } return distance; }
3.68
querydsl_ExpressionUtils_eqConst
/** * Create a {@code left == constant} expression * * @param <D> type of expressions * @param left lhs of expression * @param constant rhs of expression * @return left == constant */ public static <D> Predicate eqConst(Expression<D> left, D constant) { return eq(left, ConstantImpl.create(constant)); }
3.68
hadoop_SharedKeyCredentials_appendCanonicalizedElement
/** * Append a string to a string builder with a newline constant. * * @param builder the StringBuilder object * @param element the string to append. */ private static void appendCanonicalizedElement(final StringBuilder builder, final String element) { builder.append("\n"); builder.append(element); }
3.68
hadoop_NodePlan_setTimeStamp
/** * Sets the timestamp when this plan was created. * * @param timeStamp the creation timestamp */ public void setTimeStamp(long timeStamp) { this.timeStamp = timeStamp; }
3.68
framework_TextAreaElement_getValue
/** * Return value of the field element. * * @since 8.0 * @return value of the field element */ @Override public String getValue() { return getAttribute("value"); }
3.68
flink_RocksDBMemoryConfiguration_setHighPriorityPoolRatio
/** * Sets the fraction of the total memory to be used for high priority blocks like indexes, * dictionaries, etc. This only has an effect if either {@link #setUseManagedMemory(boolean)} or * {@link #setFixedMemoryPerSlot(MemorySize)} is set. * * <p>See {@link RocksDBOptions#HIGH_PRIORITY_POOL_RATIO} for details. */ public void setHighPriorityPoolRatio(double highPriorityPoolRatio) { Preconditions.checkArgument( highPriorityPoolRatio > 0 && highPriorityPoolRatio < 1.0, "High priority pool ratio %s must be in (0, 1)", highPriorityPoolRatio); this.highPriorityPoolRatio = highPriorityPoolRatio; }
3.68
hbase_SplitTableRegionProcedure_isRollbackSupported
/* * Check whether we are in the state that can be rollback */ @Override protected boolean isRollbackSupported(final SplitTableRegionState state) { switch (state) { case SPLIT_TABLE_REGION_POST_OPERATION: case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS: case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META: case SPLIT_TABLE_REGION_UPDATE_META: // It is not safe to rollback if we reach to these states. return false; default: break; } return true; }
3.68
incubator-hugegraph-toolchain_PropertyKeyService_checkUsing
/** * Check whether the property key is being used; "used" means that some * vertex label or edge label contains the property (by name). */ public boolean checkUsing(String name, int connId) { HugeClient client = this.client(connId); List<VertexLabel> vertexLabels = client.schema().getVertexLabels(); for (VertexLabel vertexLabel : vertexLabels) { if (vertexLabel.properties().contains(name)) { return true; } } List<EdgeLabel> edgeLabels = client.schema().getEdgeLabels(); for (EdgeLabel edgeLabel : edgeLabels) { if (edgeLabel.properties().contains(name)) { return true; } } return false; }
3.68
hudi_CompletionTimeQueryView_getCompletionTime
/** * Queries the instant completion time with given start time. * * @param startTime The start time. * * @return The completion time if the instant finished or empty if it is still pending. */ public Option<String> getCompletionTime(String startTime) { String completionTime = this.startToCompletionInstantTimeMap.get(startTime); if (completionTime != null) { return Option.of(completionTime); } if (HoodieTimeline.compareTimestamps(startTime, GREATER_THAN_OR_EQUALS, this.cursorInstant)) { // the instant is still pending return Option.empty(); } // the 'startTime' should be out of the eager loading range, switch to a lazy loading. // This operation is resource costly. synchronized (this) { if (HoodieTimeline.compareTimestamps(startTime, LESSER_THAN, this.cursorInstant)) { HoodieArchivedTimeline.loadInstants(metaClient, new HoodieArchivedTimeline.ClosedOpenTimeRangeFilter(startTime, this.cursorInstant), HoodieArchivedTimeline.LoadMode.SLIM, r -> true, this::readCompletionTime); } // refresh the start instant this.cursorInstant = startTime; } return Option.ofNullable(this.startToCompletionInstantTimeMap.get(startTime)); }
3.68
pulsar_AuthenticationDataProvider_getCommandData
/** * * @return authentication data which will be stored in a command */ default String getCommandData() { return null; }
3.68
hudi_HoodieCreateHandle_close
/** * Performs actions to durably persist the current changes and returns a WriteStatus object. */ @Override public List<WriteStatus> close() { LOG.info("Closing the file " + writeStatus.getFileId() + " as we are done with all the records " + recordsWritten); try { if (isClosed()) { // Handle has already been closed return Collections.emptyList(); } markClosed(); if (fileWriter != null) { fileWriter.close(); fileWriter = null; } setupWriteStatus(); LOG.info(String.format("CreateHandle for partitionPath %s fileID %s, took %d ms.", writeStatus.getStat().getPartitionPath(), writeStatus.getStat().getFileId(), writeStatus.getStat().getRuntimeStats().getTotalCreateTime())); return Collections.singletonList(writeStatus); } catch (IOException e) { throw new HoodieInsertException("Failed to close the Insert Handle for path " + path, e); } }
3.68
framework_DragSourceExtensionConnector_isAndoidChrome
/** * Returns whether the current browser is Android Chrome. * * @return {@code true} if Android Chrome, {@code false} if not * */ protected boolean isAndoidChrome() { BrowserInfo browserInfo = BrowserInfo.get(); return browserInfo.isAndroid() && browserInfo.isChrome(); }
3.68
flink_ManuallyTriggeredScheduledExecutorService_triggerNonPeriodicScheduledTasksWithRecursion
/** * Triggers all non-periodically scheduled tasks. In contrast to {@link * #triggerNonPeriodicScheduledTasks()}, if such a task schedules another non-periodically * schedule task, then this new task will also be triggered. */ public void triggerNonPeriodicScheduledTasksWithRecursion() { while (!nonPeriodicScheduledTasks.isEmpty()) { final ScheduledTask<?> scheduledTask = nonPeriodicScheduledTasks.poll(); if (!scheduledTask.isCancelled()) { scheduledTask.execute(); } } }
3.68
framework_ViewChangeListener_getParameterMap
/** * Returns the parameters for the view being activated parsed to a map, * using the given string as the parameter separator character. * * @param separator * the parameter separator string to use * @return navigation parameters (potentially bookmarkable) for the new * view * @since 8.1 */ public Map<String, String> getParameterMap(String separator) { return getNavigator().parseParameterStringToMap(getParameters(), separator); }
3.68
hbase_CommonFSUtils_isMatchingTail
/** * Compare path component of the Path URI; e.g. if hdfs://a/b/c and /a/b/c, it will compare the * '/a/b/c' part. If you passed in 'hdfs://a/b/c and b/c, it would return true. Does not consider * schema; i.e. if schemas differ but the path or subpath matches, the two will equate. * @param pathToSearch Path we will be trying to match against * @param pathTail what to match * @return True if <code>pathTail</code> is tail on the path of <code>pathToSearch</code> */ public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) { if (pathToSearch.depth() != pathTail.depth()) { return false; } Path tailPath = pathTail; String tailName; Path toSearch = pathToSearch; String toSearchName; boolean result = false; do { tailName = tailPath.getName(); if (tailName == null || tailName.length() <= 0) { result = true; break; } toSearchName = toSearch.getName(); if (toSearchName == null || toSearchName.length() <= 0) { break; } // Move up a parent on each path for next go around. Path doesn't let us go off the end. tailPath = tailPath.getParent(); toSearch = toSearch.getParent(); } while (tailName.equals(toSearchName)); return result; }
3.68
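Expected behaviour of isMatchingTail per its javadoc, with illustrative paths; org.apache.hadoop.fs.Path is the real type.

import org.apache.hadoop.fs.Path;

// Same depth and same trailing components -> true.
boolean same = CommonFSUtils.isMatchingTail(
    new Path("hdfs://nn/a/b/c"), new Path("/a/b/c"));   // true
// Same depth but one differing component -> false.
boolean differs = CommonFSUtils.isMatchingTail(
    new Path("hdfs://nn/a/b/c"), new Path("/a/x/c"));   // false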
incubator-hugegraph-toolchain_ElementBuilder_retainField
/** * Retain only the key-value pairs needed by the current vertex or edge */ protected boolean retainField(String fieldName, Object fieldValue) { ElementMapping mapping = this.mapping(); Set<String> selectedFields = mapping.selectedFields(); Set<String> ignoredFields = mapping.ignoredFields(); // Retain selected fields or remove ignored fields if (!selectedFields.isEmpty() && !selectedFields.contains(fieldName)) { return false; } if (!ignoredFields.isEmpty() && ignoredFields.contains(fieldName)) { return false; } String mappedKey = mapping.mappingField(fieldName); Set<String> nullableKeys = this.schemaLabel().nullableKeys(); Set<Object> nullValues = mapping.nullValues(); if (nullableKeys.isEmpty() || nullValues.isEmpty()) { return true; } return !nullableKeys.contains(mappedKey) || !nullValues.contains(fieldValue); }
3.68
hbase_RequestConverter_buildCatalogScanRequest
/** * Creates a request for running a catalog scan * @return A {@link RunCatalogScanRequest} */ public static RunCatalogScanRequest buildCatalogScanRequest() { return RunCatalogScanRequest.getDefaultInstance(); }
3.68
flink_RocksDBNativeMetricOptions_enableMemTableFlushPending
/** Enables the metric that reports 1 if a memtable flush is pending and 0 otherwise. */ public void enableMemTableFlushPending() { this.properties.add(RocksDBProperty.MemTableFlushPending.getRocksDBProperty()); }
3.68