Dataset columns: name (string, length 12-178), code_snippet (string, length 8-36.5k), score (float64, range 3.26-3.68)
pulsar_InMemoryDelayedDeliveryTracker_hasMessageAvailable
/** * Return true if there is at least one message that is already due for delivery. */ @Override public boolean hasMessageAvailable() { boolean hasMessageAvailable = !priorityQueue.isEmpty() && priorityQueue.peekN1() <= getCutoffTime(); if (!hasMessageAvailable) { updateTimer(); } return hasMessageAvailable; }
3.68
hbase_AbstractHBaseSaslRpcClient_getInitialResponse
/** * Computes the initial response a client sends to a server to begin the SASL challenge/response * handshake. If the client's SASL mechanism does not have an initial response, an empty token * will be returned without querying the evaluateChallenge method, since the authentication * exchange must be started by the client. * @return The client's initial response to send the server (which may be empty). */ public byte[] getInitialResponse() throws SaslException { if (saslClient.hasInitialResponse()) { return saslClient.evaluateChallenge(EMPTY_TOKEN); } return EMPTY_TOKEN; }
3.68
hbase_MasterObserver_preTruncateTableAction
/** * Called before {@link org.apache.hadoop.hbase.master.HMaster} truncates a table. Called as part * of the truncate table procedure; it is asynchronous to the truncate RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preTruncateTableAction(final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName) throws IOException { }
3.68
hadoop_IOStatisticsBinding_trackFunctionDuration
/** * Given an IOException raising function/lambda expression, * return a new one which wraps the inner and tracks * the duration of the operation, including whether * it passes/fails. * @param factory factory of duration trackers * @param statistic statistic key * @param inputFn input function * @param <A> type of argument to the input function. * @param <B> return type. * @return a new function which tracks duration and failure. */ public static <A, B> FunctionRaisingIOE<A, B> trackFunctionDuration( @Nullable DurationTrackerFactory factory, String statistic, FunctionRaisingIOE<A, B> inputFn) { return (x) -> { // create the tracker outside try-with-resources so // that failures can be set in the catcher. DurationTracker tracker = createTracker(factory, statistic); try { // exec the input function and return its value return inputFn.apply(x); } catch (IOException | RuntimeException e) { // input function failed: note it tracker.failed(); // and rethrow throw e; } finally { // update the tracker. // this is called after the catch() call would have // set the failed flag. tracker.close(); } }; }
3.68
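A hedged usage sketch for the duration-tracking wrapper above; the tracker factory, statistic name, and the wrapped FileSystem call are all illustrative, not taken from the source.

// Wrap an IOException-raising function so each call's duration and outcome are recorded.
FunctionRaisingIOE<Path, FileStatus> timedGetStatus =
    IOStatisticsBinding.trackFunctionDuration(trackerFactory, "op_get_file_status", fs::getFileStatus);
FileStatus status = timedGetStatus.apply(new Path("/tmp/data.txt")); // tracked like the original call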
hadoop_RouterAuditLogger_createFailureLog
/** * A helper api for creating an audit log for a failure event. */ static String createFailureLog(String user, String operation, String perm, String target, String description, ApplicationId appId, SubClusterId subClusterId) { StringBuilder b = createStringBuilderForFailureLog(user, operation, target, description, perm); if (appId != null) { add(Keys.APPID, appId.toString(), b); } if (subClusterId != null) { add(Keys.SUBCLUSTERID, subClusterId.toString(), b); } return b.toString(); }
3.68
morf_SqlDialect_expandInnerSelectFields
/** * Creates the fields from any inner selects on the outer select. * * @param statement the select statement to expand. */ private void expandInnerSelectFields(SelectStatement statement) { if (statement == null || !statement.getFields().isEmpty() || statement.getFromSelects().isEmpty()) { return; } for (SelectStatement selectStatement : statement.getFromSelects()) { expandInnerSelectFields(selectStatement); for (AliasedField field : selectStatement.getFields()) { statement.appendFields(new FieldReference(new TableReference(selectStatement.getAlias()), field.getAlias())); } } }
3.68
shardingsphere-elasticjob_SnapshotService_dumpJobDirectly
/** * Dump a job directly. * @param jobName the job name * @return the dumped job info */ public String dumpJobDirectly(final String jobName) { String path = "/" + jobName; final List<String> result = new ArrayList<>(); dumpDirectly(path, jobName, result); return String.join("\n", SensitiveInfoUtils.filterSensitiveIps(result)) + "\n"; }
3.68
hbase_AbstractRpcClient_configureRpcController
/** * Configure an rpc controller * @param controller to configure * @return configured rpc controller */ protected HBaseRpcController configureRpcController(RpcController controller) { HBaseRpcController hrc; // TODO: Ideally we should not use an RpcController other than HBaseRpcController at client // side. And now we may use ServerRpcController. if (controller != null && controller instanceof HBaseRpcController) { hrc = (HBaseRpcController) controller; if (!hrc.hasCallTimeout()) { hrc.setCallTimeout(rpcTimeout); } } else { hrc = new HBaseRpcControllerImpl(); hrc.setCallTimeout(rpcTimeout); } return hrc; }
3.68
framework_AnimationUtil_setAnimationDuration
/** * For internal use only. May be removed or replaced in the future. * * Set the animation-duration CSS property. * * @param elem * the element whose animation-duration to set * @param duration * the duration as a valid CSS value */ public static void setAnimationDuration(Element elem, String duration) { Style style = elem.getStyle(); style.setProperty(ANIMATION_PROPERTY_NAME + "Duration", duration); }
3.68
flink_Rowtime_watermarksFromStrategy
/** Sets a custom watermark strategy to be used for the rowtime attribute. */ public Rowtime watermarksFromStrategy(WatermarkStrategy strategy) { internalProperties.putProperties(strategy.toProperties()); return this; }
3.68
flink_UnsortedGrouping_min
/** * Syntactic sugar for aggregate (MIN, field). * * @param field The index of the Tuple field on which the aggregation function is applied. * @return An AggregateOperator that represents the min'ed DataSet. * @see org.apache.flink.api.java.operators.AggregateOperator */ public AggregateOperator<T> min(int field) { return this.aggregate(Aggregations.MIN, field, Utils.getCallLocationName()); }
3.68
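A minimal, hedged example of the shorthand above in Flink's DataSet API (values invented): min(1) is equivalent to aggregate(Aggregations.MIN, 1) on the grouping.

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Tuple2<String, Integer>> data =
    env.fromElements(Tuple2.of("a", 3), Tuple2.of("a", 1), Tuple2.of("b", 2));
data.groupBy(0).min(1).print(); // prints (a,1) and (b,2)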
hadoop_ProtobufWrapperLegacy_isUnshadedProtobufMessage
/** * Is a message an unshaded protobuf message? * @param payload payload * @return true if protobuf.jar is on the classpath and the payload is a Message */ public static boolean isUnshadedProtobufMessage(Object payload) { if (PROTOBUF_KNOWN_NOT_FOUND.get()) { // protobuf is known to be absent. fail fast without examining // jars or generating exceptions. return false; } // load the protobuf message class. // if it does not load, then the payload is guaranteed not to be // an unshaded protobuf message // this relies on classloader caching for performance try { Class<?> protobufMessageClazz = Class.forName("com.google.protobuf.Message"); return protobufMessageClazz.isAssignableFrom(payload.getClass()); } catch (ClassNotFoundException e) { PROTOBUF_KNOWN_NOT_FOUND.set(true); return false; } }
3.68
pulsar_ManagedLedgerPayloadProcessor_outputProcessor
/** * Used by ManagedLedger for processing payload after reading from bookkeeper ledger. * @return Handle to Processor instance */ default Processor outputProcessor() { return null; }
3.68
framework_BrowserInfo_isAndroidWithBrokenScrollTop
/** * Tests if this is an Android device with a broken scrollTop * implementation. * * @return true if scrollTop cannot be trusted on this device, false * otherwise */ public boolean isAndroidWithBrokenScrollTop() { return isAndroid() && (getOperatingSystemMajorVersion() == 3 || getOperatingSystemMajorVersion() == 4); }
3.68
hadoop_Tristate_isBoolean
/** * Does this value map to a boolean. * @return true if the state is one of true or false. */ public boolean isBoolean() { return mapping.isPresent(); }
3.68
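Assuming the usual TRUE/FALSE/UNKNOWN constants of this enum, the mapping-presence check behaves as in this sketch:

Tristate.TRUE.isBoolean();    // true -- maps to a definite boolean
Tristate.FALSE.isBoolean();   // true
Tristate.UNKNOWN.isBoolean(); // false -- no boolean mapping present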
hudi_BaseHoodieWriteClient_rollbackFailedWrites
/** * Rollback failed writes if any. * * @return true if rollback happened. false otherwise. */ public boolean rollbackFailedWrites() { return tableServiceClient.rollbackFailedWrites(); }
3.68
rocketmq-connect_ConnectKeyValueSerde_serde
/** * Creates a serializer and deserializer pair. * * @return a ConnectKeyValueSerde wrapping the serializer and deserializer */ public static ConnectKeyValueSerde serde() { return new ConnectKeyValueSerde(new ConnectKeyValueSerializer(), new ConnectKeyValueDeserializer()); }
3.68
pulsar_KerberosName_replaceSubstitution
/** * Replace the matches of the from pattern in the base string with the value * of the to string. * @param base the string to transform * @param from the pattern to look for in the base string * @param to the string to replace matches of the pattern with * @param repeat whether the substitution should be repeated * @return the base string with the substitution applied */ static String replaceSubstitution(String base, Pattern from, String to, boolean repeat) { Matcher match = from.matcher(base); if (repeat) { return match.replaceAll(to); } else { return match.replaceFirst(to); } }
3.68
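A hedged illustration of the repeat flag (inputs invented; package-private access assumed):

Pattern from = Pattern.compile("foo");
replaceSubstitution("foofoo", from, "bar", true);  // "barbar" -- every match replaced
replaceSubstitution("foofoo", from, "bar", false); // "barfoo" -- only the first match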
framework_BasicEventProvider_removeEventSetChangeListener
/* * (non-Javadoc) * * @see com.vaadin.addon.calendar.ui.CalendarComponentEvents. * EventSetChangeNotifier #removeListener * (com.vaadin.addon.calendar.ui.CalendarComponentEvents. * EventSetChangeListener ) */ @Override public void removeEventSetChangeListener(EventSetChangeListener listener) { listeners.remove(listener); }
3.68
querydsl_MapExpressionBase_containsValue
/** * Create a {@code value in values(this)} expression * * @param value value * @return expression */ public final BooleanExpression containsValue(V value) { return Expressions.booleanOperation(Ops.CONTAINS_VALUE, mixin, ConstantImpl.create(value)); }
3.68
dubbo_PathAndInvokerMapper_getRestMethodMetadata
/** * get rest method metadata by path matcher * * @param pathMatcher the path matcher to look up * @return the matching invoker and rest method metadata pair, or null if nothing matches */ public InvokerAndRestMethodMetadataPair getRestMethodMetadata(PathMatcher pathMatcher) { // first search from pathToServiceMapNoPathVariable if (pathToServiceMapNoPathVariable.containsKey(pathMatcher)) { return pathToServiceMapNoPathVariable.get(pathMatcher); } // second search from pathToServiceMapContainPathVariable if (pathToServiceMapContainPathVariable.containsKey(pathMatcher)) { return pathToServiceMapContainPathVariable.get(pathMatcher); } return null; }
3.68
hudi_InternalSchemaCache_getInternalSchemaByVersionId
/** * Given a schema versionId, return its InternalSchema. * This method will be called by Spark tasks, so we should minimize its time cost. * We try our best not to use metaClient, since the initialization of metaClient is time-consuming. * step1: * try to parse the internalSchema from the HoodieInstant directly * step2: * if we cannot parse the internalSchema in step1 (eg: the HoodieInstant for the current versionId has been archived), * try to find the internalSchema in historySchema. * step3: * if we cannot parse the internalSchema in step2 (eg: schema evolution was not enabled when the hoodie table was created, but was enabled after some inserts), * try to convert the table schema to an internalSchema. * @param versionId the internalSchema version to be searched. * @param tablePath table path * @param hadoopConf conf * @param validCommits currently valid commits, used to make up the commit file path/verify the validity of the history schema files * @return an InternalSchema. */ public static InternalSchema getInternalSchemaByVersionId(long versionId, String tablePath, Configuration hadoopConf, String validCommits) { String avroSchema = ""; Set<String> commitSet = Arrays.stream(validCommits.split(",")).collect(Collectors.toSet()); List<String> validateCommitList = commitSet.stream().map(HoodieInstant::extractTimestamp).collect(Collectors.toList()); FileSystem fs = FSUtils.getFs(tablePath, hadoopConf); Path hoodieMetaPath = new Path(tablePath, HoodieTableMetaClient.METAFOLDER_NAME); //step1: Path candidateCommitFile = commitSet.stream().filter(fileName -> HoodieInstant.extractTimestamp(fileName).equals(versionId + "")) .findFirst().map(f -> new Path(hoodieMetaPath, f)).orElse(null); if (candidateCommitFile != null) { try { byte[] data; try (FSDataInputStream is = fs.open(candidateCommitFile)) { data = FileIOUtils.readAsByteArray(is); } catch (IOException e) { throw e; } HoodieCommitMetadata metadata = HoodieCommitMetadata.fromBytes(data, HoodieCommitMetadata.class); String latestInternalSchemaStr = metadata.getMetadata(SerDeHelper.LATEST_SCHEMA); avroSchema = metadata.getMetadata(HoodieCommitMetadata.SCHEMA_KEY); if (latestInternalSchemaStr != null) { return SerDeHelper.fromJson(latestInternalSchemaStr).orElse(null); } } catch (Exception e1) { // swallow this exception. LOG.warn(String.format("Cannot find internal schema from commit file %s. Falling back to parsing historical internal schema", candidateCommitFile.toString())); } } // step2: FileBasedInternalSchemaStorageManager fileBasedInternalSchemaStorageManager = new FileBasedInternalSchemaStorageManager(hadoopConf, new Path(tablePath)); String latestHistorySchema = fileBasedInternalSchemaStorageManager.getHistorySchemaStrByGivenValidCommits(validateCommitList); if (latestHistorySchema.isEmpty()) { return InternalSchema.getEmptyInternalSchema(); } InternalSchema fileSchema = InternalSchemaUtils.searchSchema(versionId, SerDeHelper.parseSchemas(latestHistorySchema)); // step3: return fileSchema.isEmptySchema() ? StringUtils.isNullOrEmpty(avroSchema) ? InternalSchema.getEmptyInternalSchema() : AvroInternalSchemaConverter.convert(HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(avroSchema))) : fileSchema; }
3.68
hbase_CellVisibility_getExpression
/** Returns The visibility expression */ public String getExpression() { return this.expression; }
3.68
framework_VAccordion_getComponent
/** * Returns the wrapped widget of this stack item. * * @return the widget * * @deprecated This method is not called by the framework code anymore. * Use {@link #getChildWidget()} instead. */ @Deprecated public Widget getComponent() { return getChildWidget(); }
3.68
flink_ExecutionEnvironment_setParallelism
/** * Sets the parallelism for operations executed through this environment. Setting a parallelism * of x here will cause all operators (such as join, map, reduce) to run with x parallel * instances. * * <p>This method overrides the default parallelism for this environment. The {@link * LocalEnvironment} uses by default a value equal to the number of hardware contexts (CPU cores * / threads). When executing the program via the command line client from a JAR file, the * default parallelism is the one configured for that setup. * * @param parallelism The parallelism */ public void setParallelism(int parallelism) { config.setParallelism(parallelism); }
3.68
open-banking-gateway_TppTokenConfig_loadPrivateKey
/** * See {@code de.adorsys.opba.tppauthapi.TokenSignVerifyTest#generateNewTppKeyPair()} for details of how to * generate the encoded key. */ @SneakyThrows private PrivateKey loadPrivateKey(TppTokenProperties tppTokenProperties) { byte[] privateKeyBytes = Base64.getDecoder().decode(tppTokenProperties.getPrivateKey()); PKCS8EncodedKeySpec ks = new PKCS8EncodedKeySpec(privateKeyBytes); KeyFactory kf = KeyFactory.getInstance(tppTokenProperties.getSignAlgo()); return kf.generatePrivate(ks); }
3.68
framework_AbstractComponentTest_initializeComponents
/** * By default initializes just one instance of {@link #getTestClass()} using * {@link #constructComponent()}. */ @Override protected void initializeComponents() { component = constructComponent(); component.setId("testComponent"); addTestComponent(component); }
3.68
hbase_HRegion_updateCellTimestamps
/** * Replace any cell timestamps set to {@link org.apache.hadoop.hbase.HConstants#LATEST_TIMESTAMP} * with the provided current timestamp. */ private static void updateCellTimestamps(final Iterable<List<Cell>> cellItr, final byte[] now) throws IOException { for (List<Cell> cells : cellItr) { if (cells == null) continue; // Optimization: 'foreach' loop is not used. See: // HBASE-12023 HRegion.applyFamilyMapToMemstore creates too many iterator objects assert cells instanceof RandomAccess; int listSize = cells.size(); for (int i = 0; i < listSize; i++) { PrivateCellUtil.updateLatestStamp(cells.get(i), now); } } }
3.68
flink_MapValue_size
/* * (non-Javadoc) * @see java.util.Map#size() */ @Override public int size() { return this.map.size(); }
3.68
hbase_TableSchemaModel_toString
/* * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{ NAME=> '"); sb.append(name); sb.append('\''); for (Map.Entry<QName, Object> e : attrs.entrySet()) { sb.append(", "); sb.append(e.getKey().getLocalPart()); sb.append(" => '"); sb.append(e.getValue().toString()); sb.append('\''); } sb.append(", COLUMNS => [ "); Iterator<ColumnSchemaModel> i = columns.iterator(); while (i.hasNext()) { ColumnSchemaModel family = i.next(); sb.append(family.toString()); if (i.hasNext()) { sb.append(','); } sb.append(' '); } sb.append("] }"); return sb.toString(); }
3.68
hadoop_Server_getHomeDir
/** * Returns the server home dir. * * @return the server home dir. */ public String getHomeDir() { return homeDir; }
3.68
hbase_RecoverableZooKeeper_multi
/** * Run multiple operations in a transactional manner. Retry before throwing exception */ public List<OpResult> multi(Iterable<Op> ops) throws KeeperException, InterruptedException { final Span span = TraceUtil.createSpan("RecoverableZookeeper.multi"); try (Scope ignored = span.makeCurrent()) { RetryCounter retryCounter = retryCounterFactory.create(); Iterable<Op> multiOps = prepareZKMulti(ops); while (true) { try { span.setStatus(StatusCode.OK); return checkZk().multi(multiOps); } catch (KeeperException e) { switch (e.code()) { case CONNECTIONLOSS: case OPERATIONTIMEOUT: case REQUESTTIMEOUT: TraceUtil.setError(span, e); retryOrThrow(retryCounter, e, "multi"); break; default: TraceUtil.setError(span, e); throw e; } } retryCounter.sleepUntilNextRetry(); } } finally { span.end(); } }
3.68
zxing_MinimalEncoder_getCodewordsRemaining
/** Returns the remaining capacity in codewords of the smallest symbol that has enough capacity to fit the given * minimal number of codewords. **/ int getCodewordsRemaining(int minimum) { return getMinSymbolSize(minimum) - minimum; }
3.68
zxing_AztecCode_getMatrix
/** * @return the symbol image */ public BitMatrix getMatrix() { return matrix; }
3.68
flink_BinarySegmentUtils_allocateReuseBytes
/** * Allocate bytes that are only for temporary usage; they should not be stored anywhere else. * Use a {@link ThreadLocal} to reuse bytes to avoid the overhead of byte[] allocation and gc. * * <p>If there are methods that can only accept a byte[], instead of a MemorySegment[] * parameter, we can allocate a reuse bytes and copy the MemorySegment data to byte[], then call * the method. Such as String deserialization. */ public static byte[] allocateReuseBytes(int length) { byte[] bytes = BYTES_LOCAL.get(); if (bytes == null) { if (length <= MAX_BYTES_LENGTH) { bytes = new byte[MAX_BYTES_LENGTH]; BYTES_LOCAL.set(bytes); } else { bytes = new byte[length]; } } else if (bytes.length < length) { bytes = new byte[length]; } return bytes; }
3.68
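A sketch of the intended call pattern, with a hypothetical segment and offset: copy out of the MemorySegment into the reusable buffer and consume it immediately, never retaining the array.

int length = 32;
byte[] buffer = BinarySegmentUtils.allocateReuseBytes(length);
segment.get(offset, buffer, 0, length); // copy the bytes out of the MemorySegment
String value = new String(buffer, 0, length, StandardCharsets.UTF_8); // use right away
// never store 'buffer' elsewhere -- it is shared via the ThreadLocal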
hadoop_DynoInfraUtils_waitForNameNodeReadiness
/** * Wait for the launched NameNode to be ready, i.e. to have at least 99% of * its DataNodes register, have fewer than 0.01% of its blocks missing, and * less than 1% of its blocks under replicated. Continues until the criteria * have been met or {@code shouldExit} returns true. * * @param nameNodeProperties The set of properties containing information * about the NameNode. * @param numTotalDataNodes Total expected number of DataNodes to register. * @param triggerBlockReports Whether to proactively trigger block reports * for stale DataNodes. * @param shouldExit Should return true iff this should stop waiting. * @param conf Configuration used when contacting the cluster. * @param log Where to log information. */ static void waitForNameNodeReadiness(final Properties nameNodeProperties, int numTotalDataNodes, boolean triggerBlockReports, Supplier<Boolean> shouldExit, final Configuration conf, final Logger log) throws IOException, InterruptedException { if (shouldExit.get()) { return; } int minDataNodes = (int) (conf.getFloat(DATANODE_LIVE_MIN_FRACTION_KEY, DATANODE_LIVE_MIN_FRACTION_DEFAULT) * numTotalDataNodes); log.info(String.format( "Waiting for %d DataNodes to register with the NameNode...", minDataNodes)); waitForNameNodeJMXValue("Number of live DataNodes", FSNAMESYSTEM_STATE_JMX_QUERY, JMX_LIVE_NODE_COUNT, minDataNodes, numTotalDataNodes * 0.001, false, nameNodeProperties, shouldExit, log); final int totalBlocks = Integer.parseInt(fetchNameNodeJMXValue( nameNodeProperties, FSNAMESYSTEM_STATE_JMX_QUERY, JMX_BLOCKS_TOTAL)); final AtomicBoolean doneWaiting = new AtomicBoolean(false); if (triggerBlockReports) { // This will be significantly lower than the actual expected number of // blocks because it does not // take into account replication factor. However the block reports are // pretty binary; either a full // report has been received or it hasn't. Thus we don't mind the large // underestimate here. final int blockThreshold = totalBlocks / numTotalDataNodes * 2; // The Configuration object here is based on the host cluster, which may // have security enabled; we need to disable it to talk to the Dyno NN conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "false"); final DistributedFileSystem dfs = (DistributedFileSystem) FileSystem .get(getNameNodeHdfsUri(nameNodeProperties), conf); log.info("Launching thread to trigger block reports for Datanodes with <" + blockThreshold + " blocks reported"); Thread blockReportThread = new Thread(() -> { // Here we count both Missing and UnderReplicated within under // replicated long lastUnderRepBlocks = Long.MAX_VALUE; try { while (true) { // this will eventually exit via an interrupt try { Thread.sleep(TimeUnit.MINUTES.toMillis(1)); long underRepBlocks = Long .parseLong(fetchNameNodeJMXValue(nameNodeProperties, FSNAMESYSTEM_JMX_QUERY, JMX_MISSING_BLOCKS)) + Long.parseLong(fetchNameNodeJMXValue(nameNodeProperties, FSNAMESYSTEM_STATE_JMX_QUERY, JMX_UNDER_REPLICATED_BLOCKS)); long blockDecrease = lastUnderRepBlocks - underRepBlocks; lastUnderRepBlocks = underRepBlocks; if (blockDecrease < 0 || blockDecrease > (totalBlocks * 0.001)) { continue; } String liveNodeListString = fetchNameNodeJMXValue( nameNodeProperties, NAMENODE_INFO_JMX_QUERY, JMX_LIVE_NODES_LIST); Set<String> datanodesToReport = parseStaleDataNodeList( liveNodeListString, blockThreshold, log); if (datanodesToReport.isEmpty() && doneWaiting.get()) { log.info("BlockReportThread exiting; all DataNodes have " + "reported blocks"); break; } log.info("Queueing {} Datanodes for block report: {}", datanodesToReport.size(), Joiner.on(",").join(datanodesToReport)); DatanodeInfo[] datanodes = dfs.getDataNodeStats(); int cnt = 0; for (DatanodeInfo datanode : datanodes) { if (datanodesToReport.contains(datanode.getXferAddr(true))) { Thread.sleep(1); // to throw an interrupt if one is found triggerDataNodeBlockReport(conf, datanode.getIpcAddr(true)); cnt++; Thread.sleep(1000); } } if (cnt != datanodesToReport.size()) { log.warn("Found {} Datanodes to queue block reports for but " + "was only able to trigger {}", datanodesToReport.size(), cnt); } } catch (IOException ioe) { log.warn("Exception encountered in block report thread", ioe); } } } catch (InterruptedException ie) { // Do nothing; just exit } log.info("Block reporting thread exiting"); }); blockReportThread.setDaemon(true); blockReportThread .setUncaughtExceptionHandler(new YarnUncaughtExceptionHandler()); blockReportThread.start(); } float maxMissingBlocks = totalBlocks * conf.getFloat( MISSING_BLOCKS_MAX_FRACTION_KEY, MISSING_BLOCKS_MAX_FRACTION_DEFAULT); log.info("Waiting for MissingBlocks to fall below {}...", maxMissingBlocks); waitForNameNodeJMXValue("Number of missing blocks", FSNAMESYSTEM_JMX_QUERY, JMX_MISSING_BLOCKS, maxMissingBlocks, totalBlocks * 0.0001, true, nameNodeProperties, shouldExit, log); float maxUnderreplicatedBlocks = totalBlocks * conf.getFloat(UNDERREPLICATED_BLOCKS_MAX_FRACTION_KEY, UNDERREPLICATED_BLOCKS_MAX_FRACTION_DEFAULT); log.info("Waiting for UnderReplicatedBlocks to fall below {}...", maxUnderreplicatedBlocks); waitForNameNodeJMXValue("Number of under replicated blocks", FSNAMESYSTEM_STATE_JMX_QUERY, JMX_UNDER_REPLICATED_BLOCKS, maxUnderreplicatedBlocks, totalBlocks * 0.001, true, nameNodeProperties, shouldExit, log); log.info("NameNode is ready for use!"); doneWaiting.set(true); }
3.68
dubbo_ApplicationModel_getConsumerModel
/** * @deprecated ConsumerModel should fetch from context */ @Deprecated public static ConsumerModel getConsumerModel(String serviceKey) { return defaultModel().getDefaultModule().getServiceRepository().lookupReferredService(serviceKey); }
3.68
hbase_HFileWriterImpl_getMinimumMidpointArray
/** * Try to create a new byte array that falls between left and right as short as possible with * lexicographical order. * @return Return a new array that is between left and right and minimally sized else just return * null if left == right. */ private static byte[] getMinimumMidpointArray(ByteBuffer left, int leftOffset, int leftLength, ByteBuffer right, int rightOffset, int rightLength) { int minLength = leftLength < rightLength ? leftLength : rightLength; int diffIdx = 0; for (; diffIdx < minLength; diffIdx++) { int leftByte = ByteBufferUtils.toByte(left, leftOffset + diffIdx); int rightByte = ByteBufferUtils.toByte(right, rightOffset + diffIdx); if ((leftByte & 0xff) > (rightByte & 0xff)) { throw new IllegalArgumentException("Left byte array sorts after right row; left=" + ByteBufferUtils.toStringBinary(left, leftOffset, leftLength) + ", right=" + ByteBufferUtils.toStringBinary(right, rightOffset, rightLength)); } else if (leftByte != rightByte) { break; } } if (diffIdx == minLength) { if (leftLength > rightLength) { // right is prefix of left throw new IllegalArgumentException("Left byte array sorts after right row; left=" + ByteBufferUtils.toStringBinary(left, leftOffset, leftLength) + ", right=" + ByteBufferUtils.toStringBinary(right, rightOffset, rightLength)); } else if (leftLength < rightLength) { // left is prefix of right. byte[] minimumMidpointArray = new byte[minLength + 1]; ByteBufferUtils.copyFromBufferToArray(minimumMidpointArray, right, rightOffset, 0, minLength + 1); minimumMidpointArray[minLength] = 0x00; return minimumMidpointArray; } else { // left == right return null; } } // Note that left[diffIdx] can never be equal to 0xff since left < right byte[] minimumMidpointArray = new byte[diffIdx + 1]; ByteBufferUtils.copyFromBufferToArray(minimumMidpointArray, left, leftOffset, 0, diffIdx + 1); minimumMidpointArray[diffIdx] = (byte) (minimumMidpointArray[diffIdx] + 1); return minimumMidpointArray; }
3.68
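A hedged walk-through of the midpoint construction with made-up keys:

// left = "abc", right = "abf": first difference at index 2, so the result is "abd"
// (the common prefix plus left's differing byte incremented by one).
// left = "ab", right = "abcd": left is a prefix of right, so the result is "ab\x00"
// (right's first minLength + 1 bytes with the last byte forced to 0x00).
// left == right: returns null, since no shorter midpoint exists.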
flink_Tuple24_copy
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked") public Tuple24< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> copy() { return new Tuple24<>( this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8, this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16, this.f17, this.f18, this.f19, this.f20, this.f21, this.f22, this.f23); }
3.68
hudi_AbstractTableFileSystemView_fetchLatestFileSlices
/** * Default implementation for fetching latest file-slices for a partition path. */ Stream<FileSlice> fetchLatestFileSlices(String partitionPath) { return fetchAllStoredFileGroups(partitionPath).map(HoodieFileGroup::getLatestFileSlice).filter(Option::isPresent) .map(Option::get); }
3.68
hbase_MetricsMaster_incrementSnapshotFetchTime
/** * Records the execution time to fetch the mapping of snapshots to originating table. */ public void incrementSnapshotFetchTime(long executionTime) { masterQuotaSource.incrementSnapshotObserverSnapshotFetchTime(executionTime); }
3.68
hmily_HmilyRepositoryStorage_createHmilyTransaction
/** * Create hmily transaction. * * @param hmilyTransaction the hmily transaction */ public static void createHmilyTransaction(final HmilyTransaction hmilyTransaction) { if (Objects.nonNull(hmilyTransaction)) { PUBLISHER.publishEvent(hmilyTransaction, EventTypeEnum.CREATE_HMILY_TRANSACTION.getCode()); } }
3.68
morf_SqlServerDialect_getForUpdateSql
/** * SQL server places a shared lock on a record when it is selected without doing anything else (no MVCC) * so no need to specify a lock mode. * * @see org.alfasoftware.morf.jdbc.SqlDialect#getForUpdateSql() * @see <a href="http://stackoverflow.com/questions/10935850/when-to-use-select-for-update">When to use select for update</a> */ @Override protected String getForUpdateSql() { return StringUtils.EMPTY; }
3.68
hadoop_RollingFileSystemSink_updateFlushTime
/** * Update the {@link #nextFlush} variable to the next flush time. Add * an integer number of flush intervals, preserving the initial random offset. * * @param now the current time */ @VisibleForTesting protected void updateFlushTime(Date now) { // In non-initial rounds, add an integer number of intervals to the last // flush until a time in the future is achieved, thus preserving the // original random offset. int millis = (int) (((now.getTime() - nextFlush.getTimeInMillis()) / rollIntervalMillis + 1) * rollIntervalMillis); nextFlush.add(Calendar.MILLISECOND, millis); }
3.68
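A worked example of the interval arithmetic above (times invented): with rollIntervalMillis = 60000 and nextFlush = 10:00:07, a call at now = 10:03:42 computes (215000 / 60000 + 1) = 4 intervals, so 240000 ms are added and nextFlush becomes 10:04:07 -- the original :07 offset is preserved.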
hbase_SimpleRpcServer_start
/** Starts the service. Must be called before any calls will be handled. */ @Override public synchronized void start() { if (started) { return; } authTokenSecretMgr = createSecretManager(); if (authTokenSecretMgr != null) { // Start AuthenticationTokenSecretManager in synchronized way to avoid race conditions in // LeaderElector start. See HBASE-25875 synchronized (authTokenSecretMgr) { setSecretManager(authTokenSecretMgr); authTokenSecretMgr.start(); } } this.authManager = new ServiceAuthorizationManager(); HBasePolicyProvider.init(conf, authManager); responder.start(); listener.start(); scheduler.start(); started = true; }
3.68
hbase_BaseLoadBalancer_updateBalancerStatus
/** * Updates the balancer status tag reported to JMX */ @Override public void updateBalancerStatus(boolean status) { metricsBalancer.balancerStatus(status); }
3.68
framework_NativeButtonClick_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Validate click event coordinates not erroneously returned as x=0, y=0"; }
3.68
morf_AbstractSqlDialectTest_expectedPreInsertStatementsNotInsertingUnderAutonumLimit
/** * @return The expected SQL statements to be run prior to insert for the test database table. */ protected List<String> expectedPreInsertStatementsNotInsertingUnderAutonumLimit() { return Collections.emptyList(); }
3.68
flink_HttpRequestHandler_checkAndCreateUploadDir
/** * Checks whether the given directory exists and is writable. If it doesn't exist this method * will attempt to create it. * * @param uploadDir directory to check * @throws IOException if the directory does not exist and cannot be created, or if the * directory isn't writable */ public static synchronized void checkAndCreateUploadDir(File uploadDir) throws IOException { if (uploadDir.exists() && uploadDir.canWrite()) { LOG.info("Using directory {} for web frontend JAR file uploads.", uploadDir); } else if (uploadDir.mkdirs() && uploadDir.canWrite()) { LOG.info("Created directory {} for web frontend JAR file uploads.", uploadDir); } else { LOG.warn( "Jar upload directory {} cannot be created or is not writable.", uploadDir.getAbsolutePath()); throw new IOException( String.format( "Jar upload directory %s cannot be created or is not writable.", uploadDir.getAbsolutePath())); } }
3.68
hbase_CompactionPipeline_flattenOneSegment
/** * If the caller holds the current version, go over the pipeline and try to flatten each * segment. Flattening replaces the ConcurrentSkipListMap based CellSet with a CellArrayMap based one. * Flattening of a segment that initially is not based on ConcurrentSkipListMap has no effect. * Returns after one segment is successfully flattened. * @return true iff a segment was successfully flattened */ public boolean flattenOneSegment(long requesterVersion, CompactingMemStore.IndexType idxType, MemStoreCompactionStrategy.Action action) { if (requesterVersion != version) { LOG.warn("Segment flattening failed, because versions do not match. Requester version: " + requesterVersion + ", actual version: " + version); return false; } synchronized (pipeline) { if (requesterVersion != version) { LOG.warn("Segment flattening failed, because versions do not match"); return false; } int i = -1; for (ImmutableSegment s : pipeline) { i++; if (s.canBeFlattened()) { s.waitForUpdates(); // to ensure all updates preceding s in-memory flush have completed if (s.isEmpty()) { // after s.waitForUpdates() is called, there are no updates pending; if there are no cells in s, // we can skip it. continue; } // size to be updated MemStoreSizing newMemstoreAccounting = new NonThreadSafeMemStoreSizing(); ImmutableSegment newS = SegmentFactory.instance().createImmutableSegmentByFlattening( (CSLMImmutableSegment) s, idxType, newMemstoreAccounting, action); replaceAtIndex(i, newS); if (region != null) { // Update the global memstore size counter upon flattening there is no change in the // data size MemStoreSize mss = newMemstoreAccounting.getMemStoreSize(); region.addMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize(), mss.getCellsCount()); } LOG.debug("Compaction pipeline segment {} flattened", s); return true; } } } // do not update the global memstore size counter and do not increase the version, // because all the cells remain in place return false; }
3.68
pulsar_ClientConfiguration_getTlsTrustCertsFilePath
/** * @return path to the trusted TLS certificate file */ public String getTlsTrustCertsFilePath() { return confData.getTlsTrustCertsFilePath(); }
3.68
hbase_HRegion_replayWALBulkLoadEventMarker
/** * @deprecated Since 3.0.0, will be removed in 4.0.0. Only to keep compatibility for the old region * replica implementation. */ @Deprecated void replayWALBulkLoadEventMarker(WALProtos.BulkLoadDescriptor bulkLoadEvent) throws IOException { checkTargetRegion(bulkLoadEvent.getEncodedRegionName().toByteArray(), "BulkLoad marker from WAL ", bulkLoadEvent); if (ServerRegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) { return; // if primary nothing to do } if (LOG.isDebugEnabled()) { LOG.debug(getRegionInfo().getEncodedName() + " : " + "Replaying bulkload event marker " + TextFormat.shortDebugString(bulkLoadEvent)); } // check if multiple families involved boolean multipleFamilies = false; byte[] family = null; for (StoreDescriptor storeDescriptor : bulkLoadEvent.getStoresList()) { byte[] fam = storeDescriptor.getFamilyName().toByteArray(); if (family == null) { family = fam; } else if (!Bytes.equals(family, fam)) { multipleFamilies = true; break; } } startBulkRegionOperation(multipleFamilies); try { // we will use writestate as a coarse-grain lock for all the replay events synchronized (writestate) { // Replication can deliver events out of order when primary region moves or the region // server crashes, since there is no coordination between replication of different wal files // belonging to different region servers. We have to safeguard against this case by using // region open event's seqid. Since this is the first event that the region puts (after // possibly flushing recovered.edits), after seeing this event, we can ignore every edit // smaller than this seqId if ( bulkLoadEvent.getBulkloadSeqNum() >= 0 && this.lastReplayedOpenRegionSeqId >= bulkLoadEvent.getBulkloadSeqNum() ) { LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying bulkload event :" + TextFormat.shortDebugString(bulkLoadEvent) + " because its sequence id is smaller than this region's lastReplayedOpenRegionSeqId" + " =" + lastReplayedOpenRegionSeqId); return; } for (StoreDescriptor storeDescriptor : bulkLoadEvent.getStoresList()) { // stores of primary may be different now family = storeDescriptor.getFamilyName().toByteArray(); HStore store = getStore(family); if (store == null) { LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a bulk load marker from primary, but the family is not found. " + "Ignoring. StoreDescriptor:" + storeDescriptor); continue; } List<String> storeFiles = storeDescriptor.getStoreFileList(); for (String storeFile : storeFiles) { StoreFileInfo storeFileInfo = null; try { storeFileInfo = fs.getStoreFileInfo(Bytes.toString(family), storeFile); store.bulkLoadHFile(storeFileInfo); } catch (FileNotFoundException ex) { LOG.warn(getRegionInfo().getEncodedName() + " : " + ((storeFileInfo != null) ? storeFileInfo.toString() : (new Path(Bytes.toString(family), storeFile)).toString()) + " doesn't exist any more. Skip loading the file"); } } } } if (bulkLoadEvent.getBulkloadSeqNum() > 0) { mvcc.advanceTo(bulkLoadEvent.getBulkloadSeqNum()); } } finally { closeBulkRegionOperation(); } }
3.68
hbase_SimpleRegionNormalizer_getMergeMinRegionSizeMb
/** * Return this instance's configured value for {@value #MERGE_MIN_REGION_SIZE_MB_KEY}. */ public long getMergeMinRegionSizeMb() { return normalizerConfiguration.getMergeMinRegionSizeMb(); }
3.68
hbase_DataBlockEncoding_getNameFromId
/** * Find and return the name of data block encoder for the given id. * @param encoderId id of data block encoder * @return name, same as used in options in column family */ public static String getNameFromId(short encoderId) { return getEncodingById(encoderId).toString(); }
3.68
hudi_DiskMap_addShutDownHook
/** * Register shutdown hook to force flush contents of the data written to FileOutputStream from OS page cache * (typically 4 KB) to disk. */ private void addShutDownHook() { shutdownThread = new Thread(this::cleanup); Runtime.getRuntime().addShutdownHook(shutdownThread); }
3.68
flink_StreamExecutionEnvironment_setStreamTimeCharacteristic
/** * Sets the time characteristic for all streams created from this environment, e.g., processing * time, event time, or ingestion time. * * <p>If you set the characteristic to IngestionTime or EventTime this will set a default * watermark update interval of 200 ms. If this is not applicable for your application you * should change it using {@link ExecutionConfig#setAutoWatermarkInterval(long)}. * * @param characteristic The time characteristic. * @deprecated In Flink 1.12 the default stream time characteristic has been changed to {@link * TimeCharacteristic#EventTime}, thus you don't need to call this method for enabling * event-time support anymore. Explicitly using processing-time windows and timers works in * event-time mode. If you need to disable watermarks, please use {@link * ExecutionConfig#setAutoWatermarkInterval(long)}. If you are using {@link * TimeCharacteristic#IngestionTime}, please manually set an appropriate {@link * WatermarkStrategy}. If you are using generic "time window" operations (for example {@link * org.apache.flink.streaming.api.datastream.KeyedStream#timeWindow(org.apache.flink.streaming.api.windowing.time.Time)} * that change behaviour based on the time characteristic, please use equivalent operations * that explicitly specify processing time or event time. */ @PublicEvolving @Deprecated public void setStreamTimeCharacteristic(TimeCharacteristic characteristic) { this.timeCharacteristic = Preconditions.checkNotNull(characteristic); if (characteristic == TimeCharacteristic.ProcessingTime) { getConfig().setAutoWatermarkInterval(0); } else { getConfig().setAutoWatermarkInterval(200); } }
3.68
pulsar_ResourceGroupService_resourceGroupGet
/** * Get a copy of the RG with the given name. */ public ResourceGroup resourceGroupGet(String resourceGroupName) { ResourceGroup retrievedRG = this.getResourceGroupInternal(resourceGroupName); if (retrievedRG == null) { return null; } // Return a copy. return new ResourceGroup(retrievedRG); }
3.68
dubbo_ServiceAnnotationPostProcessor_processAnnotatedBeanDefinition
/** * process @DubboService at java-config @bean method * <pre class="code"> * &#064;Configuration * public class ProviderConfig { * * &#064;Bean * &#064;DubboService(group="demo", version="1.2.3") * public DemoService demoService() { * return new DemoServiceImpl(); * } * * } * </pre> * @param refServiceBeanName * @param refServiceBeanDefinition * @param attributes */ private void processAnnotatedBeanDefinition( String refServiceBeanName, AnnotatedBeanDefinition refServiceBeanDefinition, Map<String, Object> attributes) { Map<String, Object> serviceAnnotationAttributes = new LinkedHashMap<>(attributes); // get bean class from return type String returnTypeName = SpringCompatUtils.getFactoryMethodReturnType(refServiceBeanDefinition); Class<?> beanClass = resolveClassName(returnTypeName, classLoader); String serviceInterface = resolveInterfaceName(serviceAnnotationAttributes, beanClass); // ServiceBean Bean name String serviceBeanName = generateServiceBeanName(serviceAnnotationAttributes, serviceInterface); AbstractBeanDefinition serviceBeanDefinition = buildServiceBeanDefinition(serviceAnnotationAttributes, serviceInterface, refServiceBeanName); // set id serviceBeanDefinition.getPropertyValues().add(Constants.ID, serviceBeanName); registerServiceBeanDefinition(serviceBeanName, serviceBeanDefinition, serviceInterface); }
3.68
dubbo_MetricsSupport_dec
/** * Decrement the method metrics count. */ public static void dec( MetricsKey metricsKey, MetricsPlaceValue placeType, MethodMetricsCollector<TimeCounterEvent> collector, MetricsEvent event) { collector.increment( event.getAttachmentValue(METHOD_METRICS), new MetricsKeyWrapper(metricsKey, placeType), -SELF_INCREMENT_SIZE); }
3.68
flink_ExecutionEnvironment_execute
/** * Triggers the program execution. The environment will execute all parts of the program that * have resulted in a "sink" operation. Sink operations are for example printing results ({@link * DataSet#print()}), writing results (e.g. {@link DataSet#writeAsText(String)} or {@link * DataSet#write(org.apache.flink.api.common.io.FileOutputFormat, String)}), or other generic * data sinks created with {@link DataSet#output(org.apache.flink.api.common.io.OutputFormat)}. * * <p>The program execution will be logged and displayed with the given job name. * * @return The result of the job execution, containing elapsed time and accumulators. * @throws Exception Thrown, if the program execution fails. */ public JobExecutionResult execute(String jobName) throws Exception { final JobClient jobClient = executeAsync(jobName); try { if (configuration.getBoolean(DeploymentOptions.ATTACHED)) { lastJobExecutionResult = jobClient.getJobExecutionResult().get(); } else { lastJobExecutionResult = new DetachedJobExecutionResult(jobClient.getJobID()); } jobListeners.forEach( jobListener -> jobListener.onJobExecuted(lastJobExecutionResult, null)); } catch (Throwable t) { // get() on the JobExecutionResult Future will throw an ExecutionException. This // behaviour was largely not there in Flink versions before the PipelineExecutor // refactoring so we should strip that exception. Throwable strippedException = ExceptionUtils.stripExecutionException(t); jobListeners.forEach( jobListener -> { jobListener.onJobExecuted(null, strippedException); }); ExceptionUtils.rethrowException(strippedException); } return lastJobExecutionResult; }
3.68
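A minimal driver sketch (path and job name invented) showing that sinks registered before execute() are what actually run:

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.fromElements(1, 2, 3)
    .map(x -> x * 2)
    .writeAsText("/tmp/doubled"); // registers a sink; nothing runs yet
JobExecutionResult result = env.execute("doubling job"); // triggers the pipeline
long millis = result.getNetRuntime(); // elapsed time carried in the result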
flink_YarnClusterDescriptor_getYarnSessionClusterEntrypoint
/** * The class to start the application master with. This class runs the main method in case of * session cluster. */ protected String getYarnSessionClusterEntrypoint() { return YarnSessionClusterEntrypoint.class.getName(); }
3.68
hbase_RegionSplitCalculator_calcCoverage
/** * Generates a coverage multimap from split key to Regions that start with the split key. * @return coverage multimap */ public Multimap<byte[], R> calcCoverage() { // This needs to be sorted to force the use of the comparator on the values, // otherwise byte array comparison isn't used Multimap<byte[], R> regions = TreeMultimap.create(BYTES_COMPARATOR, rangeCmp); // march through all splits from the start points for (Entry<byte[], Collection<R>> start : starts.asMap().entrySet()) { byte[] key = start.getKey(); for (R r : start.getValue()) { regions.put(key, r); for (byte[] coveredSplit : splits.subSet(r.getStartKey(), specialEndKey(r))) { regions.put(coveredSplit, r); } } } return regions; }
3.68
flink_StandaloneResourceManagerFactory_getConfigurationWithoutResourceLimitationIfSet
/** * Get the configuration for standalone ResourceManager, overwrite invalid configs. * * @param configuration configuration object * @return the configuration for standalone ResourceManager */ @VisibleForTesting public static Configuration getConfigurationWithoutResourceLimitationIfSet( Configuration configuration) { final Configuration copiedConfig = new Configuration(configuration); removeResourceLimitationConfig(copiedConfig); return copiedConfig; }
3.68
zxing_BufferedImageLuminanceSource_isRotateSupported
/** * This is always true, since the image is a gray-scale image. * * @return true */ @Override public boolean isRotateSupported() { return true; }
3.68
hbase_ConfigurationManager_notifyAllObservers
/** * The conf object has been repopulated from disk, and we have to notify all the observers that * have expressed interest in it. */ public void notifyAllObservers(Configuration conf) { LOG.info("Starting to notify all observers that config changed."); synchronized (configurationObservers) { for (ConfigurationObserver observer : configurationObservers) { try { if (observer != null) { observer.onConfigurationChange(conf); } } catch (Throwable t) { LOG.error("Encountered a throwable while notifying observers: of type : {}({})", observer.getClass().getCanonicalName(), observer, t); } } } }
3.68
hbase_BinaryPrefixComparator_parseFrom
/** * Parse a serialized representation of {@link BinaryPrefixComparator} * @param pbBytes A pb serialized {@link BinaryPrefixComparator} instance * @return An instance of {@link BinaryPrefixComparator} made from <code>bytes</code> * @throws DeserializationException if an error occurred * @see #toByteArray */ public static BinaryPrefixComparator parseFrom(final byte[] pbBytes) throws DeserializationException { ComparatorProtos.BinaryPrefixComparator proto; try { proto = ComparatorProtos.BinaryPrefixComparator.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } return new BinaryPrefixComparator(proto.getComparable().getValue().toByteArray()); }
3.68
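A hedged round-trip sketch pairing parseFrom with toByteArray (the prefix is made up):

BinaryPrefixComparator original = new BinaryPrefixComparator(Bytes.toBytes("row-1"));
byte[] pbBytes = original.toByteArray(); // protobuf-serialize
BinaryPrefixComparator restored = BinaryPrefixComparator.parseFrom(pbBytes);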
hadoop_And_apply
/** * Applies child expressions to the {@link PathData} item. If all pass then * returns {@link Result#PASS} else returns the result of the first * non-passing expression. */ @Override public Result apply(PathData item, int depth) throws IOException { Result result = Result.PASS; for (Expression child : getChildren()) { Result childResult = child.apply(item, -1); result = result.combine(childResult); if (!result.isPass()) { return result; } } return result; }
3.68
framework_BootstrapResponse_getBootstrapHandler
/** * Gets the bootstrap handler that fired this event. * * @return the bootstrap handler that fired this event */ public BootstrapHandler getBootstrapHandler() { return (BootstrapHandler) getSource(); }
3.68
framework_GridDragSourceConnector_dragMultipleRows
/** * Tells if multiple rows are dragged. Returns true if multiple selection is * allowed and a selected row is dragged. * * @param draggedRow * Data of dragged row. * @return {@code true} if multiple rows are dragged, {@code false} * otherwise. */ private boolean dragMultipleRows(JsonObject draggedRow) { SelectionModel<JsonObject> selectionModel = getGrid() .getSelectionModel(); return selectionModel.isSelectionAllowed() && selectionModel instanceof MultiSelectionModelConnector.MultiSelectionModel && selectionModel.isSelected(draggedRow); }
3.68
dubbo_EnvironmentUtils_filterDubboProperties
/** * Filters Dubbo Properties from {@link ConfigurableEnvironment} * * @param environment {@link ConfigurableEnvironment} * @return Read-only SortedMap */ public static SortedMap<String, String> filterDubboProperties(ConfigurableEnvironment environment) { SortedMap<String, String> dubboProperties = new TreeMap<>(); Map<String, Object> properties = extractProperties(environment); for (Map.Entry<String, Object> entry : properties.entrySet()) { String propertyName = entry.getKey(); if (propertyName.startsWith(DUBBO_PREFIX + PROPERTY_NAME_SEPARATOR) && entry.getValue() != null) { dubboProperties.put( propertyName, environment.resolvePlaceholders(entry.getValue().toString())); } } return Collections.unmodifiableSortedMap(dubboProperties); }
3.68
rocketmq-connect_WorkerTask_initialize
/** * Initialize the task for execution. * * @param taskConfig initial configuration */ protected void initialize(ConnectKeyValue taskConfig) { // NO-op }
3.68
hudi_HoodieHFileDataBlock_lookupRecords
// TODO abstract this w/in HoodieDataBlock @Override protected <T> ClosableIterator<HoodieRecord<T>> lookupRecords(List<String> sortedKeys, boolean fullKey) throws IOException { HoodieLogBlockContentLocation blockContentLoc = getBlockContentLocation().get(); // NOTE: It's important to extend Hadoop configuration here to make sure configuration // is appropriately carried over Configuration inlineConf = FSUtils.buildInlineConf(blockContentLoc.getHadoopConf()); Path inlinePath = InLineFSUtils.getInlineFilePath( blockContentLoc.getLogFile().getPath(), blockContentLoc.getLogFile().getPath().toUri().getScheme(), blockContentLoc.getContentPositionInLogFile(), blockContentLoc.getBlockSize()); try (final HoodieAvroHFileReader reader = new HoodieAvroHFileReader(inlineConf, inlinePath, new CacheConfig(inlineConf), inlinePath.getFileSystem(inlineConf), Option.of(getSchemaFromHeader()))) { // Get writer's schema from the header final ClosableIterator<HoodieRecord<IndexedRecord>> recordIterator = fullKey ? reader.getRecordsByKeysIterator(sortedKeys, readerSchema) : reader.getRecordsByKeyPrefixIterator(sortedKeys, readerSchema); return new CloseableMappingIterator<>(recordIterator, data -> (HoodieRecord<T>) data); } }
3.68
hadoop_AzureBlobFileSystem_commitSingleFileByRename
/** * Perform the rename. * This will be rate limited, as well as able to recover * from rename errors if the etag was passed in. * @param source path to source file * @param dest destination of rename. * @param sourceEtag etag of source file. may be null or empty * @return the outcome of the operation * @throws IOException any rename failure which was not recovered from. */ public Pair<Boolean, Duration> commitSingleFileByRename( final Path source, final Path dest, @Nullable final String sourceEtag) throws IOException { LOG.debug("renameFileWithEtag source: {} dest: {} etag {}", source, dest, sourceEtag); statIncrement(CALL_RENAME); trailingPeriodCheck(dest); Path qualifiedSrcPath = makeQualified(source); Path qualifiedDstPath = makeQualified(dest); TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.RENAME, true, tracingHeaderFormat, listener); if (qualifiedSrcPath.equals(qualifiedDstPath)) { // rename to itself is forbidden throw new PathIOException(qualifiedSrcPath.toString(), "cannot rename object onto self"); } // acquire one IO permit final Duration waitTime = rateLimiting.acquire(1); try { final boolean recovered = abfsStore.rename(qualifiedSrcPath, qualifiedDstPath, tracingContext, sourceEtag); return Pair.of(recovered, waitTime); } catch (AzureBlobFileSystemException ex) { LOG.debug("Rename operation failed. ", ex); checkException(source, ex); // never reached return null; } }
3.68
dubbo_PojoUtils_getKeyTypeForMap
/** * Get key type for {@link Map} directly implemented by {@code clazz}. * If {@code clazz} does not implement {@link Map} directly, return {@code null}. * * @param clazz {@link Class} * @return Return String.class for {@link com.alibaba.fastjson.JSONObject} */ private static Type getKeyTypeForMap(Class<?> clazz) { Type[] interfaces = clazz.getGenericInterfaces(); if (!ArrayUtils.isEmpty(interfaces)) { for (Type type : interfaces) { if (type instanceof ParameterizedType) { ParameterizedType t = (ParameterizedType) type; if ("java.util.Map".equals(t.getRawType().getTypeName())) { return t.getActualTypeArguments()[0]; } } } } return null; }
3.68
hadoop_RecurrenceId_getPipelineId
/** * Return the pipelineId for the pipeline jobs. * * @return the pipelineId. */ public final String getPipelineId() { return pipelineId; }
3.68
framework_VTabsheet_onClose
/** * Handles a request to close this tab. Closability should be checked * before calling this method. The close request will be delivered to * the server, where the actual closing is handled. * * @see #isClosable() */ public void onClose() { closeHandler.onClose(new VCloseEvent(this)); }
3.68
framework_DownloadStream_getContentDispositionFilename
/** * Returns the filename formatted for inclusion in a Content-Disposition * header. Includes both a plain version of the name and a UTF-8 version * * @since 7.4.8 * @param filename * The filename to include * @return A value for inclusion in a Content-Disposition header */ public static String getContentDispositionFilename(String filename) { String encodedFilename = EncodeUtil.rfc5987Encode(filename); return String.format("filename=\"%s\"; filename*=utf-8''%s", encodedFilename, encodedFilename); }
3.68
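For illustration only (the exact escaping is up to EncodeUtil.rfc5987Encode), a filename containing a space would come out roughly as:

// getContentDispositionFilename("report 1.pdf") yields something like:
// filename="report%201.pdf"; filename*=utf-8''report%201.pdf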
hbase_ZKServerTool_main
/** * Run the tool. * @param args Command line arguments. */ public static void main(String[] args) { for (ServerName server : readZKNodes(HBaseConfiguration.create())) { // bin/zookeeper.sh relies on the "ZK host" string for grepping which is case sensitive. System.out.println("ZK host: " + server.getHostname()); } }
3.68
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations5
/** * Tests that expression builder produces an output with brackets if a second * operand is Math operation. */ @Test public void shouldGenerateCorrectSqlForMathOperations5() { String result = testDialect.getSqlFrom(field("a").multiplyBy(field("b").plus(field("c")))); assertEquals(expectedSqlForMathOperations5(), result); }
3.68
hbase_WALEdit_getBulkLoadDescriptor
/** * Deserializes and returns a BulkLoadDescriptor from the passed in Cell * @param cell the key value * @return deserialized BulkLoadDescriptor or null. */ public static WALProtos.BulkLoadDescriptor getBulkLoadDescriptor(Cell cell) throws IOException { return CellUtil.matchingColumn(cell, METAFAMILY, BULK_LOAD) ? WALProtos.BulkLoadDescriptor.parseFrom(CellUtil.cloneValue(cell)) : null; }
3.68
framework_VScrollTable_initializeRows
/** For internal use only. May be removed or replaced in the future. */ public void initializeRows(UIDL uidl, UIDL rowData) { if (scrollBody != null) { scrollBody.removeFromParent(); } // Without this call the scroll position is messed up in IE even after // the lazy scroller has set the scroll position to the first visible // item int pos = scrollBodyPanel.getScrollPosition(); // Reset first row in view port so client requests correct last row. if (pos == 0) { firstRowInViewPort = 0; } scrollBody = createScrollBody(); scrollBody.renderInitialRows(rowData, uidl.getIntAttribute("firstrow"), uidl.getIntAttribute("rows")); scrollBodyPanel.add(scrollBody); initialContentReceived = true; sizeNeedsInit = true; scrollBody.restoreRowVisibility(); }
3.68
pulsar_FunctionRuntimeManager_findFunctionAssignment
/** * Find an assignment of a function. * @param tenant the tenant the function belongs to * @param namespace the namespace the function belongs to * @param functionName the function name * @param instanceId the instance id of the function * @return the assignment of the function */ public synchronized Assignment findFunctionAssignment(String tenant, String namespace, String functionName, int instanceId) { return this.findAssignment(tenant, namespace, functionName, instanceId); }
3.68
graphhopper_BikeCommonPriorityParser_collect
/** * @param weightToPrioMap associate a weight with every priority. This sorted map allows * subclasses to 'insert' more important priorities as well as overwrite determined priorities. */ void collect(ReaderWay way, double wayTypeSpeed, TreeMap<Double, PriorityCode> weightToPrioMap) { String highway = way.getTag("highway"); if (isDesignated(way)) { if ("path".equals(highway)) weightToPrioMap.put(100d, VERY_NICE); else weightToPrioMap.put(100d, PREFER); } if ("cycleway".equals(highway)) { if (way.hasTag("foot", intendedValues) && !way.hasTag("segregated", "yes")) weightToPrioMap.put(100d, PREFER); else weightToPrioMap.put(100d, VERY_NICE); } double maxSpeed = Math.max(getMaxSpeed(way, false), getMaxSpeed(way, true)); if (preferHighwayTags.contains(highway) || (isValidSpeed(maxSpeed) && maxSpeed <= 30)) { if (!isValidSpeed(maxSpeed) || maxSpeed < avoidSpeedLimit) { weightToPrioMap.put(40d, PREFER); if (way.hasTag("tunnel", intendedValues)) weightToPrioMap.put(40d, UNCHANGED); } } else if (avoidHighwayTags.containsKey(highway) || isValidSpeed(maxSpeed) && maxSpeed >= avoidSpeedLimit && !"track".equals(highway)) { PriorityCode priorityCode = avoidHighwayTags.get(highway); weightToPrioMap.put(50d, priorityCode == null ? AVOID : priorityCode); if (way.hasTag("tunnel", intendedValues)) { PriorityCode worse = priorityCode == null ? BAD : priorityCode.worse().worse(); weightToPrioMap.put(50d, worse == EXCLUDE ? REACH_DESTINATION : worse); } } String cycleway = way.getFirstPriorityTag(Arrays.asList("cycleway", "cycleway:left", "cycleway:right", "cycleway:both")); if (Arrays.asList("lane", "opposite_track", "shared_lane", "share_busway", "shoulder").contains(cycleway)) { weightToPrioMap.put(100d, SLIGHT_PREFER); } else if ("track".equals(cycleway)) { weightToPrioMap.put(100d, PREFER); } if (way.hasTag("bicycle", "use_sidepath")) { weightToPrioMap.put(100d, REACH_DESTINATION); } if (pushingSectionsHighways.contains(highway) || "parking_aisle".equals(way.getTag("service"))) { PriorityCode pushingSectionPrio = SLIGHT_AVOID; if (way.hasTag("bicycle", "yes") || way.hasTag("bicycle", "permissive")) pushingSectionPrio = PREFER; if (isDesignated(way) && (!way.hasTag("highway", "steps"))) pushingSectionPrio = VERY_NICE; if (way.hasTag("foot", "yes")) { pushingSectionPrio = pushingSectionPrio.worse(); if (way.hasTag("segregated", "yes")) pushingSectionPrio = pushingSectionPrio.better(); } if (way.hasTag("highway", "steps")) { pushingSectionPrio = BAD; } weightToPrioMap.put(100d, pushingSectionPrio); } if (way.hasTag("railway", "tram")) weightToPrioMap.put(50d, AVOID_MORE); if (way.hasTag("lcn", "yes")) weightToPrioMap.put(100d, PREFER); String classBicycleValue = way.getTag(classBicycleKey); if (classBicycleValue != null) { // We assume that humans are better in classifying preferences compared to our algorithm above -> weight = 100 weightToPrioMap.put(100d, convertClassValueToPriority(classBicycleValue)); } else { String classBicycle = way.getTag("class:bicycle"); if (classBicycle != null) weightToPrioMap.put(100d, convertClassValueToPriority(classBicycle)); } // Increase the priority for scenic routes or in case that maxspeed limits our average speed as compensation. See #630 if (way.hasTag("scenic", "yes") || maxSpeed > 0 && maxSpeed <= wayTypeSpeed) { PriorityCode lastEntryValue = weightToPrioMap.lastEntry().getValue(); if (lastEntryValue.getValue() < BEST.getValue()) weightToPrioMap.put(110d, lastEntryValue.better()); } }
3.68
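A small standalone sketch of the TreeMap convention the method relies on: an entry with a higher weight key outranks lower-weighted ones, because the final priority is read via lastEntry(). The demo class and integer values below are illustrative, not from GraphHopper:

import java.util.TreeMap;

public class PriorityMapDemo {
    public static void main(String[] args) {
        TreeMap<Double, Integer> weightToPrio = new TreeMap<>();
        weightToPrio.put(40d, 3);   // weak hint, e.g. derived from the highway type
        weightToPrio.put(100d, 7);  // strong hint, e.g. from class:bicycle; outranks the weaker one
        // Callers pick the entry with the highest weight key:
        System.out.println(weightToPrio.lastEntry().getValue()); // prints 7
    }
}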
dubbo_ApplicationModel_getProviderModel
/**
 * @deprecated use {@link FrameworkServiceRepository#lookupExportedService(String)}
 */
@Deprecated
public static ProviderModel getProviderModel(String serviceKey) {
    return defaultModel().getDefaultModule().getServiceRepository().lookupExportedService(serviceKey);
}
3.68
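A hedged migration sketch. The replacement method name comes from the @deprecated note above; the model accessor chain is an assumption and may differ between Dubbo 3.x versions:

// Deprecated path:
ProviderModel provider = ApplicationModel.getProviderModel(serviceKey);

// Replacement suggested by the @deprecated note (accessor chain assumed):
ProviderModel provider2 = FrameworkModel.defaultModel()
        .getServiceRepository()
        .lookupExportedService(serviceKey);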
framework_VFilterSelect_asFraction
/**
 * Returns the percentage value as a fraction, e.g. 42% -> 0.42.
 *
 * @param percentage
 *            the percentage string, ending with a '%' sign
 * @return the corresponding fraction
 */
private float asFraction(String percentage) {
    String trimmed = percentage.trim();
    String withoutPercentSign = trimmed.substring(0, trimmed.length() - 1);
    float asFraction = Float.parseFloat(withoutPercentSign) / 100;
    return asFraction;
}
3.68
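A tiny sketch of the expected behavior. Since the original method is private to the widget, this is a standalone re-implementation for illustration only:

public class AsFractionDemo {
    static float asFraction(String percentage) {
        String trimmed = percentage.trim();
        return Float.parseFloat(trimmed.substring(0, trimmed.length() - 1)) / 100;
    }

    public static void main(String[] args) {
        System.out.println(asFraction("42%"));    // 0.42
        System.out.println(asFraction(" 100% ")); // 1.0
        // Note: input without a trailing '%' silently drops its last digit
        // ("42" -> 0.04) rather than failing.
    }
}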
hadoop_TimelineMetricCalculator_compare
/**
 * Compare two not-null numbers.
 * @param n1 Number n1
 * @param n2 Number n2
 * @return 0 if n1 equals n2, a negative int if n1 is less than n2, a
 *         positive int otherwise.
 */
public static int compare(Number n1, Number n2) {
  if (n1 == null || n2 == null) {
    throw new YarnRuntimeException(
        "Number to be compared shouldn't be null.");
  }

  if (n1 instanceof Integer || n1 instanceof Long) {
    if (n1.longValue() == n2.longValue()) {
      return 0;
    } else {
      return (n1.longValue() < n2.longValue()) ? -1 : 1;
    }
  }

  if (n1 instanceof Float || n1 instanceof Double) {
    if (n1.doubleValue() == n2.doubleValue()) {
      return 0;
    } else {
      return (n1.doubleValue() < n2.doubleValue()) ? -1 : 1;
    }
  }

  // TODO throw warnings/exceptions for other types of number.
  throw new YarnRuntimeException("Unsupported types for number comparison: "
      + n1.getClass().getName() + ", " + n2.getClass().getName());
}
3.68
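A short sketch of the comparison semantics, assuming the method is accessible as shown. The caveat in the last line follows directly from the branching on n1's runtime type:

System.out.println(TimelineMetricCalculator.compare(1, 2L));      // -1 : compared as longs
System.out.println(TimelineMetricCalculator.compare(2.5d, 1.0f)); //  1 : compared as doubles
// Caveat: the branch is chosen by n1's type, so 1.9 is truncated to 1L here:
System.out.println(TimelineMetricCalculator.compare(1, 1.9d));    //  0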
hbase_CommonFSUtils_getFilePermissions
/**
 * Get the file permissions specified in the configuration, if they are enabled.
 * @param fs               filesystem that the file will be created on.
 * @param conf             configuration to read for determining if permissions are enabled and
 *                         which to use
 * @param permssionConfKey property key in the configuration to use when finding the permission
 * @return the permission to use when creating a new file on the fs. If special permissions are
 *         not specified in the configuration, then the default permissions on the fs will be
 *         returned.
 */
public static FsPermission getFilePermissions(final FileSystem fs, final Configuration conf,
    final String permssionConfKey) {
  boolean enablePermissions = conf.getBoolean(HConstants.ENABLE_DATA_FILE_UMASK, false);

  if (enablePermissions) {
    try {
      FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
      // make sure that we have a mask, if not, go default.
      String mask = conf.get(permssionConfKey);
      if (mask == null) {
        return FsPermission.getFileDefault();
      }
      // apply the umask
      FsPermission umask = new FsPermission(mask);
      return perm.applyUMask(umask);
    } catch (IllegalArgumentException e) {
      LOG.warn("Incorrect umask attempted to be created: " + conf.get(permssionConfKey)
        + ", using default file permissions.", e);
      return FsPermission.getFileDefault();
    }
  }
  return FsPermission.getFileDefault();
}
3.68
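A hedged usage sketch. The umask property key "hbase.data.umask" is assumed here for illustration; any key the caller stores a three-digit umask under works the same way:

Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true); // turn the feature on
conf.set("hbase.data.umask", "077"); // strip group/other bits from rwxrwxrwx

FileSystem fs = FileSystem.get(conf);
FsPermission perm = CommonFSUtils.getFilePermissions(fs, conf, "hbase.data.umask");
// 777 with umask 077 applied -> rwx------ (700)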
flink_HiveDDLUtils_noValidateConstraint
// returns a constraint trait that doesn't require VALIDATE
public static byte noValidateConstraint(byte trait) {
    return (byte) (trait & (~HIVE_CONSTRAINT_VALIDATE));
}
3.68
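A minimal bit-mask illustration. The concrete values of the flags are assumptions here (any single-bit flags behave the same way under this mask-and-clear operation):

public class ConstraintTraitDemo {
    // assumed single-bit flags; the real constants live in HiveDDLUtils
    static final byte HIVE_CONSTRAINT_ENABLE = 1 << 0;
    static final byte HIVE_CONSTRAINT_VALIDATE = 1 << 1;

    static byte noValidateConstraint(byte trait) {
        return (byte) (trait & (~HIVE_CONSTRAINT_VALIDATE));
    }

    public static void main(String[] args) {
        byte trait = (byte) (HIVE_CONSTRAINT_ENABLE | HIVE_CONSTRAINT_VALIDATE);
        // the VALIDATE bit is cleared, ENABLE is untouched:
        System.out.println(noValidateConstraint(trait) == HIVE_CONSTRAINT_ENABLE); // true
    }
}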
hudi_BaseHoodieWriteClient_initMetadataTable
/**
 * Bootstrap the metadata table.
 *
 * @param instantTime current inflight instant time
 */
protected void initMetadataTable(Option<String> instantTime) {
  // by default do nothing.
}
3.68
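Since the base implementation is a no-op hook, engine-specific write clients are expected to override it. A hedged override sketch; the config guard and helper below are hypothetical, not taken from any concrete Hudi client:

@Override
protected void initMetadataTable(Option<String> instantTime) {
  if (config.isMetadataTableEnabled()) { // assumed config accessor
    bootstrapMetadataTable(instantTime); // hypothetical engine-specific helper
  }
}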
hadoop_TaskTrackerInfo_getBlacklistReport
/**
 * Gets a descriptive report about why the tasktracker was blacklisted.
 *
 * @return report describing why the tasktracker was blacklisted.
 */
public String getBlacklistReport() {
  return blacklistReport;
}
3.68
flink_NettyShuffleMaster_computeShuffleMemorySizeForTask
/**
 * JM announces the network memory requirement based on the result of this method. Please
 * note that the calculation depends on both the I/O details of a vertex and the network
 * configuration, e.g. {@link NettyShuffleEnvironmentOptions#NETWORK_BUFFERS_PER_CHANNEL} and
 * {@link NettyShuffleEnvironmentOptions#NETWORK_EXTRA_BUFFERS_PER_GATE}, which means we should
 * always keep the configurations consistent between JM, RM and TM in fine-grained resource
 * management, so that the processes of memory announcing and allocating respect each other.
 */
@Override
public MemorySize computeShuffleMemorySizeForTask(TaskInputsOutputsDescriptor desc) {
    checkNotNull(desc);

    int numRequiredNetworkBuffers =
            NettyShuffleUtils.computeNetworkBuffersForAnnouncing(
                    buffersPerInputChannel,
                    floatingBuffersPerGate,
                    maxRequiredBuffersPerGate,
                    sortShuffleMinParallelism,
                    sortShuffleMinBuffers,
                    desc.getInputChannelNums(),
                    desc.getPartitionReuseCount(),
                    desc.getSubpartitionNums(),
                    desc.getInputPartitionTypes(),
                    desc.getPartitionTypes());

    return new MemorySize((long) networkBufferSize * numRequiredNetworkBuffers);
}
3.68
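The final line is plain arithmetic: announced memory is buffer count times buffer size. A sketch with illustrative numbers; the buffer count normally comes from NettyShuffleUtils and is simply assumed here:

int numRequiredNetworkBuffers = 1000; // assumed result of computeNetworkBuffersForAnnouncing(...)
int networkBufferSize = 32 * 1024;    // e.g. the common 32 KiB buffer size
MemorySize announced = new MemorySize((long) networkBufferSize * numRequiredNetworkBuffers);
// announced = 32,768,000 bytes = 31.25 MiB; the cast to long avoids int overflow for large counts.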
hbase_CellFlatMap_firstKey
// -------------------------------- Key's getters --------------------------------

@Override
public Cell firstKey() {
  if (isEmpty()) {
    return null;
  }
  return descending ? getCell(maxCellIdx - 1) : getCell(minCellIdx);
}
3.68
framework_DataProvider_withConfigurableFilter
/**
 * Wraps this data provider to create a data provider that supports
 * programmatically setting a filter but no filtering through the query.
 *
 * @see #withConfigurableFilter(SerializableBiFunction)
 * @see ConfigurableFilterDataProvider#setFilter(Object)
 *
 * @return a data provider with a configurable filter, not <code>null</code>
 */
public default ConfigurableFilterDataProvider<T, Void, F> withConfigurableFilter() {
    return withConfigurableFilter((queryFilter, configuredFilter) -> {
        assert queryFilter == null : "Filter from Void query must be null";

        return configuredFilter;
    });
}
3.68
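A hedged usage sketch of the wrapper in a Vaadin 8 style setup; the Person type, the people collection, and the name filter are invented for illustration:

ListDataProvider<Person> base = DataProvider.ofCollection(people);
ConfigurableFilterDataProvider<Person, Void, SerializablePredicate<Person>> provider =
        base.withConfigurableFilter();

grid.setDataProvider(provider);
// Components see a Void query filter; filtering happens only through the configured one:
provider.setFilter(person -> person.getName().startsWith("A"));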
framework_WidgetSet_createConnector
/**
 * Create an uninitialized connector that best matches given UIDL. The
 * connector must implement {@link ServerConnector}.
 *
 * @param tag
 *            connector type tag for the connector to create
 * @param conf
 *            the application configuration to use when creating the
 *            connector
 *
 * @return New uninitialized and unregistered connector that can paint given
 *         UIDL.
 */
public ServerConnector createConnector(int tag,
        ApplicationConfiguration conf) {
    /*
     * Yes, this (including the generated code in WidgetMap) may look like
     * very odd code, but due to the nature of GWT, we cannot do this any
     * cleaner. Luckily this is mostly written by WidgetSetGenerator; here
     * are just some hacks. Extra instantiation code is needed if the
     * client side connector has no "native" counterpart on the client
     * side.
     */
    Profiler.enter("WidgetSet.createConnector");

    Class<? extends ServerConnector> classType = resolveInheritedConnectorType(
            conf, tag);
    try {
        if (classType == null
                || classType == UnknownComponentConnector.class
                || classType == UnknownExtensionConnector.class) {
            String serverSideName = conf
                    .getUnknownServerClassNameByTag(tag);
            if (classType == UnknownExtensionConnector.class) {
                // Display message in the console for non-visual connectors
                getLogger().severe(UnknownComponentConnector
                        .createMessage(serverSideName));
                return GWT.create(UnknownExtensionConnector.class);
            } else {
                UnknownComponentConnector c = GWT
                        .create(UnknownComponentConnector.class);
                // Set message to be shown in a widget for visual connectors
                c.setServerSideClassName(serverSideName);
                return c;
            }
        } else {
            /*
             * let the auto generated code instantiate this type
             */
            ServerConnector connector = (ServerConnector) TypeData
                    .getType(classType).createInstance();
            connector.setTag(tag);
            return connector;
        }
    } catch (NoDataException e) {
        throw new IllegalStateException("There is no information about "
                + classType
                + ". Did you remember to compile the right widgetset?", e);
    } finally {
        Profiler.leave("WidgetSet.createConnector");
    }
}
3.68
morf_XmlDataSetProducer_views
/**
 * @see org.alfasoftware.morf.metadata.Schema#views()
 */
@Override
public Collection<View> views() {
  return Collections.emptySet();
}
3.68
morf_Function_sumDistinct
/**
 * Helper method to create an instance of the "sum(distinct)" SQL function.
 *
 * @param fieldToEvaluate the field to evaluate in the sum function. This can be any expression resulting in a single column of data.
 * @return an instance of the sum function
 */
public static Function sumDistinct(AliasedField fieldToEvaluate) {
  return new Function(FunctionType.SUM_DISTINCT, fieldToEvaluate);
}
3.68
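A hedged SQL-building sketch in the morf DSL. Static imports of select/field/tableRef helpers are assumed, and the table and column names are invented:

// Intended rendering: SELECT SUM(DISTINCT amount) AS totalAmount FROM Payment
SelectStatement stmt = select(sumDistinct(field("amount")).as("totalAmount"))
    .from(tableRef("Payment"));
// A SqlDialect then turns stmt into the dialect-specific SQL string.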
hbase_ScannerContext_setTimeLimitScope
/**
 * @param scope The scope in which the time limit will be enforced
 */
void setTimeLimitScope(LimitScope scope) {
  limits.setTimeScope(scope);
}
3.68
framework_TreeGrid_setHierarchyColumn
/**
 * Set the column that displays the hierarchy of this grid's data. By
 * default the hierarchy will be displayed in the first column.
 * <p>
 * Setting a hierarchy column by calling this method also sets the column
 * to be visible and not hidable.
 * <p>
 * <strong>Note:</strong> Changing the Renderer of the hierarchy column is
 * not supported.
 *
 * @see Column#setId(String)
 *
 * @param id
 *            id of the column to use for displaying hierarchy
 */
public void setHierarchyColumn(String id) {
    Objects.requireNonNull(id, "id may not be null");
    if (getColumn(id) == null) {
        throw new IllegalArgumentException("No column found for given id");
    }
    setHierarchyColumn(getColumn(id));
}
3.68
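A short usage sketch; the Department type and the column ids are illustrative:

TreeGrid<Department> treeGrid = new TreeGrid<>();
treeGrid.addColumn(Department::getName).setId("name");
treeGrid.addColumn(Department::getManager).setId("manager");
// Show the expand/collapse controls in the "name" column instead of the first one:
treeGrid.setHierarchyColumn("name");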
open-banking-gateway_EncryptionProviderConfig_consentAuthEncryptionProvider
/**
 * Consent authorization flow encryption.
 * @param specSecretKeyConfig Secret key based encryption configuration.
 * @return Consent Authorization encryption
 */
@Bean
ConsentAuthorizationEncryptionServiceProvider consentAuthEncryptionProvider(ConsentSpecSecretKeyConfig specSecretKeyConfig) {
    return new ConsentAuthorizationEncryptionServiceProvider(
            new EncryptionWithInitVectorOper(specSecretKeyConfig)
    );
}
3.68
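A hedged consumer sketch: once the @Bean is registered, any Spring component can take the provider by constructor injection. The service class below is invented:

@Service
public class ConsentProtectionService {
    private final ConsentAuthorizationEncryptionServiceProvider encryptionProvider;

    public ConsentProtectionService(ConsentAuthorizationEncryptionServiceProvider encryptionProvider) {
        this.encryptionProvider = encryptionProvider; // supplied by the @Bean definition above
    }
}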
framework_AbstractComponent_getWidth
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.server.Sizeable#getWidth()
 */
@Override
public float getWidth() {
    return width;
}
3.68