name: string, lengths 12 to 178
code_snippet: string, lengths 8 to 36.5k
score: float64, range 3.26 to 3.68
pulsar_PulsarClientImpl_newPartitionedProducerImpl
/** * Factory method for creating PartitionedProducerImpl instance. * * Allows overriding the PartitionedProducerImpl instance in tests. * * @param topic topic name * @param conf producer configuration * @param schema topic schema * @param interceptors producer interceptors * @param producerCreatedFuture future for signaling completion of async producer creation * @param metadata partitioned topic metadata * @param <T> message type class * @return new PartitionedProducerImpl instance */ protected <T> PartitionedProducerImpl<T> newPartitionedProducerImpl(String topic, ProducerConfigurationData conf, Schema<T> schema, ProducerInterceptors interceptors, CompletableFuture<Producer<T>> producerCreatedFuture, PartitionedTopicMetadata metadata) { return new PartitionedProducerImpl<>(PulsarClientImpl.this, topic, conf, metadata.partitions, producerCreatedFuture, schema, interceptors); }
3.68
hbase_BinaryPrefixComparator_areSerializedFieldsEqual
/** * Returns true if and only if the fields of the comparator that are serialized are equal to the * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { if (other == this) { return true; } if (!(other instanceof BinaryPrefixComparator)) { return false; } return super.areSerializedFieldsEqual(other); }
3.68
flink_StreamCompressionDecorator_decorateWithCompression
/** * IMPORTANT: For streams returned by this method, {@link InputStream#close()} is not propagated * to the inner stream. The inner stream must be closed separately. * * @param stream the stream to decorate. * @return an input stream that is decorated by the compression scheme. */ public final InputStream decorateWithCompression(InputStream stream) throws IOException { return decorateWithCompression(new NonClosingInputStreamDecorator(stream)); }
3.68
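A minimal usage sketch of the close contract documented above. It assumes Flink's SnappyStreamCompressionDecorator (one shipped subclass) and its INSTANCE field; the point illustrated is only that closing the decorated stream does not close the inner one, so the caller must manage the inner stream separately.

    import java.io.FileInputStream;
    import java.io.InputStream;
    import org.apache.flink.runtime.state.SnappyStreamCompressionDecorator;

    public class CompressionCloseDemo {
        public static void main(String[] args) throws Exception {
            // try-with-resources owns the inner stream...
            try (InputStream raw = new FileInputStream("state.snappy")) {
                InputStream decorated =
                        SnappyStreamCompressionDecorator.INSTANCE.decorateWithCompression(raw);
                // ... read from 'decorated' ...
                decorated.close(); // does NOT close 'raw', per the contract above
            } // ... and closes it here, separately.
        }
    }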
dubbo_StringUtils_replace
/** * <p>Replaces a String with another String inside a larger String, * for the first {@code max} values of the search String.</p> * * <p>A {@code null} reference passed to this method is a no-op.</p> * * <pre> * StringUtils.replace(null, *, *, *) = null * StringUtils.replace("", *, *, *) = "" * StringUtils.replace("any", null, *, *) = "any" * StringUtils.replace("any", *, null, *) = "any" * StringUtils.replace("any", "", *, *) = "any" * StringUtils.replace("any", *, *, 0) = "any" * StringUtils.replace("abaa", "a", null, -1) = "abaa" * StringUtils.replace("abaa", "a", "", -1) = "b" * StringUtils.replace("abaa", "a", "z", 0) = "abaa" * StringUtils.replace("abaa", "a", "z", 1) = "zbaa" * StringUtils.replace("abaa", "a", "z", 2) = "zbza" * StringUtils.replace("abaa", "a", "z", -1) = "zbzz" * </pre> * * @param text text to search and replace in, may be null * @param searchString the String to search for, may be null * @param replacement the String to replace it with, may be null * @param max maximum number of values to replace, or {@code -1} if no maximum * @return the text with any replacements processed, * {@code null} if null String input */ public static String replace(final String text, final String searchString, final String replacement, int max) { if (isAnyEmpty(text, searchString) || replacement == null || max == 0) { return text; } int start = 0; int end = text.indexOf(searchString, start); if (end == INDEX_NOT_FOUND) { return text; } final int replLength = searchString.length(); int increase = replacement.length() - replLength; increase = increase < 0 ? 0 : increase; increase *= max < 0 ? 16 : max > 64 ? 64 : max; final StringBuilder buf = new StringBuilder(text.length() + increase); while (end != INDEX_NOT_FOUND) { buf.append(text, start, end).append(replacement); start = end + replLength; if (--max == 0) { break; } end = text.indexOf(searchString, start); } buf.append(text.substring(start)); return buf.toString(); }
3.68
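The behaviour table in the Javadoc above translates directly into code; a minimal sketch, assuming dubbo's StringUtils is importable from org.apache.dubbo.common.utils:

    import org.apache.dubbo.common.utils.StringUtils;

    public class ReplaceDemo {
        public static void main(String[] args) {
            // Replace at most two occurrences, left to right.
            System.out.println(StringUtils.replace("abaa", "a", "z", 2));  // zbza
            // max = -1 removes the limit: every occurrence is replaced.
            System.out.println(StringUtils.replace("abaa", "a", "z", -1)); // zbzz
            // max = 0 (like null/empty inputs) is a no-op.
            System.out.println(StringUtils.replace("abaa", "a", "z", 0));  // abaa
        }
    }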
flink_CoGroupedStreams_apply
/** * Completes the co-group operation with the user function that is executed for windowed * groups. * * <p>Note: This method's return type does not support setting an operator-specific * parallelism. Due to binary backwards compatibility, this cannot be altered. Use the * {@link #with(CoGroupFunction, TypeInformation)} method to set an operator-specific * parallelism. */ public <T> DataStream<T> apply( CoGroupFunction<T1, T2, T> function, TypeInformation<T> resultType) { // clean the closure function = input1.getExecutionEnvironment().clean(function); UnionTypeInfo<T1, T2> unionType = new UnionTypeInfo<>(input1.getType(), input2.getType()); UnionKeySelector<T1, T2, KEY> unionKeySelector = new UnionKeySelector<>(keySelector1, keySelector2); SingleOutputStreamOperator<TaggedUnion<T1, T2>> taggedInput1 = input1.map(new Input1Tagger<T1, T2>()); taggedInput1.getTransformation().setParallelism(input1.getParallelism(), false); taggedInput1.returns(unionType); SingleOutputStreamOperator<TaggedUnion<T1, T2>> taggedInput2 = input2.map(new Input2Tagger<T1, T2>()); taggedInput2.getTransformation().setParallelism(input2.getParallelism(), false); taggedInput2.returns(unionType); DataStream<TaggedUnion<T1, T2>> unionStream = taggedInput1.union(taggedInput2); // we explicitly create the keyed stream to manually pass the key type information in windowedStream = new KeyedStream<TaggedUnion<T1, T2>, KEY>( unionStream, unionKeySelector, keyType) .window(windowAssigner); if (trigger != null) { windowedStream.trigger(trigger); } if (evictor != null) { windowedStream.evictor(evictor); } if (allowedLateness != null) { windowedStream.allowedLateness(allowedLateness); } return windowedStream.apply( new CoGroupWindowFunction<T1, T2, T, KEY, W>(function), resultType); }
3.68
hadoop_BCFile_getStartPos
/** * Get the starting position of the block in the file. * * @return the starting position of the block in the file. */ public long getStartPos() { return rBlkState.getBlockRegion().getOffset(); }
3.68
hudi_RocksDBDAO_dropColumnFamily
/** * Note: Does not delete the column family from the underlying DB. Just closes the handle. * * @param columnFamilyName Column Family Name */ public void dropColumnFamily(String columnFamilyName) { ValidationUtils.checkArgument(!closed); managedDescriptorMap.computeIfPresent(columnFamilyName, (colFamilyName, descriptor) -> { ColumnFamilyHandle handle = managedHandlesMap.get(colFamilyName); try { getRocksDB().dropColumnFamily(handle); handle.close(); } catch (RocksDBException e) { throw new HoodieException(e); } managedHandlesMap.remove(columnFamilyName); return null; }); }
3.68
morf_InsertStatement_drive
/** * @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser) */ @Override public void drive(ObjectTreeTraverser traverser) { traverser .dispatch(table) .dispatch(fromTable) .dispatch(selectStatement) .dispatch(fields) .dispatch(values) .dispatch(fieldDefaults.values()); }
3.68
flink_DeclarativeSlotPoolService_onClose
/** * This method is called when the slot pool service is closed. It can be overridden by * subclasses. */ protected void onClose() {}
3.68
flink_NFACompiler_createLooping
/** * Creates the given state as a looping one. Looping state is one with TAKE edge to itself * and PROCEED edge to the sinkState. It also consists of a similar state without the * PROCEED edge, so that for each PROCEED transition branches in computation state graph can * be created only once. * * @param sinkState the state that the converted state should point to * @return the first state of the created complex state */ @SuppressWarnings("unchecked") private State<T> createLooping(final State<T> sinkState) { if (currentPattern instanceof GroupPattern) { return createLoopingGroupPatternState((GroupPattern) currentPattern, sinkState); } final IterativeCondition<T> untilCondition = (IterativeCondition<T>) currentPattern.getUntilCondition(); final IterativeCondition<T> ignoreCondition = extendWithUntilCondition( getInnerIgnoreCondition(currentPattern), untilCondition, false); final IterativeCondition<T> takeCondition = extendWithUntilCondition( getTakeCondition(currentPattern), untilCondition, true); IterativeCondition<T> proceedCondition = getTrueFunction(); final State<T> loopingState = createState(State.StateType.Normal, true); if (currentPattern.getQuantifier().hasProperty(Quantifier.QuantifierProperty.GREEDY)) { if (untilCondition != null) { State<T> sinkStateCopy = copy(sinkState); loopingState.addProceed( sinkStateCopy, new RichAndCondition<>(proceedCondition, untilCondition)); originalStateMap.put(sinkState.getName(), sinkStateCopy); } loopingState.addProceed( sinkState, untilCondition != null ? new RichAndCondition<>( proceedCondition, new RichNotCondition<>(untilCondition)) : proceedCondition); updateWithGreedyCondition(sinkState, getTakeCondition(currentPattern)); } else { loopingState.addProceed(sinkState, proceedCondition); } loopingState.addTake(takeCondition); addStopStateToLooping(loopingState); if (ignoreCondition != null) { final State<T> ignoreState = createState(State.StateType.Normal, false); ignoreState.addTake(loopingState, takeCondition); ignoreState.addIgnore(ignoreCondition); loopingState.addIgnore(ignoreState, ignoreCondition); addStopStateToLooping(ignoreState); } return loopingState; }
3.68
framework_ApplicationConfiguration_getVaadinDirUrl
/** * Gets the URL of the VAADIN directory on the server. * * @return the URL of the VAADIN directory */ public String getVaadinDirUrl() { return vaadinDirUrl; }
3.68
hbase_TableState_isEnabledOrEnabling
/** Returns True if {@link State#ENABLED} or {@link State#ENABLING} */ public boolean isEnabledOrEnabling() { return isInStates(State.ENABLED, State.ENABLING); }
3.68
hbase_RowCountEndpoint_getKeyValueCount
/** * Returns a count of all KeyValues in the region where this coprocessor is loaded. */ @Override public void getKeyValueCount(RpcController controller, CountRequest request, RpcCallback<CountResponse> done) { CountResponse response = null; InternalScanner scanner = null; try { scanner = env.getRegion().getScanner(new Scan()); List<Cell> results = new ArrayList<>(); boolean hasMore = false; long count = 0; do { hasMore = scanner.next(results); count += Iterables.size(results); results.clear(); } while (hasMore); response = CountResponse.newBuilder().setCount(count).build(); } catch (IOException ioe) { CoprocessorRpcUtils.setControllerException(controller, ioe); } finally { if (scanner != null) { IOUtils.closeQuietly(scanner); } } done.run(response); }
3.68
hbase_FileIOEngine_isPersistent
/** * File IO engine is always able to support persistent storage for the cache */ @Override public boolean isPersistent() { return true; }
3.68
framework_ColorUtil_getRGBPatternColor
/** * Parses {@link Color} from matched RGB {@link Matcher}. * * @param matcher * {@link Matcher} matching RGB pattern with named regex groups * {@code red}, {@code green}, and {@code blue} * @return {@link Color} parsed from {@link Matcher} */ public static Color getRGBPatternColor(Matcher matcher) { int red = Integer.parseInt(matcher.group("red")); int green = Integer.parseInt(matcher.group("green")); int blue = Integer.parseInt(matcher.group("blue")); return new Color(red, green, blue); }
3.68
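The method relies on the matcher exposing named groups red, green and blue; a sketch with a hypothetical pattern (the real pattern lives elsewhere in ColorUtil):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class RgbGroupsDemo {
        // Hypothetical stand-in for the RGB pattern the method expects.
        private static final Pattern RGB = Pattern.compile(
                "rgb\\((?<red>\\d{1,3}),\\s*(?<green>\\d{1,3}),\\s*(?<blue>\\d{1,3})\\)");

        public static void main(String[] args) {
            Matcher matcher = RGB.matcher("rgb(255, 128, 0)");
            if (matcher.matches()) {
                // These are exactly the lookups getRGBPatternColor performs.
                System.out.println(matcher.group("red"));   // 255
                System.out.println(matcher.group("green")); // 128
                System.out.println(matcher.group("blue"));  // 0
            }
        }
    }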
hbase_BackupManifest_store
/** * TODO: fix it. Persist the manifest file. * @throws BackupException if an error occurred while storing the manifest file. */ public void store(Configuration conf) throws BackupException { byte[] data = backupImage.toProto().toByteArray(); // write the file, overwriting it if it already exists Path manifestFilePath = new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(), backupImage.getBackupId()), MANIFEST_FILE_NAME); try (FSDataOutputStream out = manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) { out.write(data); } catch (IOException e) { throw new BackupException(e.getMessage()); } LOG.info("Manifest file stored to " + manifestFilePath); }
3.68
hadoop_ContainerUpdates_getDemotionRequests
/** * Returns Container Demotion Requests. * @return Container Demotion Requests. */ public List<UpdateContainerRequest> getDemotionRequests() { return demotionRequests; }
3.68
hadoop_OperationDuration_finished
/** * Update the finished time with the current system time. */ public void finished() { finished = time(); }
3.68
flink_SourceReader_notifyCheckpointComplete
/** * We have an empty default implementation here because most source readers do not have to * implement the method. * * @see CheckpointListener#notifyCheckpointComplete(long) */ @Override default void notifyCheckpointComplete(long checkpointId) throws Exception {}
3.68
hbase_QuotaTableUtil_createPutForNamespaceSnapshotSize
/** * Creates a {@code Put} for the namespace's total snapshot size. */ static Put createPutForNamespaceSnapshotSize(String namespace, long size) { Put p = new Put(getNamespaceRowKey(namespace)); p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_SNAPSHOT_SIZE_QUALIFIER, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot.newBuilder() .setQuotaUsage(size).build().toByteArray()); return p; }
3.68
hibernate-validator_ResourceBundleMessageInterpolator_run
/** * Runs the given privileged action, using a privileged block if required. * <p> * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary * privileged actions within HV's protection domain. */ @IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17") private static <T> T run(PrivilegedAction<T> action) { return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run(); }
3.68
Activiti_WSOperation_getId
/** * {@inheritDoc} */ public String getId() { return this.id; }
3.68
hadoop_ContainerStatus_getExposedPorts
/** * Get exposed ports of the container. * @return List of exposed ports */ @Public @Unstable public String getExposedPorts() { throw new UnsupportedOperationException( "subclass must implement this method"); }
3.68
framework_GridLayout_replaceComponent
/* Documented in superclass */ @Override public void replaceComponent(Component oldComponent, Component newComponent) { // Gets the locations ChildComponentData oldLocation = getState().childData.get(oldComponent); ChildComponentData newLocation = getState().childData.get(newComponent); if (oldLocation == null) { addComponent(newComponent); } else if (newLocation == null) { removeComponent(oldComponent); addComponent(newComponent, oldLocation.column1, oldLocation.row1, oldLocation.column2, oldLocation.row2); } else { int oldAlignment = oldLocation.alignment; oldLocation.alignment = newLocation.alignment; newLocation.alignment = oldAlignment; getState().childData.put(newComponent, oldLocation); getState().childData.put(oldComponent, newLocation); } }
3.68
flink_FlinkAggregateJoinTransposeRule_toRegularAggregate
/** * Convert aggregate with AUXILIARY_GROUP to regular aggregate. Return original aggregate and * null project if the given aggregate does not contain AUXILIARY_GROUP, else new aggregate * without AUXILIARY_GROUP and a project to permute output columns if needed. */ private Pair<Aggregate, List<RexNode>> toRegularAggregate(Aggregate aggregate) { Tuple2<int[], Seq<AggregateCall>> auxGroupAndRegularAggCalls = AggregateUtil.checkAndSplitAggCalls(aggregate); final int[] auxGroup = auxGroupAndRegularAggCalls._1; final Seq<AggregateCall> regularAggCalls = auxGroupAndRegularAggCalls._2; if (auxGroup.length != 0) { int[] fullGroupSet = AggregateUtil.checkAndGetFullGroupSet(aggregate); ImmutableBitSet newGroupSet = ImmutableBitSet.of(fullGroupSet); List<AggregateCall> aggCalls = JavaConverters.seqAsJavaListConverter(regularAggCalls).asJava(); final Aggregate newAgg = aggregate.copy( aggregate.getTraitSet(), aggregate.getInput(), newGroupSet, com.google.common.collect.ImmutableList.of(newGroupSet), aggCalls); final List<RelDataTypeField> aggFields = aggregate.getRowType().getFieldList(); final List<RexNode> projectAfterAgg = new ArrayList<>(); for (int i = 0; i < fullGroupSet.length; ++i) { int group = fullGroupSet[i]; int index = newGroupSet.indexOf(group); projectAfterAgg.add(new RexInputRef(index, aggFields.get(i).getType())); } int fieldCntOfAgg = aggFields.size(); for (int i = fullGroupSet.length; i < fieldCntOfAgg; ++i) { projectAfterAgg.add(new RexInputRef(i, aggFields.get(i).getType())); } Preconditions.checkArgument(projectAfterAgg.size() == fieldCntOfAgg); return new Pair<>(newAgg, projectAfterAgg); } else { return new Pair<>(aggregate, null); } }
3.68
hadoop_AbstractPolicyManager_updateContext
/** * This method is used to copy-on-write the context, that will be passed * downstream to the router/amrmproxy policies. */ private FederationPolicyInitializationContext updateContext( FederationPolicyInitializationContext federationPolicyContext, String type) { // copying configuration and context to avoid modification of original SubClusterPolicyConfiguration newConf = SubClusterPolicyConfiguration .newInstance(federationPolicyContext .getSubClusterPolicyConfiguration()); newConf.setType(type); return new FederationPolicyInitializationContext(newConf, federationPolicyContext.getFederationSubclusterResolver(), federationPolicyContext.getFederationStateStoreFacade(), federationPolicyContext.getHomeSubcluster()); }
3.68
querydsl_GeometryExpression_asText
/** * Exports this geometric object to a specific Well-known Text Representation of Geometry. * * @return text representation */ public StringExpression asText() { if (text == null) { text = Expressions.stringOperation(SpatialOps.AS_TEXT, mixin); } return text; }
3.68
pulsar_BrokerMonitor_printGlobalData
// Prints out the global load data. private void printGlobalData() { synchronized (loadData) { // 1 header row, 1 total row, and loadData.size() rows for brokers. Object[][] rows = new Object[loadData.size() + 2][]; rows[0] = GLOBAL_HEADER; int totalBundles = 0; double totalThroughput = 0; double totalMessageRate = 0; double totalLongTermMessageRate = 0; double maxMaxUsage = 0; int i = 1; for (final Map.Entry<String, Object> entry : loadData.entrySet()) { final String broker = entry.getKey(); final Object data = entry.getValue(); rows[i] = new Object[GLOBAL_HEADER.length]; rows[i][0] = broker; int numBundles; double messageRate; double longTermMessageRate; double messageThroughput; double maxUsage; if (data instanceof LoadReport) { final LoadReport loadReport = (LoadReport) data; numBundles = loadReport.getNumBundles(); messageRate = loadReport.getMsgRateIn() + loadReport.getMsgRateOut(); longTermMessageRate = loadReport.getAllocatedMsgRateIn() + loadReport.getAllocatedMsgRateOut(); messageThroughput = (loadReport.getAllocatedBandwidthIn() + loadReport.getAllocatedBandwidthOut()) / 1024; final SystemResourceUsage systemResourceUsage = loadReport.getSystemResourceUsage(); maxUsage = Math.max( Math.max( Math.max(systemResourceUsage.getCpu().percentUsage(), systemResourceUsage.getMemory().percentUsage()), Math.max(systemResourceUsage.getDirectMemory().percentUsage(), systemResourceUsage.getBandwidthIn().percentUsage())), systemResourceUsage.getBandwidthOut().percentUsage()); } else if (data instanceof LocalBrokerData) { final LocalBrokerData localData = (LocalBrokerData) data; numBundles = localData.getNumBundles(); messageRate = localData.getMsgRateIn() + localData.getMsgRateOut(); final String timeAveragePath = BROKER_TIME_AVERAGE_BASE_PATH + "/" + broker; try { final TimeAverageBrokerData timeAverageData = gson.fromJson( new String(zkClient.getData(timeAveragePath, false, null)), TimeAverageBrokerData.class); longTermMessageRate = timeAverageData.getLongTermMsgRateIn() + timeAverageData.getLongTermMsgRateOut(); } catch (Exception x) { throw new RuntimeException(x); } messageThroughput = (localData.getMsgThroughputIn() + localData.getMsgThroughputOut()) / 1024; maxUsage = localData.getMaxResourceUsage(); } else { throw new AssertionError("Unreachable code"); } rows[i][1] = numBundles; rows[i][2] = messageRate; rows[i][3] = messageThroughput; rows[i][4] = longTermMessageRate; rows[i][5] = maxUsage; totalBundles += numBundles; totalMessageRate += messageRate; totalLongTermMessageRate += longTermMessageRate; totalThroughput += messageThroughput; maxMaxUsage = Math.max(maxUsage, maxMaxUsage); ++i; } final int finalRow = loadData.size() + 1; rows[finalRow] = new Object[GLOBAL_HEADER.length]; rows[finalRow][0] = "TOTAL"; rows[finalRow][1] = totalBundles; rows[finalRow][2] = totalMessageRate; // keep the TOTAL row aligned with the per-broker rows above: throughput in column 3, long-term rate in column 4 rows[finalRow][3] = totalThroughput; rows[finalRow][4] = totalLongTermMessageRate; rows[finalRow][5] = maxMaxUsage; final String table = globalTableMaker.make(rows); log.info("Overall Broker Data:\n{}", table); } }
3.68
framework_CalendarEventProvider_getProvider
/** * @return the * {@link com.vaadin.addon.calendar.event.CalendarEventProvider * CalendarEventProvider} that has changed */ public CalendarEventProvider getProvider() { return source; }
3.68
flink_TableChange_getOldColumnName
/** Returns the origin column name. */ public String getOldColumnName() { return oldColumn.getName(); }
3.68
querydsl_AbstractHibernateQuery_setCacheRegion
/** * Set the name of the cache region. * @param cacheRegion the name of a query cache region, or {@code null} * for the default query cache */ @SuppressWarnings("unchecked") public Q setCacheRegion(String cacheRegion) { this.cacheRegion = cacheRegion; return (Q) this; }
3.68
flink_ExecNodeConfig_getStateRetentionTime
/** @return The duration, in milliseconds, for which state that has not been updated is retained before it is cleaned up. */ public long getStateRetentionTime() { return get(ExecutionConfigOptions.IDLE_STATE_RETENTION).toMillis(); }
3.68
morf_AbstractSqlDialectTest_testSelectHash
/** * Test that {@link SqlDialect#convertStatementToHash(SelectStatement)} works. */ @Test public void testSelectHash() { SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE)); String hash = testDialect.convertStatementToHash(stmt); assertFalse("Valid", StringUtils.isBlank(hash)); }
3.68
flink_DataOutputSerializer_getSharedBuffer
/** * Gets a reference to the internal byte buffer. This buffer may be larger than the actual * serialized data. Only the bytes from zero to {@link #length()} are valid. The buffer will * also be overwritten with the next write calls. * * <p>This method is useful when trying to avoid byte copies, but should be used carefully. * * @return A reference to the internal shared and reused buffer. */ public byte[] getSharedBuffer() { return buffer; }
3.68
framework_VTabsheetBase_addTabKey
/** * For internal use only. May be removed or replaced in the future. * * @param key * an internal key that corresponds with a tab * @param disabled * {@code true} if the tab should be disabled, {@code false} * otherwise */ public void addTabKey(String key, boolean disabled) { tabKeys.add(key); if (disabled) { disabledTabKeys.add(key); } }
3.68
hmily_HmilyTacParticipantCoordinator_commitParticipant
/** * Commit participant. * * @param hmilyParticipantList the hmily participant list * @param selfParticipantId the self participant id */ public void commitParticipant(final List<HmilyParticipant> hmilyParticipantList, final Long selfParticipantId) { if (CollectionUtils.isEmpty(hmilyParticipantList)) { return; } log.debug("TAC-participate-commit ::: {}", hmilyParticipantList); for (HmilyParticipant participant : hmilyParticipantList) { try { if (participant.getParticipantId().equals(selfParticipantId)) { HmilyTacLocalParticipantExecutor.confirm(participant); } else { HmilyReflector.executor(HmilyActionEnum.CONFIRMING, ExecutorTypeEnum.RPC, participant); } } catch (Throwable throwable) { throw new HmilyRuntimeException(" hmilyParticipant execute confirm exception:" + participant.toString()); } finally { // FIXME why remove context after first participator handled HmilyContextHolder.remove(); } } }
3.68
hbase_RSGroupInfo_removeConfiguration
/** Remove a config setting represented by the key from the {@link #configuration} map */ public void removeConfiguration(final String key) { configuration.remove(key); }
3.68
flink_FixedLengthRecordSorter_reset
/** * Resets the sort buffer back to the state where it is empty. All contained data is discarded. */ @Override public void reset() { // reset all offsets this.numRecords = 0; this.currentSortBufferOffset = 0; this.sortBufferBytes = 0; // return all memory this.freeMemory.addAll(this.sortBuffer); this.sortBuffer.clear(); // grab first buffers this.currentSortBufferSegment = nextMemorySegment(); this.sortBuffer.add(this.currentSortBufferSegment); this.outView.set(this.currentSortBufferSegment); }
3.68
framework_DataCommunicator_setInMemorySorting
/** * Sets the {@link Comparator} to use with in-memory sorting. * * @param comparator * comparator used to sort data */ public void setInMemorySorting(Comparator<T> comparator) { setInMemorySorting(comparator, true); }
3.68
flink_Configuration_getInteger
/** * Returns the value associated with the given config option as an integer. If no value is * mapped under any key of the option, it returns the specified default instead of the option's * default value. * * @param configOption The configuration option * @param overrideDefault The value to return if no value was mapper for any key of the option * @return the configured value associated with the given config option, or the overrideDefault */ @PublicEvolving public int getInteger(ConfigOption<Integer> configOption, int overrideDefault) { return getOptional(configOption).orElse(overrideDefault); }
3.68
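A sketch of the override semantics, using a hypothetical option key demo.retries:

    import org.apache.flink.configuration.ConfigOption;
    import org.apache.flink.configuration.ConfigOptions;
    import org.apache.flink.configuration.Configuration;

    public class OverrideDefaultDemo {
        public static void main(String[] args) {
            ConfigOption<Integer> retries =
                    ConfigOptions.key("demo.retries").intType().defaultValue(3);

            Configuration conf = new Configuration();
            // Nothing set: the override (7) wins over the option's own default (3).
            System.out.println(conf.getInteger(retries, 7)); // 7

            conf.setInteger(retries, 5);
            // An explicitly configured value always wins over the override.
            System.out.println(conf.getInteger(retries, 7)); // 5
        }
    }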
rocketmq-connect_AbstractLocalSchemaRegistryClient_getSchemaLatestVersion
/** * Get the latest schema version for a subject. * * @param namespace the schema namespace * @param subject the schema subject * @return the latest schema, or {@code null} if the registry returns a client error */ public GetSchemaResponse getSchemaLatestVersion(String namespace, String subject) { try { return schemaRegistryClient.getSchemaBySubject(cluster, namespace, subject); } catch (RestClientException | IOException e) { if (e instanceof RestClientException) { return null; } else { throw new RuntimeException(e); } } }
3.68
hbase_MetricsHeapMemoryManager_setCurMemStoreOnHeapSizeGauge
/** * Set the current global memstore on-heap size gauge * @param memStoreOnHeapSize the current memory on-heap size in memstore, in bytes. */ public void setCurMemStoreOnHeapSizeGauge(final long memStoreOnHeapSize) { source.setCurMemStoreOnHeapSizeGauge(memStoreOnHeapSize); }
3.68
MagicPlugin_CastPermissionManager_getRegionCastPermission
/** * This will perform cast permission checks for a specific location. * * @return false to deny cast permission, null to not care. Returning true means the cast will be allowed, * including breaking/building blocks, even if it otherwise would not be allowed. */ @Nullable default Boolean getRegionCastPermission(Player player, SpellTemplate spell, Location location) { return null; }
3.68
querydsl_ExpressionUtils_likeToRegex
/** * Convert the given like pattern to a regex pattern * * @param expr expression to be converted * @param matchStartAndEnd if start and end should be matched as well * @return converted expression */ @SuppressWarnings("unchecked") public static Expression<String> likeToRegex(Expression<String> expr, boolean matchStartAndEnd) { // TODO : this should take the escape character into account if (expr instanceof Constant<?>) { final String like = expr.toString(); final StringBuilder rv = new StringBuilder(like.length() + 4); if (matchStartAndEnd && !like.startsWith("%")) { rv.append('^'); } for (int i = 0; i < like.length(); i++) { char ch = like.charAt(i); if (ch == '.' || ch == '*' || ch == '?') { rv.append('\\'); } else if (ch == '%') { rv.append(".*"); continue; } else if (ch == '_') { rv.append('.'); continue; } rv.append(ch); } if (matchStartAndEnd && !like.endsWith("%")) { rv.append('$'); } if (!like.equals(rv.toString())) { return ConstantImpl.create(rv.toString()); } } else if (expr instanceof Operation<?>) { Operation<?> o = (Operation<?>) expr; if (o.getOperator() == Ops.CONCAT) { Expression<String> lhs = likeToRegex((Expression<String>) o.getArg(0), false); Expression<String> rhs = likeToRegex((Expression<String>) o.getArg(1), false); if (lhs != o.getArg(0) || rhs != o.getArg(1)) { return operation(String.class, Ops.CONCAT, lhs, rhs); } } } return expr; }
3.68
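A worked example of the conversion rules above ('%' becomes '.*', '_' becomes '.', regex metacharacters are escaped), assuming the method is the public static ExpressionUtils.likeToRegex shown:

    import com.querydsl.core.types.ConstantImpl;
    import com.querydsl.core.types.Expression;
    import com.querydsl.core.types.ExpressionUtils;

    public class LikeToRegexDemo {
        public static void main(String[] args) {
            Expression<String> like = ConstantImpl.create("%abc_e.f%");
            Expression<String> regex = ExpressionUtils.likeToRegex(like, true);
            // '%' -> '.*', '_' -> '.', and the literal '.' is escaped.
            // No ^/$ anchors are added because the pattern starts and ends with '%'.
            System.out.println(regex); // .*abc.e\.f.*
        }
    }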
framework_UIDL_getLongAttribute
/** * Gets the named attribute as a long. * * @param name * the name of the attribute to get * @return the attribute value */ public long getLongAttribute(String name) { return (long) attr().getRawNumber(name); }
3.68
framework_VMediaBase_setLoop
/** * Enables or disables looping. * * @param loop * if true, enable looping * @since 7.7.11 */ public void setLoop(final boolean loop) { media.setLoop(loop); }
3.68
hbase_SnapshotManager_getCoordinator
/** Returns distributed commit coordinator for all running snapshots */ ProcedureCoordinator getCoordinator() { return coordinator; }
3.68
morf_SqlDialect_tableHasBlobColumns
/** * Whether this table has any BLOB columns. * * @param table The table. * @return true if the table has one or more BLOB columns. */ protected boolean tableHasBlobColumns(Table table) { for (Column column : table.columns()) { if (column.getType() == DataType.BLOB) { return true; } } return false; }
3.68
pulsar_ResourceUnitRanking_getAllocatedLoadPercentageMemory
/** * Percentage of memory allocated to the bundle's quota. */ public double getAllocatedLoadPercentageMemory() { return this.allocatedLoadPercentageMemory; }
3.68
pulsar_ProducerConfiguration_getBlockIfQueueFull
/** * * @return whether the producer will block {@link Producer#send} and {@link Producer#sendAsync} operations when the * pending queue is full */ public boolean getBlockIfQueueFull() { return conf.isBlockIfQueueFull(); }
3.68
framework_Escalator_resetSizesFromDom
/** * Resets all cached pixel sizes and reads new values from the DOM. This * method should be used e.g. when styles affecting the dimensions of * elements in this escalator have been changed. */ public void resetSizesFromDom() { header.autodetectRowHeightNow(); body.autodetectRowHeightNow(); footer.autodetectRowHeightNow(); for (int i = 0; i < columnConfiguration.getColumnCount(); i++) { columnConfiguration.setColumnWidth(i, columnConfiguration.getColumnWidth(i)); } }
3.68
hadoop_RouterQuotaManager_getQuotaUsage
/** * Get the nearest ancestor's quota usage, and meanwhile its quota was set. * @param path The path being written. * @return RouterQuotaUsage Quota usage. */ public RouterQuotaUsage getQuotaUsage(String path) { readLock.lock(); try { RouterQuotaUsage quotaUsage = this.cache.get(path); if (quotaUsage != null && isQuotaSet(quotaUsage)) { return quotaUsage; } // If not found, look for its parent path usage value. int pos = path.lastIndexOf(Path.SEPARATOR); if (pos != -1) { String parentPath = path.substring(0, pos); return getQuotaUsage(parentPath); } } finally { readLock.unlock(); } return null; }
3.68
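A dependency-free sketch of the ancestor walk performed above (not the actual RouterQuotaManager API): look up the path itself, then recurse to the parent until a quota entry is found or the root is passed.

    import java.util.HashMap;
    import java.util.Map;

    public class AncestorQuotaDemo {
        static final Map<String, Long> QUOTAS = new HashMap<>();

        static Long getQuota(String path) {
            Long quota = QUOTAS.get(path);
            if (quota != null) {
                return quota; // a quota is set directly on this path
            }
            int pos = path.lastIndexOf('/');
            if (pos > 0) {
                return getQuota(path.substring(0, pos)); // fall back to the parent
            }
            return null; // reached the root without finding a quota
        }

        public static void main(String[] args) {
            QUOTAS.put("/data", 100L);
            System.out.println(getQuota("/data/project/file")); // 100, inherited from /data
            System.out.println(getQuota("/other"));             // null
        }
    }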
hudi_UtilHelpers_buildSparkContext
/** * Build Spark Context for ingestion/compaction. * * @return a new {@link JavaSparkContext} for the given application name, master and executor memory */ public static JavaSparkContext buildSparkContext(String appName, String sparkMaster, String sparkMemory) { SparkConf sparkConf = buildSparkConf(appName, sparkMaster); if (sparkMemory != null) { sparkConf.set("spark.executor.memory", sparkMemory); } return new JavaSparkContext(sparkConf); }
3.68
hadoop_TaskId_readFields
/** {@inheritDoc} */ public final void readFields(final DataInput in) throws IOException { jobId = new JobId(); jobId.readFields(in); this.taskId = WritableUtils.readVLong(in); }
3.68
flink_CopyOnWriteSkipListStateMap_getFirstNodeWithNamespace
/** * Find the first node with the given namespace at level 0. * * @param namespaceSegment memory segment storing the namespace. * @param namespaceOffset offset of the namespace. * @param namespaceLen length of the namespace. * @return the first node with the given namespace. NIL_NODE will be returned if not exist. */ private long getFirstNodeWithNamespace( MemorySegment namespaceSegment, int namespaceOffset, int namespaceLen) { int currentLevel = levelIndexHeader.getLevel(); long prevNode = HEAD_NODE; long currentNode = helpGetNextNode(prevNode, currentLevel); int c; // find the predecessor node at level 0. for (; ; ) { if (currentNode != NIL_NODE) { c = compareNamespaceAndNode( namespaceSegment, namespaceOffset, namespaceLen, currentNode); if (c > 0) { prevNode = currentNode; currentNode = helpGetNextNode(prevNode, currentLevel); continue; } } currentLevel--; if (currentLevel < 0) { break; } currentNode = helpGetNextNode(prevNode, currentLevel); } // find the first node that has not been logically removed while (currentNode != NIL_NODE) { if (isNodeRemoved(currentNode)) { currentNode = helpGetNextNode(currentNode, 0); continue; } c = compareNamespaceAndNode( namespaceSegment, namespaceOffset, namespaceLen, currentNode); if (c == 0) { return currentNode; } if (c < 0) { break; } } return NIL_NODE; }
3.68
morf_SchemaValidator_isNameConventional
/** * <p> * Method to establish if a name matches an allowed pattern of characters to * follow the correct naming convention. * </p> * <p> * The name must: * </p> * <ul> * <li>begin with an alphabetic character [a-zA-Z]</li> * <li>only contain alphanumeric characters or underscore [a-zA-Z0-9_]</li> * </ul> * * @param name The string to check if it follows the correct naming convention * @return true if the name is valid otherwise false */ boolean isNameConventional(String name){ return validNamePattern.matcher(name).matches(); }
3.68
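A plausible stand-in for validNamePattern, built from the two documented rules; the actual pattern is defined elsewhere in SchemaValidator:

    import java.util.regex.Pattern;

    public class NameConventionDemo {
        // Starts with a letter, then letters, digits or underscores only.
        private static final Pattern VALID_NAME = Pattern.compile("[a-zA-Z][a-zA-Z0-9_]*");

        static boolean isNameConventional(String name) {
            return VALID_NAME.matcher(name).matches();
        }

        public static void main(String[] args) {
            System.out.println(isNameConventional("orderLine2")); // true
            System.out.println(isNameConventional("2ndColumn"));  // false: starts with a digit
            System.out.println(isNameConventional("order-line")); // false: '-' is not allowed
        }
    }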
hadoop_FilterFileSystem_copyToLocalFile
/** * The src file is under FS, and the dst is on the local disk. * Copy it from FS control to the local dst name. * delSrc indicates if the src will be removed or not. */ @Override public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOException { fs.copyToLocalFile(delSrc, src, dst); }
3.68
hadoop_SpillCallBackPathsFinder_getInvalidSpillEntries
/** * Gets the set of path:pos of the entries that were accessed incorrectly. * @return a set of string in the format of {@literal Path[Pos]} */ public Set<String> getInvalidSpillEntries() { Set<String> result = new LinkedHashSet<>(); for (Entry<Path, Set<Long>> spillMapEntry: invalidAccessMap.entrySet()) { for (Long singleEntry : spillMapEntry.getValue()) { result.add(String.format("%s[%d]", spillMapEntry.getKey(), singleEntry)); } } return result; }
3.68
hudi_RequestHandler_jsonifyResult
/** * Serializes the result into a JSON String. * * @param ctx Javalin context * @param obj object to serialize * @param metricsRegistry {@code Registry} instance for storing metrics * @param objectMapper JSON object mapper * @param logger {@code Logger} instance * @return JSON String from the input object * @throws JsonProcessingException if the object cannot be serialized to JSON */ public static String jsonifyResult( Context ctx, Object obj, Registry metricsRegistry, ObjectMapper objectMapper, Logger logger) throws JsonProcessingException { HoodieTimer timer = HoodieTimer.start(); boolean prettyPrint = ctx.queryParam("pretty") != null; String result = prettyPrint ? objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(obj) : objectMapper.writeValueAsString(obj); final long jsonifyTime = timer.endTimer(); metricsRegistry.add("WRITE_VALUE_CNT", 1); metricsRegistry.add("WRITE_VALUE_TIME", jsonifyTime); if (logger.isDebugEnabled()) { logger.debug("Jsonify TimeTaken=" + jsonifyTime); } return result; }
3.68
framework_VTabsheet_getSelectTabKey
/** * Gets the key to select the focused tab when navigating using * previous/next (left/right) keys. * * @return the key to select the focused tab. * * @see #getNextTabKey() * @see #getPreviousTabKey() */ protected int getSelectTabKey() { return KeyCodes.KEY_SPACE; }
3.68
flink_RocksDBKeyedStateBackend_getInstanceBasePath
/** Only visible for testing, DO NOT USE. */ File getInstanceBasePath() { return instanceBasePath; }
3.68
flink_SystemProcessingTimeService_finalize
// safety net to destroy the thread pool @Override protected void finalize() throws Throwable { super.finalize(); timerService.shutdownNow(); }
3.68
flink_StreamOperatorFactory_isStreamSource
/** Is this factory for {@link StreamSource}. */ default boolean isStreamSource() { return false; }
3.68
flink_ExceptionUtils_stripException
/** * Unpacks the specified exception and returns its cause. Otherwise the given {@link Throwable} * is returned. * * @param throwableToStrip the throwable to strip * @param typeToStrip the exception type to strip * @return Unpacked cause or given Throwable if not packed */ public static Throwable stripException( Throwable throwableToStrip, Class<? extends Throwable> typeToStrip) { while (typeToStrip.isAssignableFrom(throwableToStrip.getClass()) && throwableToStrip.getCause() != null) { throwableToStrip = throwableToStrip.getCause(); } return throwableToStrip; }
3.68
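A usage sketch: stripping nested ExecutionException wrappers down to the root cause, assuming the method lives in Flink's org.apache.flink.util.ExceptionUtils as shown above.

    import java.util.concurrent.ExecutionException;
    import org.apache.flink.util.ExceptionUtils;

    public class StripExceptionDemo {
        public static void main(String[] args) {
            Throwable wrapped = new ExecutionException(
                    new ExecutionException(new RuntimeException("root cause")));
            // Peels ExecutionException layers as long as a cause exists.
            Throwable stripped = ExceptionUtils.stripException(wrapped, ExecutionException.class);
            System.out.println(stripped.getMessage()); // root cause
        }
    }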
hbase_CatalogFamilyFormat_getSeqNumColumn
/** * Returns the column qualifier for seqNum column for replicaId * @param replicaId the replicaId of the region * @return a byte[] for seqNum column qualifier */ public static byte[] getSeqNumColumn(int replicaId) { return replicaId == 0 ? HConstants.SEQNUM_QUALIFIER : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); }
3.68
hibernate-validator_ValueContexts_getLocalExecutionContextForExecutable
/** * Creates a value context for validating an executable. Can be applied to both parameter and * return value validation. Does not require a bean metadata information. */ public static <T, V> ValueContext<T, V> getLocalExecutionContextForExecutable( ExecutableParameterNameProvider parameterNameProvider, T value, Validatable validatable, PathImpl propertyPath) { return new ValueContext<>( parameterNameProvider, value, validatable, propertyPath ); }
3.68
flink_StreamExecutionEnvironment_setNumberOfExecutionRetries
/** * Sets the number of times that failed tasks are re-executed. A value of zero effectively * disables fault tolerance. A value of {@code -1} indicates that the system default value (as * defined in the configuration) should be used. * * @param numberOfExecutionRetries The number of times the system will try to re-execute failed * tasks. * @deprecated This method will be replaced by {@link #setRestartStrategy}. The {@link * RestartStrategies#fixedDelayRestart(int, Time)} contains the number of execution retries. */ @Deprecated @PublicEvolving public void setNumberOfExecutionRetries(int numberOfExecutionRetries) { config.setNumberOfExecutionRetries(numberOfExecutionRetries); }
3.68
flink_BlockInfo_getAccumulatedRecordCount
/** * Returns the accumulated record count. * * @return the accumulated record count */ public long getAccumulatedRecordCount() { return this.accumulatedRecordCount; }
3.68
flink_KeyGroupRangeAssignment_assignToKeyGroup
/** * Assigns the given key to a key-group index. * * @param key the key to assign * @param maxParallelism the maximum supported parallelism, aka the number of key-groups. * @return the key-group to which the given key is assigned */ public static int assignToKeyGroup(Object key, int maxParallelism) { Preconditions.checkNotNull(key, "Assigned key must not be null!"); return computeKeyGroupForKeyHash(key.hashCode(), maxParallelism); }
3.68
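A usage sketch: for a fixed maxParallelism the assignment is deterministic and always lands in [0, maxParallelism).

    import org.apache.flink.runtime.state.KeyGroupRangeAssignment;

    public class KeyGroupDemo {
        public static void main(String[] args) {
            int maxParallelism = 128; // the number of key-groups
            int group = KeyGroupRangeAssignment.assignToKeyGroup("user-42", maxParallelism);
            // Deterministic: the same key maps to the same key-group every time.
            assert group == KeyGroupRangeAssignment.assignToKeyGroup("user-42", maxParallelism);
            System.out.println(group >= 0 && group < maxParallelism); // true
        }
    }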
hudi_HoodieTableMetadataUtil_getPartitionLatestFileSlices
/** * Get the latest file slices for a Metadata Table partition. The list of file slices * returned is sorted in the correct order of file group name. * * @param metaClient - Instance of {@link HoodieTableMetaClient}. * @param fsView - Metadata table filesystem view * @param partition - The name of the partition whose file groups are to be loaded. * @return List of latest file slices for all file groups in a given partition. */ public static List<FileSlice> getPartitionLatestFileSlices(HoodieTableMetaClient metaClient, Option<HoodieTableFileSystemView> fsView, String partition) { LOG.info("Loading latest file slices for metadata table partition " + partition); return getPartitionFileSlices(metaClient, fsView, partition, false); }
3.68
flink_InPlaceMutableHashTable_readPointer
/** * Note: this is sometimes a negated length instead of a pointer (see * HashTableProber.updateMatch). */ public long readPointer() throws IOException { return inView.readLong(); }
3.68
hbase_MasterObserver_postBalance
/** * Called after the balancing plan has been submitted. * @param ctx the environment to interact with the framework and master * @param request the request used to trigger the balance * @param plans the RegionPlans which master has executed. RegionPlan serves as hint as for the * final destination for the underlying region but may not represent the final * state of assignment */ default void postBalance(final ObserverContext<MasterCoprocessorEnvironment> ctx, BalanceRequest request, List<RegionPlan> plans) throws IOException { }
3.68
framework_VMenuBar_setParentMenu
/** * Set the parent menu of this menu. * * @param parent the parent menu of this menu */ public void setParentMenu(VMenuBar parent) { parentMenu = parent; }
3.68
framework_Escalator_verifyEscalatorCount
/** * Make sure that there is a correct amount of escalator rows: Add more * if needed, or remove any superfluous ones. * <p> * This method should be called when e.g. the height of the Escalator * changes. * <p> * <em>Note:</em> This method will make sure that the escalator rows are * placed in the proper places. By default new rows are added below, but * if the content is scrolled down, the rows are populated on top * instead. */ public void verifyEscalatorCount() { /* * This method indeed has a smell very similar to paintRemoveRows * and paintInsertRows. * * Unfortunately, the code can't trivially be shared, since * there are some slight differences in the respective * responsibilities. The "paint" methods fake the addition and * removal of rows, and make sure to either push existing data out * of view, or draw new data into view. Only in some special cases * will the DOM element count change. * * This method, however, has the explicit responsibility to verify * that when "something" happens, we still have the correct amount * of escalator rows in the DOM, and if not, we make sure to modify * that count. Only in some special cases do we need to take into * account other things than simply modifying the DOM element count. */ Profiler.enter("Escalator.BodyRowContainer.verifyEscalatorCount"); if (!isAttached()) { return; } final int maxVisibleRowCount = getMaxVisibleRowCount(); final int neededEscalatorRows = Math.min(maxVisibleRowCount, body.getRowCount()); final int neededEscalatorRowsDiff = neededEscalatorRows - visualRowOrder.size(); if (neededEscalatorRowsDiff > 0) { // needs more /* * This is a workaround for the issue where we might be scrolled * to the bottom, and the widget expands beyond the content * range */ final int index = visualRowOrder.size(); final int nextLastLogicalIndex; if (!visualRowOrder.isEmpty()) { nextLastLogicalIndex = getLogicalRowIndex( visualRowOrder.getLast()) + 1; } else { nextLastLogicalIndex = 0; } final boolean contentWillFit = nextLastLogicalIndex < getRowCount() - neededEscalatorRowsDiff; if (contentWillFit) { final List<TableRowElement> addedRows = fillAndPopulateEscalatorRowsIfNeeded( index, neededEscalatorRowsDiff); /* * Since fillAndPopulateEscalatorRowsIfNeeded operates on * the assumption that index == visual index == logical * index, we thank for the added escalator rows, but since * they're painted in the wrong CSS position, we need to * move them to their actual locations. * * Note: this is the second (see body.paintInsertRows) * occasion where fillAndPopulateEscalatorRowsIfNeeded would * behave "more correctly" if it only would add escalator * rows to the DOM and appropriate bookkeeping, and not * actually populate them :/ */ moveAndUpdateEscalatorRows( Range.withLength(index, addedRows.size()), index, nextLastLogicalIndex); } else { /* * TODO [[optimize]] * * We're scrolled so far down that all rows can't be simply * appended at the end, since we might start displaying * escalator rows that don't exist. To avoid the mess that * is body.paintRemoveRows, this is a dirty hack that dumbs * the problem down to a more basic and already-solved * problem: * * 1) scroll all the way up 2) add the missing escalator * rows 3) scroll back to the original position. * * Letting the browser scroll back to our original position * will automatically solve any possible overflow problems, * since the browser will not allow us to scroll beyond the * actual content. */ final double oldScrollTop = getScrollTop(); setScrollTop(0); scroller.onScroll(); fillAndPopulateEscalatorRowsIfNeeded(index, neededEscalatorRowsDiff); setScrollTop(oldScrollTop); scroller.onScroll(); } } else if (neededEscalatorRowsDiff < 0) { // needs less final ListIterator<TableRowElement> iter = visualRowOrder .listIterator(visualRowOrder.size()); for (int i = 0; i < -neededEscalatorRowsDiff; i++) { final Element last = iter.previous(); last.removeFromParent(); iter.remove(); } /* * If we were scrolled to the bottom so that we didn't have an * extra escalator row at the bottom, we'll probably end up with * blank space at the bottom of the escalator, and one extra row * above the header. * * Experimentation idea #1: calculate "scrollbottom" vs content * bottom and remove one row from top, rest from bottom. This * FAILED, since setHeight has already happened, thus we never * will detect ourselves having been scrolled all the way to the * bottom. */ if (!visualRowOrder.isEmpty()) { final double firstRowTop = getRowTop( visualRowOrder.getFirst()); final double firstRowMinTop = tBodyScrollTop - getDefaultRowHeight(); if (firstRowTop < firstRowMinTop) { final int newLogicalIndex = getLogicalRowIndex( visualRowOrder.getLast()) + 1; moveAndUpdateEscalatorRows(Range.withOnly(0), visualRowOrder.size(), newLogicalIndex); updateTopRowLogicalIndex(1); } } } if (neededEscalatorRowsDiff != 0) { fireRowVisibilityChangeEvent(); } Profiler.leave("Escalator.BodyRowContainer.verifyEscalatorCount"); }
3.68
flink_FlinkFilterJoinRule_isSmart
/** Whether to try to strengthen join-type, default false. */ @Value.Default default boolean isSmart() { return false; }
3.68
hibernate-validator_LuhnCheckValidator_isCheckDigitValid
/** * Validate check digit using Luhn algorithm * * @param digits The digits over which to calculate the checksum * @param checkDigit the check digit * * @return {@code true} if the luhn check result matches the check digit, {@code false} otherwise */ @Override public boolean isCheckDigitValid(List<Integer> digits, char checkDigit) { int modResult = ModUtil.calculateLuhnMod10Check( digits ); if ( !Character.isDigit( checkDigit ) ) { return false; } int checkValue = extractDigit( checkDigit ); return checkValue == modResult; }
3.68
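A dependency-free sketch of the mod-10 computation that ModUtil.calculateLuhnMod10Check performs over the payload digits (the standard Luhn rule: double every second digit from the right, subtract 9 from two-digit results, sum, and derive the check digit):

    import java.util.ArrayList;
    import java.util.List;

    public class LuhnDemo {
        static int luhnCheckDigit(List<Integer> digits) {
            int sum = 0;
            boolean doubled = true; // the rightmost payload digit is doubled
            for (int i = digits.size() - 1; i >= 0; i--) {
                int d = digits.get(i);
                if (doubled) {
                    d *= 2;
                    if (d > 9) {
                        d -= 9;
                    }
                }
                sum += d;
                doubled = !doubled;
            }
            return (10 - (sum % 10)) % 10;
        }

        public static void main(String[] args) {
            // Payload of the well-known test card 4242 4242 4242 4242 (check digit 2).
            List<Integer> payload = new ArrayList<>();
            for (char c : "424242424242424".toCharArray()) {
                payload.add(c - '0');
            }
            System.out.println(luhnCheckDigit(payload)); // 2
        }
    }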
flink_TypeExtractor_createTypeInfoFromFactory
/** Creates type information using a given factory. */ @SuppressWarnings("unchecked") private <IN1, IN2, OUT> TypeInformation<OUT> createTypeInfoFromFactory( Type t, TypeInformation<IN1> in1Type, TypeInformation<IN2> in2Type, List<Type> factoryHierarchy, TypeInfoFactory<? super OUT> factory, Type factoryDefiningType) { // infer possible type parameters from input final Map<String, TypeInformation<?>> genericParams; if (factoryDefiningType instanceof ParameterizedType) { genericParams = new HashMap<>(); final ParameterizedType paramDefiningType = (ParameterizedType) factoryDefiningType; final Type[] args = typeToClass(paramDefiningType).getTypeParameters(); final TypeInformation<?>[] subtypeInfo = createSubTypesInfo( t, paramDefiningType, factoryHierarchy, in1Type, in2Type, true); assert subtypeInfo != null; for (int i = 0; i < subtypeInfo.length; i++) { genericParams.put(args[i].toString(), subtypeInfo[i]); } } else { genericParams = Collections.emptyMap(); } final TypeInformation<OUT> createdTypeInfo = (TypeInformation<OUT>) factory.createTypeInfo(t, genericParams); if (createdTypeInfo == null) { throw new InvalidTypesException( "TypeInfoFactory returned invalid TypeInformation 'null'"); } return createdTypeInfo; }
3.68
hbase_RegionScannerImpl_joinedHeapMayHaveData
/** Returns true when the joined heap may have data for the current row */ private boolean joinedHeapMayHaveData(Cell currentRowCell) throws IOException { Cell nextJoinedKv = joinedHeap.peek(); boolean matchCurrentRow = nextJoinedKv != null && CellUtil.matchingRows(nextJoinedKv, currentRowCell); boolean matchAfterSeek = false; // If the next value in the joined heap does not match the current row, try to seek to the // correct row if (!matchCurrentRow) { Cell firstOnCurrentRow = PrivateCellUtil.createFirstOnRow(currentRowCell); boolean seekSuccessful = this.joinedHeap.requestSeek(firstOnCurrentRow, true, true); matchAfterSeek = seekSuccessful && joinedHeap.peek() != null && CellUtil.matchingRows(joinedHeap.peek(), currentRowCell); } return matchCurrentRow || matchAfterSeek; }
3.68
streampipes_ImageZipAdapter_start
/** * First extracts the user input and then starts a thread publishing events with images in the zip file * * @param collector is used to pre-process and publish events on message broker * @param extractor to extract configurations * @param infinite Describes if the replay should be restarted when it is finished or not */ public void start(IEventCollector collector, IStaticPropertyExtractor extractor, boolean infinite) throws AdapterException { Integer timeBetweenReplay = extractor.singleValueParameter(ImageZipUtils.INTERVAL_KEY, Integer.class); String zipFileUrl = extractor.selectedFilename(ImageZipUtils.ZIP_FILE_KEY); ZipFileImageIterator zipFileImageIterator; try { zipFileImageIterator = new ZipFileImageIterator(zipFileUrl, infinite); } catch (IOException e) { throw new AdapterException("Error while reading images in the zip file"); } running = true; task = new Thread(() -> { while (running && zipFileImageIterator.hasNext()) { try { String image = zipFileImageIterator.next(); Map<String, Object> result = new HashMap<>(); result.put(ImageZipUtils.TIMESTAMP, System.currentTimeMillis()); result.put(ImageZipUtils.IMAGE, image); collector.collect(result); } catch (IOException e) { LOG.error("Error while reading an image from the zip file " + e.getMessage()); } try { TimeUnit.MILLISECONDS.sleep(timeBetweenReplay); } catch (InterruptedException e) { LOG.error("Error while waiting for next replay round" + e.getMessage()); } } }); task.start(); }
3.68
framework_FocusableGrid_addBlurHandler
/* * (non-Javadoc) * * @see * com.google.gwt.event.dom.client.HasBlurHandlers#addBlurHandler(com.google * .gwt.event.dom.client.BlurHandler) */ @Override public HandlerRegistration addBlurHandler(BlurHandler handler) { return addDomHandler(handler, BlurEvent.getType()); }
3.68
hmily_SubCoordinator_addXaResource
/** * Add an XA resource. * * @param xaResource the XA resource * @return {@code true} if a resource for the same resource manager was already registered, {@code false} if the given resource was added */ public synchronized boolean addXaResource(final XAResource xaResource) { switch (state) { case STATUS_MARKED_ROLLBACK: break; case STATUS_ACTIVE: break; default: throw new RuntimeException("status == " + state); } Optional<XAResource> isSame = resources.stream().filter(e -> { try { return e.isSameRM(xaResource); } catch (XAException xaException) { logger.error("xa isSameRM,{}:{}", xaException, HmilyXaException.getMessage(xaException)); return false; } }).findFirst(); if (!isSame.isPresent()) { this.resources.add(xaResource); return false; } return true; }
3.68
framework_CustomFieldConnector_getChildComponents
/* * (non-Javadoc) * * @see com.vaadin.client.HasComponentsConnector#getChildren() */ @Override public List<ComponentConnector> getChildComponents() { if (childComponents == null) { return Collections.emptyList(); } return childComponents; }
3.68
flink_SqlNodeConverters_convertSqlNode
/** * Convert the given validated SqlNode into Operation if there is a registered converter for the * node. */ @SuppressWarnings({"unchecked", "rawtypes"}) public static Optional<Operation> convertSqlNode( SqlNode validatedSqlNode, ConvertContext context) { // match by class first SqlNodeConverter classConverter = CLASS_CONVERTERS.get(validatedSqlNode.getClass()); if (classConverter != null) { return Optional.of(classConverter.convertSqlNode(validatedSqlNode, context)); } // match by kind if no matching items in class converters SqlNodeConverter sqlKindConverter = SQLKIND_CONVERTERS.get(validatedSqlNode.getKind()); if (sqlKindConverter != null) { return Optional.of(sqlKindConverter.convertSqlNode(validatedSqlNode, context)); } else { return Optional.empty(); } }
3.68
morf_InsertStatementBuilder_build
/** * @see org.alfasoftware.morf.util.Builder#build() */ @Override public InsertStatement build() { return new InsertStatement(this); }
3.68
hbase_RegionInfoDisplay_getRegionNameForDisplay
/** * Get the region name for display. Optionally hide the start key. * @return region name bytes */ public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) { boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true); if (displayKey || ri.getTable().equals(TableName.META_TABLE_NAME)) { return ri.getRegionName(); } else { // create a modified regionname with the startkey replaced but preserving // the other parts including the encodedname. try { byte[][] regionNameParts = RegionInfo.parseRegionName(ri.getRegionName()); regionNameParts[1] = HIDDEN_START_KEY; // replace the real startkey int len = 0; // get the total length for (byte[] b : regionNameParts) { len += b.length; } byte[] encodedRegionName = Bytes.toBytes(RegionInfo.encodeRegionName(ri.getRegionName())); len += encodedRegionName.length; // allocate some extra bytes for the delimiters and the last '.' byte[] modifiedName = new byte[len + regionNameParts.length + 1]; int lengthSoFar = 0; int loopCount = 0; for (byte[] b : regionNameParts) { System.arraycopy(b, 0, modifiedName, lengthSoFar, b.length); lengthSoFar += b.length; if (loopCount++ == 2) modifiedName[lengthSoFar++] = RegionInfo.REPLICA_ID_DELIMITER; else modifiedName[lengthSoFar++] = HConstants.DELIMITER; } // replace the last comma with '.' modifiedName[lengthSoFar - 1] = RegionInfo.ENC_SEPARATOR; System.arraycopy(encodedRegionName, 0, modifiedName, lengthSoFar, encodedRegionName.length); lengthSoFar += encodedRegionName.length; modifiedName[lengthSoFar] = RegionInfo.ENC_SEPARATOR; return modifiedName; } catch (IOException e) { // LOG.warn("Encountered exception " + e); throw new RuntimeException(e); } } }
3.68
morf_AbstractSqlDialectTest_testUpdateWithLiteralValues
/** * Test whether the right update SQL statement was generated */ @Test public void testUpdateWithLiteralValues() { UpdateStatement stmt = update(tableRef(TEST_TABLE)) .set(literal("Value").as(STRING_FIELD)) .set(blobLiteral(NEW_BLOB_VALUE).as("blobFieldOne")) .set(blobLiteral(NEW_BLOB_VALUE.getBytes(StandardCharsets.UTF_8)).as("blobFieldTwo")) .where(and( field("field1").eq(true), field("field2").eq(false), field("field3").eq(literal(true)), field("field4").eq(literal(false)), field("field5").eq(new LocalDate(2010, 1, 2)), field("field6").eq(literal(new LocalDate(2010, 1, 2))), field("field7").eq("Value"), field("field8").eq(literal("Value")), field("field9").eq(blobLiteral(OLD_BLOB_VALUE)), field("field10").eq(blobLiteral(OLD_BLOB_VALUE.getBytes(StandardCharsets.UTF_8))) )); assertEquals( "Update with literal values", expectedUpdateWithLiteralValues(), testDialect.convertStatementToSQL(stmt) ); }
3.68
hudi_HoodieAppendHandle_flushToDiskIfRequired
/** * Checks if the number of records have reached the set threshold and then flushes the records to disk. */ private void flushToDiskIfRequired(HoodieRecord record, boolean appendDeleteBlocks) { if (numberOfRecords >= (int) (maxBlockSize / averageRecordSize) || numberOfRecords % NUMBER_OF_RECORDS_TO_ESTIMATE_RECORD_SIZE == 0) { averageRecordSize = (long) (averageRecordSize * 0.8 + sizeEstimator.sizeEstimate(record) * 0.2); } // Append if max number of records reached to achieve block size if (numberOfRecords >= (maxBlockSize / averageRecordSize)) { // Recompute averageRecordSize before writing a new block and update existing value with // avg of new and old LOG.info("Flush log block to disk, the current avgRecordSize => " + averageRecordSize); // Delete blocks will be appended after appending all the data blocks. appendDataAndDeleteBlocks(header, appendDeleteBlocks); estimatedNumberOfBytesWritten += averageRecordSize * numberOfRecords; numberOfRecords = 0; } }
3.68
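The size estimate above is an exponential moving average (weights 0.8 old / 0.2 new); a small worked sketch with made-up numbers:

    public class AvgRecordSizeDemo {
        public static void main(String[] args) {
            long averageRecordSize = 100; // running estimate, in bytes
            long newSample = 200;         // size of the most recent record
            // Same update as above: 0.8 * old estimate + 0.2 * new sample.
            averageRecordSize = (long) (averageRecordSize * 0.8 + newSample * 0.2);
            System.out.println(averageRecordSize); // 120

            long maxBlockSize = 1_200;    // hypothetical block-size budget
            // A block is flushed once numberOfRecords >= maxBlockSize / averageRecordSize.
            System.out.println(maxBlockSize / averageRecordSize); // 10 records per block
        }
    }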
pulsar_ManagedCursorImpl_filterReadEntries
/** * Given a list of entries, filter out the entries that have already been individually deleted. * * @param entries * a list of entries * @return a list of entries not containing deleted messages */ List<Entry> filterReadEntries(List<Entry> entries) { lock.readLock().lock(); try { Range<PositionImpl> entriesRange = Range.closed((PositionImpl) entries.get(0).getPosition(), (PositionImpl) entries.get(entries.size() - 1).getPosition()); if (log.isDebugEnabled()) { log.debug("[{}] [{}] Filtering entries {} - alreadyDeleted: {}", ledger.getName(), name, entriesRange, individualDeletedMessages); } Range<PositionImpl> span = individualDeletedMessages.isEmpty() ? null : individualDeletedMessages.span(); if (span == null || !entriesRange.isConnected(span)) { // There are no individually deleted messages in this entry list, no need to perform filtering if (log.isDebugEnabled()) { log.debug("[{}] [{}] No filtering needed for entries {}", ledger.getName(), name, entriesRange); } return entries; } else { // Remove from the entry list all the entries that were already marked for deletion return Lists.newArrayList(Collections2.filter(entries, entry -> { boolean includeEntry = !individualDeletedMessages.contains(entry.getLedgerId(), entry.getEntryId()); if (!includeEntry) { if (log.isDebugEnabled()) { log.debug("[{}] [{}] Filtering entry at {} - already deleted", ledger.getName(), name, entry.getPosition()); } entry.release(); } return includeEntry; })); } } finally { lock.readLock().unlock(); } }
3.68
hadoop_ExpressionFactory_isExpression
/** * Determines whether the given expression name represents an actual * expression. * * @param expressionName * name of the expression * @return true if expressionName represents an expression */ boolean isExpression(String expressionName) { return expressionMap.containsKey(expressionName); }
3.68
hadoop_SinglePendingCommit_getDestinationKey
/** @return destination key in the bucket. */ public String getDestinationKey() { return destinationKey; }
3.68
hadoop_BlockDispatcher_sendRequest
/** Send a reportedBlock replace request to the output stream. */ private static void sendRequest(DataOutputStream out, ExtendedBlock eb, Token<BlockTokenIdentifier> accessToken, DatanodeInfo source, StorageType targetStorageType) throws IOException { new Sender(out).replaceBlock(eb, targetStorageType, accessToken, source.getDatanodeUuid(), source, null); }
3.68
flink_StateTable_getState
/** Returns the internal data structure. */ @VisibleForTesting public StateMap<K, N, S>[] getState() { return keyGroupedStateMaps; }
3.68
flink_TriFunctionWithException_unchecked
/** * Convert at {@link TriFunctionWithException} into a {@link TriFunction}. * * @param triFunctionWithException function with exception to convert into a function * @param <A> first input type * @param <B> second input type * @param <C> third input type * @param <D> output type * @return {@link BiFunction} which throws all checked exception as an unchecked exception. */ static <A, B, C, D> TriFunction<A, B, C, D> unchecked( TriFunctionWithException<A, B, C, D, ?> triFunctionWithException) { return (A a, B b, C c) -> { try { return triFunctionWithException.apply(a, b, c); } catch (Throwable t) { ExceptionUtils.rethrow(t); // we need this to appease the compiler :-( return null; } }; }
3.68
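A usage sketch, assuming both interfaces live in org.apache.flink.util.function:

    import org.apache.flink.util.function.TriFunction;
    import org.apache.flink.util.function.TriFunctionWithException;

    public class UncheckedTriFunctionDemo {
        static Integer parseAndAdd(String a, String b, String c) throws Exception {
            return Integer.parseInt(a) + Integer.parseInt(b) + Integer.parseInt(c);
        }

        public static void main(String[] args) {
            TriFunctionWithException<String, String, String, Integer, Exception> checked =
                    UncheckedTriFunctionDemo::parseAndAdd;
            // The wrapper rethrows any checked exception as an unchecked one.
            TriFunction<String, String, String, Integer> fn =
                    TriFunctionWithException.unchecked(checked);
            System.out.println(fn.apply("1", "2", "3")); // 6
        }
    }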
hbase_SegmentScanner_toString
// debug method @Override public String toString() { String res = "Store segment scanner of type " + this.getClass().getName() + "; "; res += "Scanner order " + getScannerOrder() + "; "; res += getSegment().toString(); return res; }
3.68
hmily_MongoEntityConvert_convert
/** * Converts a Mongo lock entity into a Hmily lock. * * @param entity the Mongo entity * @return the Hmily lock entity */ public HmilyLock convert(final LockMongoEntity entity) { return new HmilyLock(entity.getTransId(), entity.getParticipantId(), entity.getResourceId(), entity.getTargetTableName(), entity.getTargetTablePk()); }
3.68
morf_DataValueLookupMetadata_getChildren
/** * Given the following record arrangements: * * [A] * [A, B] * [A, B, C] * [A, D, E] * [A, D, F] * * We arrange our metadata in a tree: * * [A] * - [+ B] * - [+ C] * - [+ D] * - [+ E] * - [+ F] * * This method returns the children of this record arrangement in that tree. * It is volatile and changes as internment occurs (see {@link #setChildren(ImmutableMap)}). * * @return */ ImmutableMap<CaseInsensitiveString, DataValueLookupMetadata> getChildren() { return children; }
3.68
flink_LineBreakElement_linebreak
/** Creates a line break in the description. */ public static LineBreakElement linebreak() { return new LineBreakElement(); }
3.68
framework_DefaultDeploymentConfiguration_isProductionMode
/** * {@inheritDoc} * * The default is false. */ @Override public boolean isProductionMode() { return productionMode; }
3.68
hadoop_TaskPool_abortWith
/** * Task to abort with after another task failed. * @param task task to execute * @return the builder */ public Builder<I> abortWith(Task<I, ?> task) { this.abortTask = task; return this; }
3.68
graphhopper_VectorTile_addAllTags
/** * <pre> * Tags of this feature are encoded as repeated pairs of * integers. * A detailed description of tags is located in sections * 4.2 and 4.4 of the specification * </pre> * * <code>repeated uint32 tags = 2 [packed = true];</code> */ public Builder addAllTags( java.lang.Iterable<? extends java.lang.Integer> values) { ensureTagsIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll( values, tags_); onChanged(); return this; }
3.68