Dataset columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, values 3.26 to 3.68).
hadoop_Hash_hash
/** * Calculate a hash using all bytes from the input argument, * and a provided seed value. * @param bytes input bytes * @param initval seed value * @return hash value */ public int hash(byte[] bytes, int initval) { return hash(bytes, bytes.length, initval); }
3.68
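Note: a minimal usage sketch for the hash overload above, assuming Hadoop's org.apache.hadoop.util.hash.Hash factory API (Hash.getInstance and the MURMUR_HASH constant); the key bytes are illustrative.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.util.hash.Hash;

public class HashSeedDemo {
    public static void main(String[] args) {
        byte[] key = "row-42".getBytes(StandardCharsets.UTF_8); // illustrative key
        Hash hasher = Hash.getInstance(Hash.MURMUR_HASH); // assumed concrete implementation
        int h1 = hasher.hash(key, -1); // -1 as a plain seed value
        int h2 = hasher.hash(key, h1); // chaining: previous hash fed back as the seed
        System.out.printf("h1=%d h2=%d%n", h1, h2);
    }
}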
morf_AbstractSqlDialectTest_testAlterTableColumn
/** * Utility method for testing 'ALTER TABLE ... COLUMN ...' statements. */ private void testAlterTableColumn(AlterationType alterationType, Column newColumn, List<String> expectedStatements) { testAlterTableColumn(TEST_TABLE, alterationType, null, newColumn, expectedStatements); }
3.68
querydsl_BeanPath_createBoolean
/** * Create a new Boolean path * * @param property property name * @return property path */ protected BooleanPath createBoolean(String property) { return add(new BooleanPath(forProperty(property))); }
3.68
framework_VCalendar_setDropHandler
/** * Set the drop handler. * * @param dropHandler * The drop handler to use */ public void setDropHandler(CalendarDropHandler dropHandler) { this.dropHandler = dropHandler; }
3.68
hudi_HoodieMetaSyncOperations_getLastReplicatedTime
/** * Get the timestamp of last replication. */ default Option<String> getLastReplicatedTime(String tableName) { return Option.empty(); }
3.68
framework_GridAlignment_buildLayout
/** * Builds the layout for the test. */ private void buildLayout() { layout.setColumns(3); layout.setRows(3); for (int i = 0; i < components.length; i++) { layout.addComponent(components[i]); layout.setComponentAlignment(components[i], alignments[i]); } }
3.68
hadoop_JobTokenIdentifier_write
/** {@inheritDoc} */ @Override public void write(DataOutput out) throws IOException { jobid.write(out); }
3.68
zxing_Detector_isWhiteOrBlackRectangle
/** * @return true if the border of the rectangle passed in parameter is composed of white points only * or black points only */ private boolean isWhiteOrBlackRectangle(Point p1, Point p2, Point p3, Point p4) { int corr = 3; p1 = new Point(Math.max(0, p1.getX() - corr), Math.min(image.getHeight() - 1, p1.getY() + corr)); p2 = new Point(Math.max(0, p2.getX() - corr), Math.max(0, p2.getY() - corr)); p3 = new Point(Math.min(image.getWidth() - 1, p3.getX() + corr), Math.max(0, Math.min(image.getHeight() - 1, p3.getY() - corr))); p4 = new Point(Math.min(image.getWidth() - 1, p4.getX() + corr), Math.min(image.getHeight() - 1, p4.getY() + corr)); int cInit = getColor(p4, p1); if (cInit == 0) { return false; } int c = getColor(p1, p2); if (c != cInit) { return false; } c = getColor(p2, p3); if (c != cInit) { return false; } c = getColor(p3, p4); return c == cInit; }
3.68
pulsar_DispatchRateLimiter_getAvailableDispatchRateLimitOnMsg
/** * Returns the available message permits if message-dispatch throttling is enabled, otherwise -1. * * @return the available permits, or -1 if throttling is disabled */ public long getAvailableDispatchRateLimitOnMsg() { return dispatchRateLimiterOnMessage == null ? -1 : dispatchRateLimiterOnMessage.getAvailablePermits(); }
3.68
flink_SingleInputGate_queueChannelUnsafe
/** * Queues the channel if it is not already enqueued and has not received EndOfPartition, potentially * raising its priority. * * @return true iff it has been enqueued/prioritized, i.e. some change to {@link * #inputChannelsWithData} happened */ private boolean queueChannelUnsafe(InputChannel channel, boolean priority) { assert Thread.holdsLock(inputChannelsWithData); if (channelsWithEndOfPartitionEvents.get(channel.getChannelIndex())) { return false; } final boolean alreadyEnqueued = enqueuedInputChannelsWithData.get(channel.getChannelIndex()); if (alreadyEnqueued && (!priority || inputChannelsWithData.containsPriorityElement(channel))) { // already notified / prioritized (double notification), ignore return false; } inputChannelsWithData.add(channel, priority, alreadyEnqueued); if (!alreadyEnqueued) { enqueuedInputChannelsWithData.set(channel.getChannelIndex()); } return true; }
3.68
hadoop_NativeTaskOutputFiles_getSpillFile
/** * Return a local map spill file created earlier. * * @param spillNumber the spill number */ public Path getSpillFile(int spillNumber) throws IOException { String path = String.format(SPILL_FILE_FORMAT_STRING, id, TASKTRACKER_OUTPUT, spillNumber); return lDirAlloc.getLocalPathToRead(path, conf); }
3.68
pulsar_ConsumerConfiguration_setProperty
/** * Set a name/value property on this consumer. * * @param key the property key * @param value the property value * @return the {@code ConsumerConfiguration} instance */ public ConsumerConfiguration setProperty(String key, String value) { checkArgument(key != null); checkArgument(value != null); conf.getProperties().put(key, value); return this; }
3.68
hbase_ColumnPrefixFilter_parseFrom
/** * Parses a serialized representation of the {@link ColumnPrefixFilter} * @param pbBytes A pb serialized {@link ColumnPrefixFilter} instance * @return An instance of {@link ColumnPrefixFilter} made from <code>bytes</code> * @throws DeserializationException if an error occurred * @see #toByteArray */ public static ColumnPrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.ColumnPrefixFilter proto; try { proto = FilterProtos.ColumnPrefixFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } return new ColumnPrefixFilter(proto.getPrefix().toByteArray()); }
3.68
hbase_StorageClusterStatusModel_setWriteRequestsCount
/** * @param writeRequestsCount The current total number of write requests made to the region */ public void setWriteRequestsCount(long writeRequestsCount) { this.writeRequestsCount = writeRequestsCount; }
3.68
flink_IntValue_setValue
/** * Sets the encapsulated int to the specified value. * * @param value the new value of the encapsulated int. */ public void setValue(int value) { this.value = value; }
3.68
graphhopper_OSMReader_acceptWay
/** * This method is called for each way during the first and second pass of the {@link WaySegmentParser}. All OSM * ways that are not accepted here and all nodes that are not referenced by any such way will be ignored. */ protected boolean acceptWay(ReaderWay way) { // ignore broken geometry if (way.getNodes().size() < 2) return false; // ignore multipolygon geometry if (!way.hasTags()) return false; return osmParsers.acceptWay(way); }
3.68
morf_SchemaAdapter_tableExists
/** * @see org.alfasoftware.morf.metadata.Schema#tableExists(java.lang.String) */ @Override public boolean tableExists(String name) { return delegate.tableExists(name); }
3.68
framework_HierarchicalDataCommunicator_collapse
/** * Collapses the given item and removes its sub-hierarchy. Calling this * method will have no effect if the row is already collapsed. * * @param item * the item to collapse * @param index * the index of the item */ public void collapse(T item, Integer index) { doCollapse(item, index, true); }
3.68
framework_Table_setSortAscending
/** * Internal method to set the sort direction. The doSort flag allows the actual * sort to be bypassed. * * @param ascending true for ascending order * @param doSort whether to actually perform the sort */ private void setSortAscending(boolean ascending, boolean doSort) { if (sortAscending != ascending) { sortAscending = ascending; if (doSort) { sort(); // Ensures the visual refresh. This should not be necessary as // sort() calls refreshRowCache refreshRenderedCells(); } } }
3.68
rocketmq-connect_ConnectUtil_offsetTopics
/** * Get topic offsets */ public static Map<String, Map<MessageQueue, TopicOffset>> offsetTopics( WorkerConfig config, List<String> topics) { Map<String, Map<MessageQueue, TopicOffset>> offsets = Maps.newConcurrentMap(); DefaultMQAdminExt adminClient = null; try { adminClient = startMQAdminTool(config); for (String topic : topics) { TopicStatsTable topicStatsTable = adminClient.examineTopicStats(topic); offsets.put(topic, topicStatsTable.getOffsetTable()); } return offsets; } catch (MQClientException | MQBrokerException | RemotingException | InterruptedException e) { throw new RuntimeException(e); } finally { if (adminClient != null) { adminClient.shutdown(); } } }
3.68
flink_WindowedOperatorTransformation_reduce
/** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * <p>Arriving data is incrementally aggregated using the given reducer. * * @param reduceFunction The reduce function that is used for incremental aggregation. * @param function The window function. * @return The data stream that is the result of applying the window function to the window. */ @Internal public <R> BootstrapTransformation<T> reduce( ReduceFunction<T> reduceFunction, ProcessWindowFunction<T, R, K, W> function) { // clean the closures function = input.clean(function); reduceFunction = input.clean(reduceFunction); WindowOperator<K, T, ?, R, W> operator = builder.reduce(reduceFunction, function); SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator); return new BootstrapTransformation<>( input, operatorMaxParallelism, timestamper, factory, keySelector, keyType); }
3.68
hadoop_FederationPolicyInitializationContext_getFederationSubclusterResolver
/** * Getter for the {@link SubClusterResolver}. * * @return the {@link SubClusterResolver} to be used for initialization. */ public SubClusterResolver getFederationSubclusterResolver() { return federationSubclusterResolver; }
3.68
hbase_ByteBufferUtils_longFitsIn
/** * Check how many bytes are required to store value. * @param value Value whose size will be tested. * @return How many bytes are required to store value. */ public static int longFitsIn(final long value) { if (value < 0) { return 8; } if (value < (1L << (4 * 8))) { // no more than 4 bytes if (value < (1L << (2 * 8))) { if (value < (1L << (1 * 8))) { return 1; } return 2; } if (value < (1L << (3 * 8))) { return 3; } return 4; } // more than 4 bytes if (value < (1L << (6 * 8))) { if (value < (1L << (5 * 8))) { return 5; } return 6; } if (value < (1L << (7 * 8))) { return 7; } return 8; }
3.68
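Note: the nested comparisons above find the smallest byte count k with value < 2^(8k), treating negatives as full width. A standalone sketch (hypothetical demo class, no HBase dependency) reproducing the same answers with a loop:

public class LongFitsInDemo {
    // Same contract as ByteBufferUtils.longFitsIn: negatives need all 8 bytes;
    // otherwise return the smallest k such that value < 2^(8k).
    static int longFitsIn(long value) {
        if (value < 0) {
            return 8;
        }
        int bytes = 1;
        while (bytes < 8 && value >= (1L << (8 * bytes))) {
            bytes++;
        }
        return bytes;
    }

    public static void main(String[] args) {
        System.out.println(longFitsIn(0));    // 1
        System.out.println(longFitsIn(255));  // 1 (largest one-byte value)
        System.out.println(longFitsIn(256));  // 2
        System.out.println(longFitsIn(-1));   // 8 (negative values are full width)
    }
}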
flink_ArrowFieldWriter_getValueVector
/** Returns the underlying container which stores the sequence of values of a column. */ public ValueVector getValueVector() { return valueVector; }
3.68
hibernate-validator_ValueExtractorResolver_getPotentialValueExtractorCandidatesForCascadedValidation
/** * Used to determine the value extractors which potentially could be applied to the runtime type of a given declared type. * <p> * An example could be when there's a declaration like {@code private PotentiallyContainerAtRuntime<@Valid Bean>;} and there's * no value extractor present for {@code PotentiallyContainerAtRuntime} but there's one available for * {@code Container extends PotentiallyContainerAtRuntime}. * <p> * The returned set of extractors is used to determine if at runtime a value extractor can be applied to a runtime type, * and if {@link PotentiallyContainerCascadingMetaData} should be promoted to {@link ContainerCascadingMetaData}. * * @return a set of {@link ValueExtractorDescriptor}s that might be applied to a {@code declaredType} * at runtime. */ public Set<ValueExtractorDescriptor> getPotentialValueExtractorCandidatesForCascadedValidation(Type declaredType) { return registeredValueExtractors .stream() .filter( e -> TypeHelper.isAssignable( declaredType, e.getContainerType() ) ) .collect( Collectors.collectingAndThen( Collectors.toSet(), CollectionHelper::toImmutableSet ) ); }
3.68
pulsar_Topics_createSubscriptionAsync
/** * Create a new subscription on a topic. * * @param topic * topic name * @param subscriptionName * Subscription name * @param messageId * The {@link MessageId} at which to initialize the subscription. It could be {@link MessageId#latest}, * {@link MessageId#earliest} or a specific message id. * * @param replicated * whether the subscription should be replicated. */ default CompletableFuture<Void> createSubscriptionAsync(String topic, String subscriptionName, MessageId messageId, boolean replicated) { return createSubscriptionAsync(topic, subscriptionName, messageId, replicated, null); }
3.68
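Note: a hedged usage sketch for the default method above, called through the Pulsar admin client; the service URL and topic/subscription names are placeholders.

import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.api.MessageId;

public class CreateSubscriptionDemo {
    public static void main(String[] args) throws Exception {
        PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://localhost:8080") // placeholder broker URL
                .build();
        // Start the subscription at the earliest available message, non-replicated;
        // the four-argument overload delegates with null subscription properties.
        admin.topics()
                .createSubscriptionAsync("persistent://public/default/demo-topic",
                        "demo-sub", MessageId.earliest, false)
                .join();
        admin.close();
    }
}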
flink_PartitionTable_stopTrackingPartitions
/** Stops the tracking of the given set of partitions for the given key. */ public void stopTrackingPartitions(K key, Collection<ResultPartitionID> partitionIds) { Preconditions.checkNotNull(key); Preconditions.checkNotNull(partitionIds); // If the key is unknown we do not fail here, in line with // ShuffleEnvironment#releaseFinishedPartitions trackedPartitionsPerKey.computeIfPresent( key, (ignored, resultPartitionIDS) -> { resultPartitionIDS.removeAll(partitionIds); return resultPartitionIDS.isEmpty() ? null : resultPartitionIDS; }); }
3.68
framework_ClickSelectHandler_setDeselectAllowed
/** * Sets whether clicking the currently selected row should deselect the row. * * @param deselectAllowed * <code>true</code> to allow deselecting the selected row; * otherwise <code>false</code> */ public void setDeselectAllowed(boolean deselectAllowed) { this.deselectAllowed = deselectAllowed; }
3.68
pulsar_PulsarSchemaToKafkaSchema_parseAvroSchema
// Parse json to shaded schema private static org.apache.avro.Schema parseAvroSchema(String schemaJson) { final org.apache.avro.Schema.Parser parser = new org.apache.avro.Schema.Parser(); parser.setValidateDefaults(false); return parser.parse(schemaJson); }
3.68
framework_VFilterSelect_onBrowserEvent
/* * (non-Javadoc) * * @see * com.google.gwt.user.client.ui.Composite#onBrowserEvent(com.google.gwt * .user.client.Event) */ @Override public void onBrowserEvent(Event event) { super.onBrowserEvent(event); if (event.getTypeInt() == Event.ONPASTE) { if (textInputEnabled) { Scheduler.get().scheduleDeferred(new ScheduledCommand() { @Override public void execute() { filterOptions(currentPage); } }); } } }
3.68
flink_SchemaValidator_deriveFieldMapping
/** * Finds a table source field mapping. * * @param properties The properties describing a schema. * @param inputType The input type that a connector and/or format produces. This parameter can * be used to resolve a rowtime field against an input field. */ public static Map<String, String> deriveFieldMapping( DescriptorProperties properties, Optional<TypeInformation<?>> inputType) { Map<String, String> mapping = new HashMap<>(); TableSchema schema = properties.getTableSchema(SCHEMA); List<String> columnNames = new ArrayList<>(); inputType.ifPresent( t -> columnNames.addAll(Arrays.asList(((CompositeType) t).getFieldNames()))); // add all source fields first because rowtime might reference one of them columnNames.forEach(name -> mapping.put(name, name)); // add all schema fields first for implicit mappings Arrays.stream(schema.getFieldNames()).forEach(name -> mapping.put(name, name)); Map<String, String> names = properties.getIndexedProperty(SCHEMA, SCHEMA_NAME); for (int i = 0; i < names.size(); i++) { String name = properties.getString(SCHEMA + "." + i + "." + SCHEMA_NAME); Optional<String> source = properties.getOptionalString(SCHEMA + "." + i + "." + SCHEMA_FROM); if (source.isPresent()) { // add explicit mapping mapping.put(name, source.get()); } else { // implicit mapping or time boolean isProctime = properties .getOptionalBoolean(SCHEMA + "." + i + "." + SCHEMA_PROCTIME) .orElse(false); boolean isRowtime = properties.containsKey(SCHEMA + "." + i + "." + ROWTIME_TIMESTAMPS_TYPE); boolean isGeneratedColumn = properties.containsKey(SCHEMA + "." + i + "." + EXPR); // remove proctime/rowtime from mapping if (isProctime || isRowtime || isGeneratedColumn) { mapping.remove(name); } // check for invalid fields else if (!columnNames.contains(name)) { throw new ValidationException( format( "Could not map the schema field '%s' to a field " + "from source. Please specify the source field from which it can be derived.", name)); } } } return mapping; }
3.68
graphhopper_GraphHopper_setElevation
/** * Enable storing and fetching elevation data. Default is false */ public GraphHopper setElevation(boolean includeElevation) { this.elevation = includeElevation; return this; }
3.68
pulsar_BrokerInterceptor_onConnectionCreated
/** * Called by the broker when a new connection is created. */ default void onConnectionCreated(ServerCnx cnx){ }
3.68
pulsar_WatermarkCountTriggerPolicy_handleWaterMarkEvent
/** * Triggers all the pending windows up to the waterMarkEvent timestamp * based on the sliding interval count. * * @param waterMarkEvent the watermark event */ private void handleWaterMarkEvent(Event<T> waterMarkEvent) { long watermarkTs = waterMarkEvent.getTimestamp(); List<Long> eventTs = windowManager.getSlidingCountTimestamps(lastProcessedTs, watermarkTs, count); for (long ts : eventTs) { evictionPolicy.setContext(new DefaultEvictionContext(ts, null, (long) count)); handler.onTrigger(); lastProcessedTs = ts; } }
3.68
hbase_HFileArchiver_deleteRegionWithoutArchiving
/** * Without regard for backup, delete a region. Should be used with caution. * @param regionDir {@link Path} to the region to be deleted. * @param fs FileSystem from which to delete the region * @return <tt>true</tt> on successful deletion, <tt>false</tt> otherwise * @throws IOException on filesystem operation failure */ private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir) throws IOException { if (fs.delete(regionDir, true)) { LOG.debug("Deleted {}", regionDir); return true; } LOG.debug("Failed to delete directory {}", regionDir); return false; }
3.68
hbase_TimeRange_isAllTime
/** * Check if it is for all time * @return true if it is for all time */ public boolean isAllTime() { return allTime; }
3.68
flink_DataSetUtils_summarize
/** * Summarize a DataSet of Tuples by collecting single pass statistics for all columns. * * <p>Example usage: * * <pre>{@code * DataSet<Tuple3<Double, String, Boolean>> input = // [...] * Tuple3<NumericColumnSummary,StringColumnSummary, BooleanColumnSummary> summary = DataSetUtils.summarize(input) * * summary.f0.getStandardDeviation() * summary.f1.getMaxLength() * }</pre> * * @return the summary as a Tuple the same width as input rows */ public static <R extends Tuple, T extends Tuple> R summarize(DataSet<T> input) throws Exception { if (!input.getType().isTupleType()) { throw new IllegalArgumentException( "summarize() is only implemented for DataSets of Tuples"); } final TupleTypeInfoBase<?> inType = (TupleTypeInfoBase<?>) input.getType(); DataSet<TupleSummaryAggregator<R>> result = input.mapPartition( new MapPartitionFunction<T, TupleSummaryAggregator<R>>() { @Override public void mapPartition( Iterable<T> values, Collector<TupleSummaryAggregator<R>> out) throws Exception { TupleSummaryAggregator<R> aggregator = SummaryAggregatorFactory.create(inType); for (Tuple value : values) { aggregator.aggregate(value); } out.collect(aggregator); } }) .reduce( new ReduceFunction<TupleSummaryAggregator<R>>() { @Override public TupleSummaryAggregator<R> reduce( TupleSummaryAggregator<R> agg1, TupleSummaryAggregator<R> agg2) throws Exception { agg1.combine(agg2); return agg1; } }); return result.collect().get(0).result(); }
3.68
flink_TypeExtractor_createTypeInfo
/** * Creates a {@link TypeInformation} from the given parameters. * * <p>If the given {@code instance} implements {@link ResultTypeQueryable}, its information is * used to determine the type information. Otherwise, the type information is derived based on * the given class information. * * @param instance instance to determine type information for * @param baseClass base class of {@code instance} * @param clazz class of {@code instance} * @param returnParamPos index of the return type in the type arguments of {@code clazz} * @param <OUT> output type * @return type information */ @SuppressWarnings("unchecked") @PublicEvolving public static <OUT> TypeInformation<OUT> createTypeInfo( Object instance, Class<?> baseClass, Class<?> clazz, int returnParamPos) { if (instance instanceof ResultTypeQueryable) { return ((ResultTypeQueryable<OUT>) instance).getProducedType(); } else { return createTypeInfo(baseClass, clazz, returnParamPos, null, null); } }
3.68
hbase_MetricsREST_incrementSucessfulAppendRequests
/** * @param inc How much to add to sucessfulAppendCount. */ public synchronized void incrementSucessfulAppendRequests(final int inc) { source.incrementSucessfulAppendRequests(inc); }
3.68
framework_VAbsoluteLayout_contains
/** * Checks whether this layout contains the given widget. * * @param widget * The widget to check * @return true if the widget is in this layout, false if not */ public boolean contains(Widget widget) { return getChildWrapper(widget) != null; }
3.68
dubbo_ServiceAnnotationPostProcessor_processScannedBeanDefinition
/** * Registers {@link ServiceBean} from new annotated {@link Service} {@link BeanDefinition} * * @param beanDefinitionHolder * @see ServiceBean * @see BeanDefinition */ private void processScannedBeanDefinition(BeanDefinitionHolder beanDefinitionHolder) { Class<?> beanClass = resolveClass(beanDefinitionHolder); Annotation service = findServiceAnnotation(beanClass); // The attributes of @Service annotation Map<String, Object> serviceAnnotationAttributes = AnnotationUtils.getAttributes(service, true); String serviceInterface = resolveInterfaceName(serviceAnnotationAttributes, beanClass); String annotatedServiceBeanName = beanDefinitionHolder.getBeanName(); // ServiceBean Bean name String beanName = generateServiceBeanName(serviceAnnotationAttributes, serviceInterface); AbstractBeanDefinition serviceBeanDefinition = buildServiceBeanDefinition(serviceAnnotationAttributes, serviceInterface, annotatedServiceBeanName); registerServiceBeanDefinition(beanName, serviceBeanDefinition, serviceInterface); }
3.68
hudi_AbstractTableFileSystemView_refreshCompletionTimeQueryView
/** * Refresh the completion time query view. */ protected void refreshCompletionTimeQueryView() { this.completionTimeQueryView = new CompletionTimeQueryView(metaClient); }
3.68
hbase_StoreFlusher_performFlush
/** * Performs memstore flush, writing data from scanner into sink. * @param scanner Scanner to get data from. * @param sink Sink to write data to. Could be StoreFile.Writer. * @param throughputController A controller to avoid flushing too fast */ protected void performFlush(InternalScanner scanner, CellSink sink, ThroughputController throughputController) throws IOException { int compactionKVMax = conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); List<Cell> kvs = new ArrayList<>(); boolean hasMore; String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush"); // no control on system table (such as meta, namespace, etc) flush boolean control = throughputController != null && !store.getRegionInfo().getTable().isSystemTable(); if (control) { throughputController.start(flushName); } try { do { hasMore = scanner.next(kvs, scannerContext); if (!kvs.isEmpty()) { for (Cell c : kvs) { sink.append(c); if (control) { throughputController.control(flushName, c.getSerializedSize()); } } kvs.clear(); } } while (hasMore); } catch (InterruptedException e) { throw new InterruptedIOException( "Interrupted while controlling throughput of flushing " + flushName); } finally { if (control) { throughputController.finish(flushName); } } }
3.68
framework_LegacyPaint_paint
/** * * <p> * Paints the Paintable into a UIDL stream. This method creates the UIDL * sequence describing it and outputs it to the given UIDL stream. * </p> * * <p> * It is called when the contents of the component should be painted in * response to the component first being shown or having been altered so * that its visual representation is changed. * </p> * * <p> * <b>Do not override this to paint your component.</b> Override * {@link LegacyComponent#paintContent(PaintTarget)} instead. * </p> * * * @param target * the target UIDL stream where the component should paint itself * to. * @throws PaintException * if the paint operation failed. */ public static void paint(Component component, PaintTarget target) throws PaintException { // Only paint content of visible components. if (!LegacyCommunicationManager.isComponentVisibleToClient(component)) { return; } final String tag = target.getTag(component); final PaintStatus status = target.startPaintable(component, tag); if (PaintStatus.CACHED == status) { // nothing to do but flag as cached and close the paintable tag target.addAttribute("cached", true); } else { // Paint the contents of the component if (component instanceof LegacyComponent) { ((LegacyComponent) component).paintContent(target); } } target.endPaintable(component); }
3.68
flink_CompressionUtils_extractTarFileUsingJava
// Follow the pattern suggested in // https://commons.apache.org/proper/commons-compress/examples.html private static void extractTarFileUsingJava( String inFilePath, String targetDirPath, boolean gzipped) throws IOException { try (InputStream fi = Files.newInputStream(Paths.get(inFilePath)); InputStream bi = new BufferedInputStream(fi); final TarArchiveInputStream tai = new TarArchiveInputStream( gzipped ? new GzipCompressorInputStream(bi) : bi)) { final File targetDir = new File(targetDirPath); TarArchiveEntry entry; while ((entry = tai.getNextTarEntry()) != null) { unpackEntry(tai, entry, targetDir); } } }
3.68
flink_ExceptionHistoryEntry_fromTaskManagerLocation
/** * Creates an {@code ArchivedTaskManagerLocation} copy of the passed {@link * TaskManagerLocation}. * * @param taskManagerLocation The {@code TaskManagerLocation} that's going to be copied. * @return The corresponding {@code ArchivedTaskManagerLocation} or {@code null} if {@code * null} was passed. */ @VisibleForTesting @Nullable static ArchivedTaskManagerLocation fromTaskManagerLocation( TaskManagerLocation taskManagerLocation) { if (taskManagerLocation == null) { return null; } return new ArchivedTaskManagerLocation( taskManagerLocation.getResourceID(), taskManagerLocation.addressString(), taskManagerLocation.dataPort(), taskManagerLocation.getHostname(), taskManagerLocation.getFQDNHostname()); }
3.68
hmily_ExpressionHandler_getValue
/** * Get expression value. * * @param parameters SQL parameters * @param expressionSegment expression segment * @return expression value */ public static Object getValue(final List<Object> parameters, final HmilyExpressionSegment expressionSegment) { if (expressionSegment instanceof HmilyCommonExpressionSegment) { String value = ((HmilyCommonExpressionSegment) expressionSegment).getText(); return "null".equals(value) ? null : value; } if (expressionSegment instanceof HmilyParameterMarkerExpressionSegment) { return parameters.get(((HmilyParameterMarkerExpressionSegment) expressionSegment).getParameterMarkerIndex()); } if (expressionSegment instanceof HmilyExpressionProjectionSegment) { String value = ((HmilyExpressionProjectionSegment) expressionSegment).getText(); return "null".equals(value) ? null : value; } if (expressionSegment instanceof HmilyBinaryOperationExpression) { Object left = getValue(parameters, ((HmilyBinaryOperationExpression) expressionSegment).getLeft()); Object right = getValue(parameters, ((HmilyBinaryOperationExpression) expressionSegment).getRight()); return String.format("%s %s %s", left, ((HmilyBinaryOperationExpression) expressionSegment).getOperator(), right); } if (expressionSegment instanceof HmilyColumnSegment) { return ((HmilyColumnSegment) expressionSegment).getQualifiedName(); } // TODO match result type with metadata return ((HmilyLiteralExpressionSegment) expressionSegment).getLiterals(); }
3.68
flink_StreamOperatorWrapper_isClosed
/** * Checks if the wrapped operator has been closed. * * <p>Note that this method must be called in the task thread. */ public boolean isClosed() { return closed; }
3.68
hbase_BalancerClusterState_getTotalRegionHFileSizeMB
/** * Returns the size of hFiles from the most recent RegionLoad for region */ public int getTotalRegionHFileSizeMB(int region) { Deque<BalancerRegionLoad> load = regionLoads[region]; if (load == null) { // This means, that the region has no actual data on disk return 0; } return regionLoads[region].getLast().getRegionSizeMB(); }
3.68
hbase_ScanWildcardColumnTracker_checkVersions
/** * {@inheritDoc} This receives puts *and* deletes. Deletes do not count as a version, but rather * take the version of the previous put (so eventually all but the last can be reclaimed). */ @Override public ScanQueryMatcher.MatchCode checkVersions(Cell cell, long timestamp, byte type, boolean ignoreCount) throws IOException { if (columnCell == null) { // first iteration. resetCell(cell); if (ignoreCount) { return ScanQueryMatcher.MatchCode.INCLUDE; } // do not count a delete marker as another version return checkVersion(type, timestamp); } int cmp = comparator.compareQualifiers(cell, this.columnCell); if (cmp == 0) { if (ignoreCount) { return ScanQueryMatcher.MatchCode.INCLUDE; } // If column matches, check if it is a duplicate timestamp if (sameAsPreviousTSAndType(timestamp, type)) { return ScanQueryMatcher.MatchCode.SKIP; } return checkVersion(type, timestamp); } resetTSAndType(); // new col > old col if (cmp > 0) { // switched columns, let's do something resetCell(cell); if (ignoreCount) { return ScanQueryMatcher.MatchCode.INCLUDE; } return checkVersion(type, timestamp); } // new col < old col // WARNING: This means that very likely an edit for some other family // was incorrectly stored into the store for this one. Throw an exception, // because this might lead to data corruption. throw new IOException("ScanWildcardColumnTracker.checkColumn ran into a column actually " + "smaller than the previous column: " + Bytes.toStringBinary(CellUtil.cloneQualifier(cell))); }
3.68
AreaShop_BuyRegion_getResellPrice
/** * Get the resell price of this region. * @return The resell price if isInResellingMode(), otherwise 0.0 */ public double getResellPrice() { return Math.max(0, config.getDouble("buy.resellPrice")); }
3.68
framework_VOverlay_setApplicationConnection
/** * Sets the {@link ApplicationConnection} that this overlay belongs to. * * @see #getApplicationConnection() * * @param ac * the connection */ public void setApplicationConnection(ApplicationConnection ac) { this.ac = ac; }
3.68
framework_VTreeTable_handleNavigation
/** For internal use only. May be removed or replaced in the future. */ @Override public boolean handleNavigation(int keycode, boolean ctrl, boolean shift) { if (collapseRequest || focusParentResponsePending) { // Enqueue the event if there might be pending content changes from // the server if (pendingNavigationEvents.size() < 10) { // Only keep 10 keyboard events in the queue PendingNavigationEvent pendingNavigationEvent = new PendingNavigationEvent( keycode, ctrl, shift); pendingNavigationEvents.add(pendingNavigationEvent); } return true; } VTreeTableRow focusedRow = (VTreeTableRow) getFocusedRow(); if (focusedRow != null) { if (focusedRow.canHaveChildren && ((keycode == KeyCodes.KEY_RIGHT && !focusedRow.open) || (keycode == KeyCodes.KEY_LEFT && focusedRow.open))) { if (!ctrl) { client.updateVariable(paintableId, "selectCollapsed", true, false); } sendSelectedRows(false); sendToggleCollapsedUpdate(focusedRow.getKey()); return true; } else if (keycode == KeyCodes.KEY_RIGHT && focusedRow.open) { // already expanded, move selection down if next is on a deeper // level (is-a-child) VTreeTableScrollBody body = (VTreeTableScrollBody) focusedRow .getParent(); Iterator<Widget> iterator = body.iterator(); VTreeTableRow next = null; while (iterator.hasNext()) { next = (VTreeTableRow) iterator.next(); if (next == focusedRow) { next = (VTreeTableRow) iterator.next(); break; } } if (next != null) { if (next.depth > focusedRow.depth) { selectionPending = true; return super.handleNavigation(getNavigationDownKey(), ctrl, shift); } } else { // Note, a minor change here for a bit false behavior if // cache rows is disabled + last visible row + no childs for // the node selectionPending = true; return super.handleNavigation(getNavigationDownKey(), ctrl, shift); } } else if (keycode == KeyCodes.KEY_LEFT) { // already collapsed move selection up to parent node // do on the server side as the parent is not necessary // rendered on the client, could check if parent is visible if // a performance issue arises client.updateVariable(paintableId, "focusParent", focusedRow.getKey(), true); // Set flag that we should enqueue navigation events until we // get a response to this request focusParentResponsePending = true; return true; } } return super.handleNavigation(keycode, ctrl, shift); }
3.68
hadoop_TimelineWriteResponse_setEntityType
/** * Set the entity type. * * @param type the entity type. */ public void setEntityType(String type) { this.entityType = type; }
3.68
framework_BasicEvent_setEnd
/* * (non-Javadoc) * * @see * com.vaadin.addon.calendar.event.CalendarEventEditor#setEnd(java.util. * Date) */ @Override public void setEnd(Date end) { this.end = end; fireEventChange(); }
3.68
flink_RegisteredRpcConnection_close
/** Close connection. */ public void close() { closed = true; // make sure we do not keep re-trying forever if (pendingRegistration != null) { pendingRegistration.cancel(); } }
3.68
pulsar_AuthorizationService_grantPermissionAsync
/** * Grant authorization-action permission on a topic to the given client. * * NOTE: used to complete with {@link IllegalArgumentException} when the namespace is not found or with * {@link IllegalStateException} when granting the permission fails. * * @param topicName * the topic name * @param actions * set of actions to grant * @param role * the client role * @param authDataJson * additional authdata in json for targeted authorization provider * @completesWith null when the permissions are updated successfully. * @completesWith {@link MetadataStoreException} when the MetadataStore is not updated. */ public CompletableFuture<Void> grantPermissionAsync(TopicName topicName, Set<AuthAction> actions, String role, String authDataJson) { return provider.grantPermissionAsync(topicName, actions, role, authDataJson); }
3.68
hbase_HRegionServer_convertRegionSize
/** * Converts a pair of {@link RegionInfo} and {@code long} into a {@link RegionSpaceUse} protobuf * message. * @param regionInfo The RegionInfo * @param sizeInBytes The size in bytes of the Region * @return The protocol buffer */ RegionSpaceUse convertRegionSize(RegionInfo regionInfo, Long sizeInBytes) { return RegionSpaceUse.newBuilder() .setRegionInfo(ProtobufUtil.toRegionInfo(Objects.requireNonNull(regionInfo))) .setRegionSize(Objects.requireNonNull(sizeInBytes)).build(); }
3.68
hbase_MasterProcedureScheduler_wakeServerExclusiveLock
/** * Wake the procedures waiting for the specified server * @see #waitServerExclusiveLock(Procedure,ServerName) * @param procedure the procedure releasing the lock * @param serverName the server that has the exclusive lock */ public void wakeServerExclusiveLock(final Procedure<?> procedure, final ServerName serverName) { schedLock(); try { final LockAndQueue lock = locking.getServerLock(serverName); // Only SCP will acquire/release server lock so do not need to check the return value here. lock.releaseExclusiveLock(procedure); // In tests we may pass procedures other than ServerProcedureInterface, just pass null if // so. addToRunQueue(serverRunQueue, getServerQueue(serverName, procedure instanceof ServerProcedureInterface ? (ServerProcedureInterface) procedure : null), () -> procedure + " released exclusive lock"); int waitingCount = wakeWaitingProcedures(lock); wakePollIfNeeded(waitingCount); } finally { schedUnlock(); } }
3.68
hibernate-validator_ExecutableMetaData_getParameterMetaData
/** * Returns meta data for the specified parameter of the represented executable. * * @param parameterIndex the index of the parameter * * @return Meta data for the specified parameter. Will never be {@code null}. */ public ParameterMetaData getParameterMetaData(int parameterIndex) { return parameterMetaDataList.get( parameterIndex ); }
3.68
hudi_BaseHoodieTableServiceClient_archive
/** * Trigger archival for the table. This ensures that the number of commits does not explode * and keep increasing unbounded over time. * * @param table table to commit on. */ protected void archive(HoodieTable table) { if (!tableServicesEnabled(config)) { return; } try { final Timer.Context timerContext = metrics.getArchiveCtx(); // We cannot have unbounded commit files. Archive commits if we have to archive HoodieTimelineArchiver archiver = new HoodieTimelineArchiver(config, table); int instantsToArchive = archiver.archiveIfRequired(context, true); if (timerContext != null) { long durationMs = metrics.getDurationInMs(timerContext.stop()); this.metrics.updateArchiveMetrics(durationMs, instantsToArchive); } } catch (IOException ioe) { throw new HoodieIOException("Failed to archive", ioe); } }
3.68
dubbo_LoggerFactory_getLevel
/** * Get logging level * * @return logging level */ public static Level getLevel() { return loggerAdapter.getLevel(); }
3.68
hbase_MetricsAssignmentManager_getMergeProcMetrics
/** Returns Set of common metrics for merge procedure */ public ProcedureMetrics getMergeProcMetrics() { return mergeProcMetrics; }
3.68
flink_BinarySegmentUtils_readRawValueData
/** Gets an instance of {@link RawValueData} from underlying {@link MemorySegment}. */ public static <T> RawValueData<T> readRawValueData( MemorySegment[] segments, int baseOffset, long offsetAndSize) { final int size = ((int) offsetAndSize); int offset = (int) (offsetAndSize >> 32); return new BinaryRawValueData<>(segments, offset + baseOffset, size, null); }
3.68
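Note: readRawValueData above unpacks a single long carrying both the field offset (high 32 bits) and the size (low 32 bits). A plain-Java illustration of that packing scheme, with no Flink dependency:

public class OffsetAndSizeDemo {
    public static void main(String[] args) {
        int offset = 128;
        int size = 24;
        // Pack: offset in the high 32 bits, size in the low 32 bits.
        long offsetAndSize = ((long) offset << 32) | (size & 0xFFFFFFFFL);
        // Unpack, mirroring readRawValueData.
        int decodedSize = (int) offsetAndSize;           // low 32 bits
        int decodedOffset = (int) (offsetAndSize >> 32); // high 32 bits
        System.out.println(decodedOffset + " " + decodedSize); // prints: 128 24
    }
}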
hbase_TimeRange_between
/** * Represents the time interval [minStamp, maxStamp) * @param minStamp the minimum timestamp, inclusive * @param maxStamp the maximum timestamp, exclusive */ public static TimeRange between(long minStamp, long maxStamp) { check(minStamp, maxStamp); return new TimeRange(minStamp, maxStamp); }
3.68
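Note: the factory above constructs the half-open interval [minStamp, maxStamp). A quick boundary check, assuming HBase's TimeRange#withinTimeRange(long) accessor:

import org.apache.hadoop.hbase.io.TimeRange;

public class TimeRangeDemo {
    public static void main(String[] args) {
        TimeRange tr = TimeRange.between(1000L, 2000L); // [1000, 2000)
        System.out.println(tr.withinTimeRange(1000L)); // true: minStamp is inclusive
        System.out.println(tr.withinTimeRange(1999L)); // true
        System.out.println(tr.withinTimeRange(2000L)); // false: maxStamp is exclusive
    }
}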
hadoop_StoragePolicySatisfyManager_isSatisfierRunning
/** * @return true if the internal storage policy satisfier daemon is running, * false otherwise. */ @VisibleForTesting public boolean isSatisfierRunning() { return spsService.isRunning(); }
3.68
morf_AbstractSqlDialectTest_expectedRowNumber
/** * @return The expected SQL for retrieving the row number */ protected String expectedRowNumber() { return "ROW_NUMBER()"; }
3.68
morf_ResultSetMismatch_getLeftValue
/** * @return Value from the left hand result set. */ public String getLeftValue() { return leftValue; }
3.68
hudi_HoodieRepairTool_copyFiles
/** * Copies the list of files from source base path to destination base path. * The destination file path (base + relative) should not already exist. * * @param context {@link HoodieEngineContext} instance. * @param relativeFilePaths A {@link List} of relative file paths for copying. * @param sourceBasePath Source base path. * @param destBasePath Destination base path. * @return {@code true} if all successful; {@code false} otherwise. */ static boolean copyFiles( HoodieEngineContext context, List<String> relativeFilePaths, String sourceBasePath, String destBasePath) { SerializableConfiguration conf = context.getHadoopConf(); List<Boolean> allResults = context.parallelize(relativeFilePaths) .mapPartitions(iterator -> { List<Boolean> results = new ArrayList<>(); FileSystem fs = FSUtils.getFs(destBasePath, conf.get()); iterator.forEachRemaining(filePath -> { boolean success = false; Path sourcePath = new Path(sourceBasePath, filePath); Path destPath = new Path(destBasePath, filePath); try { if (!fs.exists(destPath)) { FileIOUtils.copy(fs, sourcePath, destPath); success = true; } } catch (IOException e) { // copy failed LOG.error(String.format("Copying file failed: source [%s], destination [%s]", sourcePath, destPath)); } finally { results.add(success); } }); return results.iterator(); }, true) .collectAsList(); return allResults.stream().reduce((r1, r2) -> r1 && r2).orElse(false); }
3.68
pulsar_AvroRecordBuilderImpl_clear
/** * Clears the value of the given field. * * @param index the index of the field to clear. * @return a reference to the RecordBuilder. */ protected GenericRecordBuilder clear(int index) { avroRecordBuilder.clear( genericSchema.getAvroSchema().getFields().get(index)); return this; }
3.68
morf_SqlParameter_getMetadata
/** * Returns the field metadata for the parameter. * * @return the field metadata for the parameter. */ public Column getMetadata() { return SchemaUtils.column(name, type, width, scale); }
3.68
open-banking-gateway_HbciConsentInfo_noTransactionConsentPresent
/** * Checks whether no usable transaction consent is present (true if none exists or the cached one is incompatible). */ public boolean noTransactionConsentPresent(TransactionListHbciContext ctx) { if (ctx.isConsentIncompatible()) { return true; } Optional<HbciResultCache> cached = cachedResultAccessor.resultFromCache(ctx); return cached.map( hbciResultCache -> null == hbciResultCache.getTransactionsById() || null == hbciResultCache.getTransactionsById().get(ctx.getAccountIban()) ).orElse(true); }
3.68
framework_ColorPickerPopup_isTabVisible
/** * Checks the visibility of the given tab * * @param tab * The tab to check * @return true if tab is visible, false otherwise */ private boolean isTabVisible(Component tab) { for (Component child : tabs) { if (child == tab) { return true; } } return false; }
3.68
hbase_HBaseReplicationEndpoint_fetchSlavesAddresses
/** * Get the list of all the region servers from the specified peer * @return list of region server addresses or an empty list if the slave is unavailable */ protected List<ServerName> fetchSlavesAddresses() { List<String> children = null; try { synchronized (zkwLock) { children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().rsZNode); } } catch (KeeperException ke) { if (LOG.isDebugEnabled()) { LOG.debug("Fetch slaves addresses failed", ke); } reconnect(ke); } if (children == null) { return Collections.emptyList(); } List<ServerName> addresses = new ArrayList<>(children.size()); for (String child : children) { addresses.add(ServerName.parseServerName(child)); } return addresses; }
3.68
hbase_RestoreSnapshotProcedure_getMonitorStatus
/** * Set up monitor status if it is not created. */ private MonitoredTask getMonitorStatus() { if (monitorStatus == null) { monitorStatus = TaskMonitor.get().createStatus( "Restoring snapshot '" + snapshot.getName() + "' to table " + getTableName()); } return monitorStatus; }
3.68
morf_SchemaValidator_isEntityNameLengthValid
/** * Method to establish if a given string representing an Entity name is within the allowed length in characters. * * @see #MAX_LENGTH * * @param name the string to check against the allowed length * @return true if it is within the allowed length, otherwise false. */ boolean isEntityNameLengthValid(String name) { return name.length() <= MAX_LENGTH; }
3.68
hadoop_S3ClientFactory_withRegion
/** * Set region. * * @param value new value * @return the builder */ public S3ClientCreationParameters withRegion( final String value) { region = value; return this; }
3.68
flink_HiveSourceBuilder_setLimit
/** Sets the maximum number of records this source should return. */ public HiveSourceBuilder setLimit(Long limit) { this.limit = limit; return this; }
3.68
hibernate-validator_ConstraintHelper_isConstraintAnnotation
/** * Checks whether the specified annotation is a valid constraint annotation. A constraint annotation has to * fulfill the following conditions: * <ul> * <li>Must be annotated with {@link Constraint} * <li>Define a message parameter</li> * <li>Define a group parameter</li> * <li>Define a payload parameter</li> * </ul> * * @param annotationType The annotation type to test. * * @return {@code true} if the annotation fulfills the above conditions, {@code false} otherwise. */ public boolean isConstraintAnnotation(Class<? extends Annotation> annotationType) { // Note: we don't use isJdkAnnotation() here as it does more harm than good. if ( isBuiltinConstraint( annotationType ) ) { return true; } if ( annotationType.getAnnotation( Constraint.class ) == null ) { return false; } return externalConstraints.computeIfAbsent( annotationType, a -> { assertMessageParameterExists( a ); assertGroupsParameterExists( a ); assertPayloadParameterExists( a ); assertValidationAppliesToParameterSetUpCorrectly( a ); assertNoParameterStartsWithValid( a ); return Boolean.TRUE; } ); }
3.68
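Note: for reference, a hypothetical annotation satisfying all four conditions checked above (the @Constraint meta-annotation plus message/groups/payload members). The name and message key are invented, and the javax.validation imports assume a classic Bean Validation setup (newer versions use jakarta.validation instead).

import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;

import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import javax.validation.Constraint;
import javax.validation.Payload;

// Hypothetical constraint: meta-annotated with @Constraint and declaring
// the message, groups, and payload members the checks above require.
@Target(FIELD)
@Retention(RUNTIME)
@Constraint(validatedBy = { }) // validator class omitted in this sketch
public @interface NotBlankish {
    String message() default "{com.example.NotBlankish.message}";
    Class<?>[] groups() default { };
    Class<? extends Payload>[] payload() default { };
}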
hbase_HMaster_skipRegionManagementAction
/** * Checks master state before initiating action over region topology. * @param action the name of the action under consideration, for logging. * @return {@code true} when the caller should exit early, {@code false} otherwise. */ @Override public boolean skipRegionManagementAction(final String action) { // Note: this method could be `default` on MasterServices were it not for logging. if (!isInitialized()) { LOG.debug("Master has not been initialized, don't run {}.", action); return true; } if (this.getServerManager().isClusterShutdown()) { LOG.info("Cluster is shutting down, don't run {}.", action); return true; } if (isInMaintenanceMode()) { LOG.info("Master is in maintenance mode, don't run {}.", action); return true; } return false; }
3.68
querydsl_AbstractHibernateQuery_setReadOnly
/** * Entities retrieved by this query will be loaded in * a read-only mode where Hibernate will never dirty-check * them or make changes persistent. * * @return the current object * */ @SuppressWarnings("unchecked") public Q setReadOnly(boolean readOnly) { this.readOnly = readOnly; return (Q) this; }
3.68
framework_GridDragSourceConnector_getSelectedVisibleRows
/** * Collects the data of all selected visible rows. * * @return List of data of all selected visible rows. */ private List<JsonObject> getSelectedVisibleRows() { return getSelectedRowsInRange(getEscalator().getVisibleRowRange()); }
3.68
flink_SourceTestSuiteBase_addCollectSink
/** Add a collect sink in the job. */ protected CollectIteratorBuilder<T> addCollectSink(DataStream<T> stream) { TypeSerializer<T> serializer = stream.getType().createSerializer(stream.getExecutionConfig()); String accumulatorName = "dataStreamCollect_" + UUID.randomUUID(); CollectSinkOperatorFactory<T> factory = new CollectSinkOperatorFactory<>(serializer, accumulatorName); CollectSinkOperator<T> operator = (CollectSinkOperator<T>) factory.getOperator(); CollectStreamSink<T> sink = new CollectStreamSink<>(stream, factory); sink.name("Data stream collect sink"); stream.getExecutionEnvironment().addOperator(sink.getTransformation()); return new CollectIteratorBuilder<>( operator, serializer, accumulatorName, stream.getExecutionEnvironment().getCheckpointConfig()); }
3.68
hbase_TableDescriptorBuilder_isMetaRegion
/** * Checks if this table is <code> hbase:meta </code> region. * @return true if this table is <code> hbase:meta </code> region */ @Override public boolean isMetaRegion() { return getOrDefault(IS_META_KEY, Boolean::valueOf, false); }
3.68
hbase_SpaceQuotaSnapshot_getQuotaStatus
/** * Returns the status of the quota. */ @Override public SpaceQuotaStatus getQuotaStatus() { return quotaStatus; }
3.68
flink_SkipListUtils_helpGetNextValuePointer
/** * Returns the next value pointer of the value. * * @param valuePointer the value pointer of current value. * @param spaceAllocator the space allocator. */ static long helpGetNextValuePointer(long valuePointer, Allocator spaceAllocator) { Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(valuePointer)); int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(valuePointer); MemorySegment segment = chunk.getMemorySegment(offsetInChunk); int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk); return getNextValuePointer(segment, offsetInByteBuffer); }
3.68
flink_NettyProtocol_getClientChannelHandlers
/** * Returns the client channel handlers. * * <pre> * +-----------+----------+ +----------------------+ * | Remote input channel | | request client | * +-----------+----------+ +-----------+----------+ * | | (1) write * +---------------+-----------------------------------+---------------+ * | | CLIENT CHANNEL PIPELINE | | * | | \|/ | * | +----------+----------+ +----------------------+ | * | | Request handler + | Message encoder | | * | +----------+----------+ +-----------+----------+ | * | /|\ \|/ | * | | | | * | +----------+------------+ | | * | | Message+Frame decoder | | | * | +----------+------------+ | | * | /|\ | | * +---------------+-----------------------------------+---------------+ * | | (3) server response \|/ (2) client request * +---------------+-----------------------------------+---------------+ * | | | | * | [ Socket.read() ] [ Socket.write() ] | * | | * | Netty Internal I/O Threads (Transport Implementation) | * +-------------------------------------------------------------------+ * </pre> * * @return channel handlers */ public ChannelHandler[] getClientChannelHandlers() { NetworkClientHandler networkClientHandler = new CreditBasedPartitionRequestClientHandler(); return new ChannelHandler[] { messageEncoder, new NettyMessageClientDecoderDelegate(networkClientHandler), networkClientHandler }; }
3.68
querydsl_Expressions_operation
/** * Create a new Operation expression * * @param type type of expression * @param operator operator * @param args operation arguments * @return operation expression */ public static <T> SimpleOperation<T> operation(Class<? extends T> type, Operator operator, Expression<?>... args) { return simpleOperation(type, operator, args); }
3.68
dubbo_TypeDefinition_formatGenericType
/** * Replacing <code>", "</code> to <code>","</code> will not change the semantic of * {@link ParameterizedType#toString()} * * @param type * @return formatted type * @see sun.reflect.generics.reflectiveObjects.ParameterizedTypeImpl */ private static String formatGenericType(String type) { return replace(type, ", ", ","); }
3.68
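Note: the substitution above only strips the space that ParameterizedType#toString() inserts between type arguments. A plain-Java demonstration (String#replace behaves the same way for this input as the commons StringUtils.replace used in the snippet):

public class FormatGenericTypeDemo {
    public static void main(String[] args) {
        String type = "java.util.Map<java.lang.String, java.util.List<java.lang.Integer>>";
        // Dropping the space does not change the semantics of the type string.
        System.out.println(type.replace(", ", ","));
        // prints: java.util.Map<java.lang.String,java.util.List<java.lang.Integer>>
    }
}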
framework_VaadinFinderLocatorStrategy_getElementsByPathStartingAtConnector
/** * Finds a list of elements by the specified path, starting traversal of the * connector hierarchy from the specified root. * * @param path * the locator path * @param root * the root connector * @return the list of elements identified by path or empty list if not * found. */ private List<Element> getElementsByPathStartingAtConnector(String path, ComponentConnector root, Element actualRoot) { String[] pathComponents = path.split(SUBPART_SEPARATOR); List<ComponentConnector> connectors; if (!pathComponents[0].isEmpty()) { connectors = findConnectorsByPath(pathComponents[0], Arrays.asList(root)); } else { connectors = Arrays.asList(root); } List<Element> output = new ArrayList<>(); if (null != connectors && !connectors.isEmpty()) { for (ComponentConnector connector : connectors) { if (!actualRoot .isOrHasChild(connector.getWidget().getElement())) { // Filter out widgets that are not children of actual root continue; } if (pathComponents.length > 1) { // We have SubParts if (connector.getWidget() instanceof SubPartAware) { output.add(((SubPartAware) connector.getWidget()) .getSubPartElement(pathComponents[1])); } } else { output.add(connector.getWidget().getElement()); } } } return eliminateDuplicates(output); }
3.68
querydsl_AbstractMongodbQuery_asDBObject
/** * Get the where definition as a {@code DBObject} instance. * * @return the where clause as a DBObject */ public DBObject asDBObject() { return createQuery(queryMixin.getMetadata().getWhere()); }
3.68
framework_TreeGridDropEvent_getDropTargetRowDepth
/** * Gets the depth of the drop target row in the hierarchy. * * @return the depth of the drop target row in the hierarchy */ public Optional<Integer> getDropTargetRowDepth() { return Optional.ofNullable(depth); }
3.68
framework_JSR356WebsocketInitializer_isAtmosphereFrameworkAttribute
/** * Checks if the given attribute name matches the convention used for * storing AtmosphereFramework references. * * @param attributeName * the attribute name to check * @return <code>true</code> if the attribute name matches the convention, * <code>false</code> otherwise */ private static boolean isAtmosphereFrameworkAttribute( String attributeName) { return attributeName .startsWith(JSR356WebsocketInitializer.class.getName() + "."); }
3.68
morf_SelectStatement_useIndex
/** * If supported by the dialect, hints to the database that a particular index should be used * in the query, but places no obligation on the database to do so. * * <p>In general, as with all query plan modification, <strong>do not use this unless you know * exactly what you are doing</strong>.</p> * * <p>As for all query plan modification (see also {@link #optimiseForRowCount(int)} * and {@link #useImplicitJoinOrder()}): where supported on the target database, these directives are * applied in the SQL in the order they are called on {@link SelectStatement}. This usually * affects their precedence or relative importance, depending on the platform.</p> * * @param table The table whose index to use. * @param indexName The name of the index to use. * @return a new select statement with the change applied. */ public SelectStatement useIndex(TableReference table, String indexName) { return copyOnWriteOrMutate( (SelectStatementBuilder b) -> b.useIndex(table, indexName), () -> this.hints.add(new UseIndex(table, indexName)) ); }
3.68
pulsar_AbstractPushSource_consume
/** * Send this message to be written to Pulsar. * Pass null when you are done with this task. * @param record next message from source which should be sent to a Pulsar topic */ public void consume(Record<T> record) { try { if (record != null) { queue.put(record); } else { queue.put(nullRecord); } } catch (InterruptedException e) { throw new RuntimeException(e); } }
3.68
flink_DispatcherGateway_stopWithSavepointAndGetLocation
/** * Stops the job with a savepoint, returning a future that completes with the savepoint location * when the savepoint is completed. * * @param jobId the job id * @param targetDirectory Target directory for the savepoint. * @param formatType the binary format of the savepoint * @param savepointMode context of the savepoint operation * @param timeout for the rpc call * @return Future which is completed with the savepoint location once it is completed */ default CompletableFuture<String> stopWithSavepointAndGetLocation( JobID jobId, String targetDirectory, SavepointFormatType formatType, TriggerSavepointMode savepointMode, @RpcTimeout final Time timeout) { throw new UnsupportedOperationException(); }
3.68
framework_VCalendar_setRangeSelectAllowed
/** * Set selecting a range allowed. * * @param rangeSelectAllowed * Should selecting a range be allowed */ public void setRangeSelectAllowed(boolean rangeSelectAllowed) { this.rangeSelectAllowed = rangeSelectAllowed; }
3.68
hudi_AvroSchemaCompatibility_hashCode
/** * {@inheritDoc} */ @Override public int hashCode() { return Arrays.hashCode(new Object[] {mResult, mReader, mWriter, mDescription}); }
3.68
framework_BeanUtil_checkSerialization
/** * Checks if the object is serializable or not. To be used in assertion * checks only, since the check might be a bit heavyweight. * * @param obj * to be checked * @return {@code true} * @throws AssertionError * if the object is not serializable */ public static boolean checkSerialization(Object obj) { try { ObjectOutputStream dummyObjectOutputStream = new ObjectOutputStream( new OutputStream() { @Override public void write(int b) { } @SuppressWarnings("NullableProblems") @Override public void write(byte[] ignored) { } @SuppressWarnings("NullableProblems") @Override public void write(byte[] b, int off, int len) { } }); dummyObjectOutputStream.writeObject(obj); } catch (Throwable e) { throw new AssertionError( "Formatter supplier should be serializable", e); } return true; }
3.68
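Note: because the probe above serializes the whole object graph, it is intended to sit behind an assert so it only runs when assertions are enabled; a hypothetical call site follows (the BeanUtil import path is assumed from the Vaadin framework module):

import com.vaadin.data.util.BeanUtil; // assumed package

public class SerializationCheckDemo {
    public static void main(String[] args) {
        java.io.Serializable formatterSupplier = "any serializable value"; // placeholder
        // Executes only under java -ea; throws AssertionError if not serializable.
        assert BeanUtil.checkSerialization(formatterSupplier);
    }
}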
hudi_BoundedInMemoryQueue_expectMoreRecords
/** * Checks if records are either available in the queue or expected to be written in future. */ private boolean expectMoreRecords() { return !isWriteDone.get() || (isWriteDone.get() && !queue.isEmpty()); }
3.68