Columns: name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
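The rows that follow each consist of three fields in the order given above: the snippet identifier (name), the flattened Java source including its Javadoc (code_snippet), and the quality score (score). As a minimal sketch of that record layout, one row could be modeled in Java as below; the type name CodeSnippetSample and the main method are illustrative assumptions, while the example values are copied from the first row of the dump.

// Minimal illustrative model of one dataset row; the record name is an assumption, not part of the dataset.
public record CodeSnippetSample(String name, String codeSnippet, double score) {

    public static void main(String[] args) {
        // Example values copied from the first row below (hadoop_EntityColumn_getColumnQualifier).
        CodeSnippetSample sample = new CodeSnippetSample(
                "hadoop_EntityColumn_getColumnQualifier",
                "private String getColumnQualifier() { return columnQualifier; }",
                3.68);
        System.out.println(sample.name() + " -> " + sample.score());
    }
}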
hadoop_EntityColumn_getColumnQualifier
/** * @return the column qualifier value */ private String getColumnQualifier() { return columnQualifier; }
3.68
framework_GridElement_isEditable
/** * Gets whether the column with the given index is editable, that is, * has an associated editor field. * * @param colIndex * the column index * @return {@code true} if the column has an editor field, {@code false} * otherwise */ public boolean isEditable(int colIndex) { return grid .isElementPresent(By.vaadin("#editor[" + colIndex + "]")); }
3.68
morf_SelectStatement_having
/** * Filters the grouped records by some criteria. * * <blockquote><pre> * select() * .from(tableRef("Foo")) * .groupBy(field("age")) * .having(min(field("age")).greaterThan(20));</pre></blockquote> * * @param criterion the criteria on which to filter the grouped records * @return a new select statement with the change applied. */ public SelectStatement having(Criterion criterion) { return copyOnWriteOrMutate( (SelectStatementBuilder b) -> b.having(criterion), () -> { if (criterion == null) { throw new IllegalArgumentException("Criterion was null in having clause"); } if (having != null) { throw new UnsupportedOperationException("Cannot specify more than one having clause per statement"); } // Add the singleton having = criterion; } ); }
3.68
framework_TouchScrollDelegate_getWidget
/** * @return The widget this {@link TouchScrollHandler} is connected to. */ protected Widget getWidget() { return widget; }
3.68
flink_StreamGraphHasherV2_generateNodeHash
/** * Generates a hash for the node and returns whether the operation was successful. * * @param node The node to generate the hash for * @param hashFunction The hash function to use * @param hashes The current state of generated hashes * @return <code>true</code> if the node hash has been generated. <code>false</code>, otherwise. * If the operation is not successful, the hash needs be generated at a later point when all * input is available. * @throws IllegalStateException If node has user-specified hash and is intermediate node of a * chain */ private boolean generateNodeHash( StreamNode node, HashFunction hashFunction, Map<Integer, byte[]> hashes, boolean isChainingEnabled, StreamGraph streamGraph) { // Check for user-specified ID String userSpecifiedHash = node.getTransformationUID(); if (userSpecifiedHash == null) { // Check that all input nodes have their hashes computed for (StreamEdge inEdge : node.getInEdges()) { // If the input node has not been visited yet, the current // node will be visited again at a later point when all input // nodes have been visited and their hashes set. if (!hashes.containsKey(inEdge.getSourceId())) { return false; } } Hasher hasher = hashFunction.newHasher(); byte[] hash = generateDeterministicHash(node, hasher, hashes, isChainingEnabled, streamGraph); if (hashes.put(node.getId(), hash) != null) { // Sanity check throw new IllegalStateException( "Unexpected state. Tried to add node hash " + "twice. This is probably a bug in the JobGraph generator."); } return true; } else { Hasher hasher = hashFunction.newHasher(); byte[] hash = generateUserSpecifiedHash(node, hasher); for (byte[] previousHash : hashes.values()) { if (Arrays.equals(previousHash, hash)) { throw new IllegalArgumentException( "Hash collision on user-specified ID " + "\"" + userSpecifiedHash + "\". " + "Most likely cause is a non-unique ID. Please check that all IDs " + "specified via `uid(String)` are unique."); } } if (hashes.put(node.getId(), hash) != null) { // Sanity check throw new IllegalStateException( "Unexpected state. Tried to add node hash " + "twice. This is probably a bug in the JobGraph generator."); } return true; } }
3.68
dubbo_EnvironmentUtils_extractProperties
/** * Extracts the properties from {@link ConfigurableEnvironment} * * @param environment {@link ConfigurableEnvironment} * @return Read-only Map */ public static Map<String, Object> extractProperties(ConfigurableEnvironment environment) { return Collections.unmodifiableMap(doExtraProperties(environment)); }
3.68
hadoop_FlowActivityRowKey_getRowKeyAsString
/** * Constructs a row key for the flow activity table as follows: * {@code clusterId!dayTimestamp!user!flowName}. * @return String representation of row key */ public String getRowKeyAsString() { return flowActivityRowKeyConverter.encodeAsString(this); }
3.68
flink_ProcessingTimeoutTrigger_of
/** * Creates a new {@link ProcessingTimeoutTrigger} that fires when the inner trigger is fired or * when the timeout timer fires. * * <p>For example: {@code ProcessingTimeoutTrigger.of(CountTrigger.of(3), 100, false, true)}, * will create a CountTrigger with timeout of 100 millis. So, if the first record arrives at * time {@code t}, and the second record arrives at time {@code t+50}, the trigger will fire * when the third record arrives or when the time is {@code t+100} (timeout). * * @param nestedTrigger the nested {@link Trigger} * @param timeout the timeout interval * @param resetTimerOnNewRecord each time a new element arrives, reset the timer and start a new * one * @param shouldClearOnTimeout whether to call {@link Trigger#clear(Window, TriggerContext)} * when the processing-time timer fires * @param <T> The type of the element. * @param <W> The type of {@link Window Windows} on which this trigger can operate. * @return {@link ProcessingTimeoutTrigger} with the above configuration. */ public static <T, W extends Window> ProcessingTimeoutTrigger<T, W> of( Trigger<T, W> nestedTrigger, Duration timeout, boolean resetTimerOnNewRecord, boolean shouldClearOnTimeout) { return new ProcessingTimeoutTrigger<>( nestedTrigger, timeout.toMillis(), resetTimerOnNewRecord, shouldClearOnTimeout); }
3.68
hadoop_BlockGrouper_setSchema
/** * Set EC schema. * @param schema schema. */ public void setSchema(ECSchema schema) { this.schema = schema; }
3.68
dubbo_ScopeModelAware_setScopeModel
/** * Override this method if you need to get the scope model (maybe one of FrameworkModel/ApplicationModel/ModuleModel). * @param scopeModel the scope model */ default void setScopeModel(ScopeModel scopeModel) {}
3.68
streampipes_SwingingDoorTrendingFilter_filter
/** * input a newly arrived event and output whether a new characteristic event is filtered * * @param time the timestamp extracted from the newly arrived event * @param value the value extracted from the newly arrived event * @param event the newly arrived event * @return true if a new characteristic event is filtered */ public boolean filter(long time, double value, Event event) { // store the first time and value pair if (isFirstValue) { isFirstValue = false; lastReadTimestamp = time; lastReadDouble = value; lastReadEvent = event; lastStoredTimestamp = time; lastStoredDouble = value; lastStoredEvent = event; return true; } // if current point to the last stored point's time distance is within compressionMinTimeInterval, // will not check two doors nor store any point within the compressionMinTimeInterval time range if (time - lastStoredTimestamp <= compressionMinTimeInterval) { return false; } // if current point to the last stored point's time distance is larger than compressionMaxTimeInterval, // will reset two doors, and store current point; if (time - lastStoredTimestamp >= compressionMaxTimeInterval) { reset(time, value, event); return true; } final double currentUpperSlope = (value - lastStoredDouble - compressionDeviation) / (time - lastStoredTimestamp); if (currentUpperSlope > upperDoor) { upperDoor = currentUpperSlope; } final double currentLowerSlope = (value - lastStoredDouble + compressionDeviation) / (time - lastStoredTimestamp); if (currentLowerSlope < lowerDoor) { lowerDoor = currentLowerSlope; } // current point to the lastStoredPair's value exceeds compDev, will store lastReadPair and // update two doors if (upperDoor >= lowerDoor) { lastStoredTimestamp = lastReadTimestamp; lastStoredDouble = lastReadDouble; lastStoredEvent = lastReadEvent; upperDoor = (value - lastStoredDouble - compressionDeviation) / (time - lastStoredTimestamp); lowerDoor = (value - lastStoredDouble + compressionDeviation) / (time - lastStoredTimestamp); lastReadDouble = value; lastReadTimestamp = time; lastReadEvent = event; return true; } lastReadDouble = value; lastReadTimestamp = time; lastReadEvent = event; return false; }
3.68
morf_SchemaHomology_difference
/** * @see org.alfasoftware.morf.metadata.SchemaHomology.DifferenceWriter#difference(java.lang.String) */ @Override public void difference(String message) { differences.add(message); }
3.68
hadoop_ProducerConsumer_hasWork
/** * Returns true if there are items in ProducerConsumer that are either * pending for processing or waiting to be consumed. * * @return True if there were more items put() to ProducerConsumer than * consumed by take() or blockingTake(). */ public boolean hasWork() { return workCnt.get() > 0; }
3.68
framework_VSlider_handleNavigation
/** * Handles the keyboard events handled by the Slider. * * @param keycode * The key code received * @param ctrl * Whether {@code CTRL} was pressed * @param shift * Whether {@code SHIFT} was pressed * @return true if the navigation event was handled */ public boolean handleNavigation(int keycode, boolean ctrl, boolean shift) { // No support for ctrl moving if (ctrl) { return false; } if (keycode == getNavigationUpKey() && isVertical() || keycode == getNavigationRightKey() && !isVertical()) { if (shift) { for (int a = 0; a < acceleration; a++) { increaseValue(false); } acceleration++; } else { increaseValue(false); } return true; } else if (keycode == getNavigationDownKey() && isVertical() || keycode == getNavigationLeftKey() && !isVertical()) { if (shift) { for (int a = 0; a < acceleration; a++) { decreaseValue(false); } acceleration++; } else { decreaseValue(false); } return true; } return false; }
3.68
pulsar_ManagedLedgerConfig_setLedgerRolloverTimeout
/** * @param ledgerRolloverTimeout * the ledgerRolloverTimeout to set */ public ManagedLedgerConfig setLedgerRolloverTimeout(int ledgerRolloverTimeout) { this.ledgerRolloverTimeout = ledgerRolloverTimeout; return this; }
3.68
hudi_PartialUpdateAvroPayload_isRecordNewer
/** * Returns whether the given record is newer than the record of this payload. * * @param orderingVal * @param record The record * @param prop The payload properties * @return true if the given record is newer */ private static boolean isRecordNewer(Comparable orderingVal, IndexedRecord record, Properties prop) { String orderingField = ConfigUtils.getOrderingField(prop); if (!StringUtils.isNullOrEmpty(orderingField)) { boolean consistentLogicalTimestampEnabled = Boolean.parseBoolean(prop.getProperty( KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.key(), KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.defaultValue())); Comparable oldOrderingVal = (Comparable) HoodieAvroUtils.getNestedFieldVal( (GenericRecord) record, orderingField, true, consistentLogicalTimestampEnabled); // pick the payload with greater ordering value as insert record return oldOrderingVal != null && ReflectionUtils.isSameClass(oldOrderingVal, orderingVal) && oldOrderingVal.compareTo(orderingVal) > 0; } return false; }
3.68
flink_StreamExecutionEnvironment_readTextFile
/** * Reads the given file line-by-line and creates a data stream that contains a string with the * contents of each such line. The {@link java.nio.charset.Charset} with the given name will be * used to read the files. * * <p><b>NOTES ON CHECKPOINTING: </b> The source monitors the path, creates the {@link * org.apache.flink.core.fs.FileInputSplit FileInputSplits} to be processed, forwards them to * the downstream readers to read the actual data, and exits, without waiting for the readers to * finish reading. This implies that no more checkpoint barriers are going to be forwarded after * the source exits, thus having no checkpoints after that point. * * @param filePath The path of the file, as a URI (e.g., "file:///some/local/file" or * "hdfs://host:port/file/path") * @param charsetName The name of the character set used to read the file * @return The data stream that represents the data read from the given file as text lines * @deprecated Use {@code * FileSource#forRecordStreamFormat()/forBulkFileFormat()/forRecordFileFormat() instead}. An * example of reading a file using a simple {@code TextLineInputFormat}: * <pre>{@code * FileSource<String> source = * FileSource.forRecordStreamFormat( * new TextLineInputFormat("UTF-8"), new Path("/foo/bar")) * .build(); * }</pre> */ @Deprecated public DataStreamSource<String> readTextFile(String filePath, String charsetName) { Preconditions.checkArgument( !StringUtils.isNullOrWhitespaceOnly(filePath), "The file path must not be null or blank."); TextInputFormat format = new TextInputFormat(new Path(filePath)); format.setFilesFilter(FilePathFilter.createDefaultFilter()); TypeInformation<String> typeInfo = BasicTypeInfo.STRING_TYPE_INFO; format.setCharsetName(charsetName); return readFile(format, filePath, FileProcessingMode.PROCESS_ONCE, -1, typeInfo); }
3.68
hbase_Scan_setRowPrefixFilter
/** * <p> * Set a filter (using stopRow and startRow) so the result set only contains rows where the rowKey * starts with the specified prefix. * </p> * <p> * This is a utility method that converts the desired rowPrefix into the appropriate values for * the startRow and stopRow to achieve the desired result. * </p> * <p> * This can safely be used in combination with setFilter. * </p> * <p> * <strong>This CANNOT be used in combination with withStartRow and/or withStopRow.</strong> Such * a combination will yield unexpected and even undefined results. * </p> * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.) * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method is considered to be * confusing as it does not use a {@link Filter} but uses setting the startRow and * stopRow instead. Use {@link #setStartStopRowForPrefixScan(byte[])} instead. */ @Deprecated public Scan setRowPrefixFilter(byte[] rowPrefix) { return setStartStopRowForPrefixScan(rowPrefix); }
3.68
hadoop_AzureBlobFileSystem_getXAttr
/** * Get the value of an attribute for a path. * * @param path The path on which to get the attribute * @param name The attribute to get * @return The bytes of the attribute's value (encoded in latin-1) * or null if the attribute does not exist * @throws IOException If there was an issue getting the attribute from Azure * @throws IllegalArgumentException If name is null or empty */ @Override public byte[] getXAttr(final Path path, final String name) throws IOException { LOG.debug("AzureBlobFileSystem.getXAttr path: {}", path); if (name == null || name.isEmpty()) { throw new IllegalArgumentException("A valid name must be specified."); } Path qualifiedPath = makeQualified(path); byte[] value = null; try { TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.GET_ATTR, true, tracingHeaderFormat, listener); Hashtable<String, String> properties; String xAttrName = ensureValidAttributeName(name); if (path.isRoot()) { properties = abfsStore.getFilesystemProperties(tracingContext); } else { properties = abfsStore.getPathStatus(qualifiedPath, tracingContext); } if (properties.containsKey(xAttrName)) { String xAttrValue = properties.get(xAttrName); value = abfsStore.encodeAttribute(xAttrValue); } } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } return value; }
3.68
hbase_RequestConverter_buildFlushRegionRequest
/** * Create a protocol buffer FlushRegionRequest for a given region name * @param regionName the name of the region to get info * @param columnFamily column family within a region * @return a protocol buffer FlushRegionRequest */ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName, byte[] columnFamily, boolean writeFlushWALMarker) { FlushRegionRequest.Builder builder = FlushRegionRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); builder.setWriteFlushWalMarker(writeFlushWALMarker); if (columnFamily != null) { builder.setFamily(UnsafeByteOperations.unsafeWrap(columnFamily)); } return builder.build(); }
3.68
hmily_HmilyRepositoryNode_getHmilyParticipantUndoRealPath
/** * Get hmily participant undo real path. * * @param undoId undo id * @return hmily participant undo real path */ public String getHmilyParticipantUndoRealPath(final Long undoId) { return Joiner.on("/").join(getHmilyParticipantUndoRootPath(), undoId); }
3.68
rocketmq-connect_MetricsReporter_onTimerRemoved
/** * Called when a {@link Timer} is removed from the registry. * * @param name the timer's name */ public void onTimerRemoved(String name) { this.onCounterRemoved(MetricUtils.stringToMetricName(name)); }
3.68
pulsar_JSONSchema_getBackwardsCompatibleJsonSchemaInfo
/** * Implemented for backwards-compatibility reasons: the original schema generated by JSONSchema was based on the JSON Schema standard, * but we have since standardized on Avro. * * @return the backwards-compatible {@link SchemaInfo} */ public SchemaInfo getBackwardsCompatibleJsonSchemaInfo() { SchemaInfo backwardsCompatibleSchemaInfo; try { ObjectWriter objectWriter = ObjectMapperFactory.getMapperWithIncludeAlways().writer(); JsonSchemaGenerator schemaGen = new JsonSchemaGenerator(objectWriter); JsonSchema jsonBackwardsCompatibleSchema = schemaGen.generateSchema(pojo); backwardsCompatibleSchemaInfo = SchemaInfoImpl.builder() .name("") .properties(schemaInfo.getProperties()) .type(SchemaType.JSON) .schema(objectWriter.writeValueAsBytes(jsonBackwardsCompatibleSchema)) .build(); } catch (JsonProcessingException ex) { throw new RuntimeException(ex); } return backwardsCompatibleSchemaInfo; }
3.68
framework_TreeGrid_setItemCollapseAllowedProvider
/** * Sets the item collapse allowed provider for this TreeGrid. The provider * should return {@code true} for any item that the user can collapse. * <p> * <strong>Note:</strong> This callback will be accessed often when sending * data to the client. The callback should not do any costly operations. * <p> * This method is a shortcut to method with the same name in * {@link HierarchicalDataCommunicator}. * * @param provider * the item collapse allowed provider, not {@code null} * * @see HierarchicalDataCommunicator#setItemCollapseAllowedProvider(ItemCollapseAllowedProvider) */ public void setItemCollapseAllowedProvider( ItemCollapseAllowedProvider<T> provider) { getDataCommunicator().setItemCollapseAllowedProvider(provider); }
3.68
flink_Tuple17_setFields
/** * Sets new values to all fields of the tuple. * * @param f0 The value for field 0 * @param f1 The value for field 1 * @param f2 The value for field 2 * @param f3 The value for field 3 * @param f4 The value for field 4 * @param f5 The value for field 5 * @param f6 The value for field 6 * @param f7 The value for field 7 * @param f8 The value for field 8 * @param f9 The value for field 9 * @param f10 The value for field 10 * @param f11 The value for field 11 * @param f12 The value for field 12 * @param f13 The value for field 13 * @param f14 The value for field 14 * @param f15 The value for field 15 * @param f16 The value for field 16 */ public void setFields( T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10, T11 f11, T12 f12, T13 f13, T14 f14, T15 f15, T16 f16) { this.f0 = f0; this.f1 = f1; this.f2 = f2; this.f3 = f3; this.f4 = f4; this.f5 = f5; this.f6 = f6; this.f7 = f7; this.f8 = f8; this.f9 = f9; this.f10 = f10; this.f11 = f11; this.f12 = f12; this.f13 = f13; this.f14 = f14; this.f15 = f15; this.f16 = f16; }
3.68
framework_SASSAddonImportFileCreator_main
/** * * @param args * Theme directory where the addons.scss file should be created */ public static void main(String[] args) throws IOException { if (args.length == 0) { printUsage(); } else { String themeDirectory = args[0]; updateTheme(themeDirectory); } }
3.68
hbase_HelloHBase_getAndPrintRowContents
/** * Invokes Table#get and prints out the contents of the retrieved row. * @param table Standard Table object * @throws IOException If IO problem encountered */ static void getAndPrintRowContents(final Table table) throws IOException { Result row = table.get(new Get(MY_ROW_ID)); System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table [" + table.getName().getNameAsString() + "] in HBase, with the following content:"); for (Entry<byte[], NavigableMap<byte[], byte[]>> colFamilyEntry : row.getNoVersionMap() .entrySet()) { String columnFamilyName = Bytes.toString(colFamilyEntry.getKey()); System.out.println(" Columns in Column Family [" + columnFamilyName + "]:"); for (Entry<byte[], byte[]> columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) { System.out.println(" Value of Column [" + columnFamilyName + ":" + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + Bytes.toString(columnNameAndValueMap.getValue())); } } }
3.68
querydsl_SQLExpressions_addSeconds
/** * Add the given amount of seconds to the date * * @param date datetime * @param seconds seconds to add * @return converted datetime */ public static <D extends Comparable> DateTimeExpression<D> addSeconds(DateTimeExpression<D> date, int seconds) { return Expressions.dateTimeOperation(date.getType(), Ops.DateTimeOps.ADD_SECONDS, date, ConstantImpl.create(seconds)); }
3.68
pulsar_PulsarChannelInitializer_initTls
/** * Initialize TLS for a channel. Should be invoked before the channel is connected to the remote address. * * @param ch the channel * @param sniHost the value of this argument will be passed as peer host and port when creating the SSLEngine (which * in turn will use these values to set SNI header when doing the TLS handshake). Cannot be * <code>null</code>. * @return a {@link CompletableFuture} that completes when the TLS is set up. */ CompletableFuture<Channel> initTls(Channel ch, InetSocketAddress sniHost) { Objects.requireNonNull(ch, "A channel is required"); Objects.requireNonNull(sniHost, "A sniHost is required"); if (!tlsEnabled) { throw new IllegalStateException("TLS is not enabled in client configuration"); } CompletableFuture<Channel> initTlsFuture = new CompletableFuture<>(); ch.eventLoop().execute(() -> { try { SslHandler handler = tlsEnabledWithKeyStore ? new SslHandler(nettySSLContextAutoRefreshBuilder.get() .createSSLEngine(sniHost.getHostString(), sniHost.getPort())) : sslContextSupplier.get().newHandler(ch.alloc(), sniHost.getHostString(), sniHost.getPort()); if (tlsHostnameVerificationEnabled) { SecurityUtility.configureSSLHandler(handler); } ch.pipeline().addFirst(TLS_HANDLER, handler); initTlsFuture.complete(ch); } catch (Throwable t) { initTlsFuture.completeExceptionally(t); } }); return initTlsFuture; }
3.68
flink_MemoryManager_shutdown
/** * Shuts the memory manager down, trying to release all the memory it managed. Depending on * implementation details, the memory does not necessarily become reclaimable by the garbage * collector, because there might still be references to allocated segments in the code that * allocated them from the memory manager. */ public void shutdown() { if (!isShutDown) { // mark as shutdown and release memory isShutDown = true; reservedMemory.clear(); // go over all allocated segments and release them for (Set<MemorySegment> segments : allocatedSegments.values()) { for (MemorySegment seg : segments) { seg.free(); } segments.clear(); } allocatedSegments.clear(); } }
3.68
flink_SinkTestSuiteBase_sort
/** Sort the list. */ private List<T> sort(List<T> list) { return list.stream().sorted().collect(Collectors.toList()); }
3.68
hadoop_ZookeeperUtils_splitToPairs
/** * Take a quorum list and split it to (trimmed) pairs * @param hostPortQuorumList list of form h1:port, h2:port2,... * @return a possibly empty list of values between commas. They may not be * valid hostname:port pairs */ public static List<String> splitToPairs(String hostPortQuorumList) { // split an address hot String[] strings = StringUtils.getStrings(hostPortQuorumList); int len = 0; if (strings != null) { len = strings.length; } List<String> tuples = new ArrayList<String>(len); if (strings != null) { for (String s : strings) { tuples.add(s.trim()); } } return tuples; }
3.68
hadoop_ProxyUtils_rejectNonHttpRequests
/** * Reject any request that isn't from an HTTP servlet * @param req request * @throws ServletException if the request is of the wrong type */ public static void rejectNonHttpRequests(ServletRequest req) throws ServletException { if (!(req instanceof HttpServletRequest)) { throw new ServletException(E_HTTP_HTTPS_ONLY); } }
3.68
framework_VCalendar_sortEvents
/** * Sort the events by the current sort order. * * @param events * The events to sort * @return An array where the events have been sorted */ public CalendarEvent[] sortEvents(Collection<CalendarEvent> events) { if (EventSortOrder.DURATION_DESC.equals(eventSortOrder)) { return sortEventsByDuration(events); } else if (!EventSortOrder.UNSORTED.equals(eventSortOrder)) { CalendarEvent[] sorted = events .toArray(new CalendarEvent[events.size()]); switch (eventSortOrder) { case DURATION_ASC: Arrays.sort(sorted, new EventDurationComparator(true)); break; case START_DATE_ASC: Arrays.sort(sorted, new StartDateComparator(true)); break; case START_DATE_DESC: Arrays.sort(sorted, new StartDateComparator(false)); break; } return sorted; } return events.toArray(new CalendarEvent[events.size()]); }
3.68
framework_VComboBox_popupKeyDown
/** * Triggered when a key was pressed in the suggestion popup. * * @param event * The KeyDownEvent of the key */ private void popupKeyDown(KeyDownEvent event) { if (enableDebug) { debug("VComboBox: popupKeyDown(" + event.getNativeKeyCode() + ")"); } // Propagation of handled events is stopped so other handlers such as // shortcut key handlers do not also handle the same events. switch (event.getNativeKeyCode()) { case KeyCodes.KEY_DOWN: suggestionPopup.selectNextItem(); DOM.eventPreventDefault(DOM.eventGetCurrentEvent()); event.stopPropagation(); break; case KeyCodes.KEY_UP: suggestionPopup.selectPrevItem(); DOM.eventPreventDefault(DOM.eventGetCurrentEvent()); event.stopPropagation(); break; case KeyCodes.KEY_PAGEDOWN: selectNextPage(); event.stopPropagation(); break; case KeyCodes.KEY_PAGEUP: selectPrevPage(); event.stopPropagation(); break; case KeyCodes.KEY_ESCAPE: reset(); DOM.eventPreventDefault(DOM.eventGetCurrentEvent()); event.stopPropagation(); break; case KeyCodes.KEY_TAB: case KeyCodes.KEY_ENTER: // queue this, may be cancelled by selection int selectedIndex = suggestionPopup.menu.getSelectedIndex(); if (!allowNewItems && selectedIndex != -1 && !currentSuggestions.isEmpty()) { onSuggestionSelected(currentSuggestions.get(selectedIndex)); } else { dataReceivedHandler.reactOnInputWhenReady(tb.getText()); } suggestionPopup.hide(); event.stopPropagation(); break; } }
3.68
flink_RemoteStreamEnvironment_getPort
/** * Gets the port of the master (JobManager), where the program will be executed. * * @return The port of the master */ public int getPort() { return configuration.getInteger(JobManagerOptions.PORT); }
3.68
flink_RpcServiceUtils_createRandomName
/** * Creates a random name of the form prefix_X, where X is an increasing number. * * @param prefix Prefix string to prepend to the monotonically increasing name offset number * @return A random name of the form prefix_X where X is an increasing number */ public static String createRandomName(String prefix) { Preconditions.checkNotNull(prefix, "Prefix must not be null."); long nameOffset; // obtain the next name offset by incrementing it atomically do { nameOffset = nextNameOffset.get(); } while (!nextNameOffset.compareAndSet(nameOffset, nameOffset + 1L)); return prefix + '_' + nameOffset; }
3.68
framework_GridLayoutExtraSpacing_getTicketNumber
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber() */ @Override protected Integer getTicketNumber() { // TODO Auto-generated method stub return null; }
3.68
flink_MessageSerializer_deserializeServerFailure
/** * De-serializes the failure message sent to the {@link * org.apache.flink.queryablestate.network.Client} in case of server related errors. * * <pre> * <b>The buffer is expected to be at the correct position.</b> * </pre> * * @param buf The {@link ByteBuf} containing the serialized failure message. * @return The failure message. */ public static Throwable deserializeServerFailure(final ByteBuf buf) throws IOException, ClassNotFoundException { try (ByteBufInputStream bis = new ByteBufInputStream(buf); ObjectInputStream in = new ObjectInputStream(bis)) { return (Throwable) in.readObject(); } }
3.68
rocketmq-connect_MemoryClusterManagementServiceImpl_getAllAliveWorkers
/** * Get all alive workers in the cluster. * * @return the ids of all alive workers */ @Override public List<String> getAllAliveWorkers() { return Collections.singletonList(this.config.getWorkerId()); }
3.68
hadoop_MoveStep_getBytesToMove
/** * Returns number of bytes to move. * * @return - long */ @Override public long getBytesToMove() { return bytesToMove; }
3.68
flink_DefaultRollingPolicy_withMaxPartSize
/** * Sets the part size above which a part file will have to roll. * * @param size the allowed part size. * @deprecated Use {@link #withMaxPartSize(MemorySize)} instead. */ @Deprecated public DefaultRollingPolicy.PolicyBuilder withMaxPartSize(final long size) { Preconditions.checkState(size > 0L); return new PolicyBuilder(size, rolloverInterval, inactivityInterval); }
3.68
morf_SchemaModificationAdapter_dropExistingViewsIfNecessary
/** * Drops all views from the existing schema if it has not already done so. This should be called whenever tables are dropped or modified to guard against an invalid situation. */ private synchronized void dropExistingViewsIfNecessary() { if (viewsDropped) { return; } SqlScriptExecutor sqlExecutor = databaseDataSetConsumer.getSqlExecutor(); for (View view : schemaResource.views()) { sqlExecutor.execute(sqlDialect.dropStatements(view), connection); } viewsDropped = true; }
3.68
hbase_Triple_create
// ctor cannot infer types w/o warning but a method can. public static <A, B, C> Triple<A, B, C> create(A first, B second, C third) { return new Triple<>(first, second, third); }
3.68
zxing_FinderPatternFinder_haveMultiplyConfirmedCenters
/** * @return true iff we have found at least 3 finder patterns that have been detected * at least {@link #CENTER_QUORUM} times each, and, the estimated module size of the * candidates is "pretty similar" */ private boolean haveMultiplyConfirmedCenters() { int confirmedCount = 0; float totalModuleSize = 0.0f; int max = possibleCenters.size(); for (FinderPattern pattern : possibleCenters) { if (pattern.getCount() >= CENTER_QUORUM) { confirmedCount++; totalModuleSize += pattern.getEstimatedModuleSize(); } } if (confirmedCount < 3) { return false; } // OK, we have at least 3 confirmed centers, but, it's possible that one is a "false positive" // and that we need to keep looking. We detect this by asking if the estimated module sizes // vary too much. We arbitrarily say that when the total deviation from average exceeds // 5% of the total module size estimates, it's too much. float average = totalModuleSize / max; float totalDeviation = 0.0f; for (FinderPattern pattern : possibleCenters) { totalDeviation += Math.abs(pattern.getEstimatedModuleSize() - average); } return totalDeviation <= 0.05f * totalModuleSize; }
3.68
hadoop_IOStatisticsBinding_aggregateCounters
/** * Aggregate two counters. * @param l left value * @param r right value * @return the aggregate value */ public static Long aggregateCounters(Long l, Long r) { return Math.max(l, 0) + Math.max(r, 0); }
3.68
flink_ExecutionEnvironment_createRemoteEnvironment
/** * Creates a {@link RemoteEnvironment}. The remote environment sends (parts of) the program to a * cluster for execution. Note that all file paths used in the program must be accessible from * the cluster. The execution will use the specified parallelism. * * @param host The host name or address of the master (JobManager), where the program should be * executed. * @param port The port of the master (JobManager), where the program should be executed. * @param parallelism The parallelism to use during the execution. * @param jarFiles The JAR files with code that needs to be shipped to the cluster. If the * program uses user-defined functions, user-defined input formats, or any libraries, those * must be provided in the JAR files. * @return A remote environment that executes the program on a cluster. */ public static ExecutionEnvironment createRemoteEnvironment( String host, int port, int parallelism, String... jarFiles) { RemoteEnvironment rec = new RemoteEnvironment(host, port, jarFiles); rec.setParallelism(parallelism); return rec; }
3.68
hbase_Scan_setCaching
/** * Set the number of rows for caching that will be passed to scanners. If not set, the * Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will apply. Higher * caching values will enable faster scanners but will use more memory. * @param caching the number of rows for caching */ public Scan setCaching(int caching) { this.caching = caching; return this; }
3.68
flink_AvroFactory_create
/** * Creates Avro Writer and Reader for a specific type. * * <p>Given an input type, and possible the current schema, and a previously known schema (also * known as writer schema) create will deduce the best way to initialize a reader and writer * according to the following rules: * * <ul> * <li>If type is an Avro generated class (an {@link SpecificRecord} then the reader would use * the previousSchema for reading (if present) otherwise it would use the schema attached * to the auto generated class. * <li>If the type is a GenericRecord then the reader and the writer would be created with the * supplied (mandatory) schema. * <li>Otherwise, we use Avro's reflection based reader and writer that would deduce the * schema via reflection. If the previous schema is also present (when restoring a * serializer for example) then the reader would be created with both schemas. * </ul> */ static <T> AvroFactory<T> create( Class<T> type, @Nullable Schema currentSchema, @Nullable Schema previousSchema) { final ClassLoader cl = Thread.currentThread().getContextClassLoader(); if (SpecificRecord.class.isAssignableFrom(type)) { return fromSpecific(type, cl, Optional.ofNullable(previousSchema)); } if (GenericRecord.class.isAssignableFrom(type)) { return fromGeneric(cl, currentSchema); } return fromReflective(type, cl, Optional.ofNullable(previousSchema)); }
3.68
hbase_ZKWatcher_getZNodePaths
/** * Get the znodePaths. * <p> * Mainly used for mocking as mockito can not mock a field access. */ public ZNodePaths getZNodePaths() { return znodePaths; }
3.68
framework_BrowserInfo_getGeckoVersion
/** * Returns the Gecko version if the browser is Gecko based. The Gecko * version is 1.8 for Firefox 2 and 1.9 for Firefox 3. * * @return The Gecko version or -1 if the browser is not Gecko based */ public float getGeckoVersion() { if (!browserDetails.isGecko()) { return -1; } return browserDetails.getBrowserEngineVersion(); }
3.68
hadoop_AzureNativeFileSystemStore_connectUsingSASCredentials
/** * Connect to Azure storage using shared access signature credentials. */ private void connectUsingSASCredentials(final String accountName, final String containerName, final String sas) throws InvalidKeyException, StorageException, IOException, URISyntaxException { StorageCredentials credentials = new StorageCredentialsSharedAccessSignature( sas); connectingUsingSAS = true; connectUsingCredentials(accountName, credentials, containerName); }
3.68
hadoop_JsonSerialization_fromJsonStream
/** * Read from an input stream. * @param stream stream to read from * @return the parsed entity * @throws IOException IO problems * @throws JsonParseException If the input is not well-formatted * @throws JsonMappingException failure to map from the JSON to this class */ public synchronized T fromJsonStream(InputStream stream) throws IOException { return mapper.readValue(stream, classType); }
3.68
framework_MouseEvents_getButton
/** * Returns an identifier describing which mouse button the user pushed. * Compare with {@link MouseButton#LEFT},{@link MouseButton#MIDDLE}, * {@link MouseButton#RIGHT} to find out which button it is. * * @return one of {@link MouseButton#LEFT}, {@link MouseButton#MIDDLE}, * {@link MouseButton#RIGHT}. */ public MouseButton getButton() { return details.getButton(); }
3.68
hudi_SqlQueryPreCommitValidator_validateRecordsBeforeAndAfter
/** * Takes input datasets 1) before commit started and 2) with inflight commit. Perform required validation * and throw error if validation fails */ @Override public void validateRecordsBeforeAndAfter(Dataset<Row> before, Dataset<Row> after, final Set<String> partitionsAffected) { String hoodieTableName = "staged_table_" + TABLE_COUNTER.incrementAndGet(); String hoodieTableBeforeCurrentCommit = hoodieTableName + "_before"; String hoodieTableWithInflightCommit = hoodieTableName + "_after"; before.registerTempTable(hoodieTableBeforeCurrentCommit); after.registerTempTable(hoodieTableWithInflightCommit); JavaSparkContext jsc = HoodieSparkEngineContext.getSparkContext(getEngineContext()); SQLContext sqlContext = new SQLContext(jsc); String[] queries = getQueriesToRun(); Arrays.asList(queries).parallelStream().forEach( query -> validateUsingQuery(query, hoodieTableBeforeCurrentCommit, hoodieTableWithInflightCommit, sqlContext)); }
3.68
hudi_StreamWriteFunction_preWrite
/** * Sets up before flush: patch up the first record with correct partition path and fileID. * * <p>Note: the method may modify the given records {@code records}. */ public void preWrite(List<HoodieRecord> records) { // rewrite the first record with expected fileID HoodieRecord<?> first = records.get(0); HoodieRecord<?> record = new HoodieAvroRecord<>(first.getKey(), (HoodieRecordPayload) first.getData(), first.getOperation()); HoodieRecordLocation newLoc = new HoodieRecordLocation(first.getCurrentLocation().getInstantTime(), fileID); record.setCurrentLocation(newLoc); records.set(0, record); }
3.68
morf_Version2to4TransformingReader_read
/** * @see java.io.Reader#read(char[], int, int) */ @Override public int read(char[] cbuf, int off, int len) throws IOException { // We need to transform &#0; into \0... int charsRead; // if there's no temporary buffer from a previous call, read from the main source if (temporary.length == 0) { // This is the common path charsRead = delegateReader.read(cbuf, off, len); } else { // there is a temporary buffer from a previous match, use that if (temporary.length > len) { // The temporary buffer is too big to fit in the buffer that's been supplied. This is an edge case, but we need to deal with it. // Copy out what we can, then create another temporary buffer for the remainder System.arraycopy(temporary, 0, cbuf, off, len); charsRead = len; char[] newTemporary = new char[temporary.length-len]; System.arraycopy(temporary, len, newTemporary, 0, temporary.length-len); temporary = newTemporary; } else { // copy the entire temporary buffer into the output System.arraycopy(temporary, 0, cbuf, off, temporary.length); charsRead = temporary.length; temporary = new char[] {}; } } // now search for the string we're replacing for (int idx = 0; idx < charsRead; idx++) { // we need to skip chars if we've put a backslash in the output if (skipChars > 0) { skipChars--; continue; } char testChar = cbuf[off + idx]; if (testChar == '&') { // look for the ampersand // The first char matches. // Check whether the subsequent chars make up the ref ReferenceInfo characterReferenceToTransform = characterReferenceToTransform(cbuf, off + idx, charsRead - idx); if (characterReferenceToTransform != null) { String escapedString = String.format("\\u%04x", characterReferenceToTransform.referenceValue); return processEscape(cbuf, off, charsRead, idx, characterReferenceToTransform.sequenceLength, escapedString); } } if (testChar == '\\' && inputVersion == 2) { // backslash gets escaped to a double-backslash, but not in v3 as this will already have been done. return processEscape(cbuf, off, charsRead, idx, 1, "\\\\"); } if (testChar == '\ufffe') { // unusual unicode that's not valid XML return processEscape(cbuf, off, charsRead, idx, 1, "\\ufffe"); } if (testChar == '\uffff') { // unusual unicode that's not valid XML return processEscape(cbuf, off, charsRead, idx, 1, "\\uffff"); } } // If we got here we found no matches to replace, so we can just return the buffer as read. // This is the common path return charsRead; }
3.68
pulsar_PulsarClientException_wrap
// wrap an exception to enriching more info messages. public static Throwable wrap(Throwable t, String msg) { msg += "\n" + t.getMessage(); // wrap an exception with new message info if (t instanceof TimeoutException) { return new TimeoutException(msg); } else if (t instanceof InvalidConfigurationException) { return new InvalidConfigurationException(msg); } else if (t instanceof AuthenticationException) { return new AuthenticationException(msg); } else if (t instanceof IncompatibleSchemaException) { return new IncompatibleSchemaException(msg); } else if (t instanceof TooManyRequestsException) { return new TooManyRequestsException(msg); } else if (t instanceof LookupException) { return new LookupException(msg); } else if (t instanceof ConnectException) { return new ConnectException(msg); } else if (t instanceof AlreadyClosedException) { return new AlreadyClosedException(msg); } else if (t instanceof TopicTerminatedException) { return new TopicTerminatedException(msg); } else if (t instanceof AuthorizationException) { return new AuthorizationException(msg); } else if (t instanceof GettingAuthenticationDataException) { return new GettingAuthenticationDataException(msg); } else if (t instanceof UnsupportedAuthenticationException) { return new UnsupportedAuthenticationException(msg); } else if (t instanceof BrokerPersistenceException) { return new BrokerPersistenceException(msg); } else if (t instanceof BrokerMetadataException) { return new BrokerMetadataException(msg); } else if (t instanceof ProducerBusyException) { return new ProducerBusyException(msg); } else if (t instanceof ConsumerBusyException) { return new ConsumerBusyException(msg); } else if (t instanceof NotConnectedException) { return new NotConnectedException(); } else if (t instanceof InvalidMessageException) { return new InvalidMessageException(msg); } else if (t instanceof InvalidTopicNameException) { return new InvalidTopicNameException(msg); } else if (t instanceof NotSupportedException) { return new NotSupportedException(msg); } else if (t instanceof NotAllowedException) { return new NotAllowedException(msg); } else if (t instanceof ProducerQueueIsFullError) { return new ProducerQueueIsFullError(msg); } else if (t instanceof ProducerBlockedQuotaExceededError) { return new ProducerBlockedQuotaExceededError(msg); } else if (t instanceof ProducerBlockedQuotaExceededException) { return new ProducerBlockedQuotaExceededException(msg); } else if (t instanceof ChecksumException) { return new ChecksumException(msg); } else if (t instanceof CryptoException) { return new CryptoException(msg); } else if (t instanceof ConsumerAssignException) { return new ConsumerAssignException(msg); } else if (t instanceof MessageAcknowledgeException) { return new MessageAcknowledgeException(msg); } else if (t instanceof TransactionConflictException) { return new TransactionConflictException(msg); } else if (t instanceof TransactionHasOperationFailedException) { return new TransactionHasOperationFailedException(msg); } else if (t instanceof PulsarClientException) { return new PulsarClientException(msg); } else if (t instanceof CompletionException) { return t; } else if (t instanceof RuntimeException) { return new RuntimeException(msg, t.getCause()); } else if (t instanceof InterruptedException) { return t; } else if (t instanceof ExecutionException) { return t; } return t; }
3.68
flink_BinarySegmentUtils_getShort
/** * get short from segments. * * @param segments target segments. * @param offset value offset. */ public static short getShort(MemorySegment[] segments, int offset) { if (inFirstSegment(segments, offset, 2)) { return segments[0].getShort(offset); } else { return getShortMultiSegments(segments, offset); } }
3.68
hbase_BloomFilterFactory_getMaxKeys
/** Returns max key for the Bloom filter from the configuration */ public static int getMaxKeys(Configuration conf) { return conf.getInt(IO_STOREFILE_BLOOM_MAX_KEYS, 128 * 1000 * 1000); }
3.68
dubbo_RpcStatus_endCount
/** * @param url * @param methodName * @param elapsed * @param succeeded */ public static void endCount(URL url, String methodName, long elapsed, boolean succeeded) { endCount(getStatus(url), elapsed, succeeded); endCount(getStatus(url, methodName), elapsed, succeeded); }
3.68
flink_DefaultExecutionGraph_getArchivedExecutionConfig
/** * Returns the serializable {@link ArchivedExecutionConfig}. * * @return ArchivedExecutionConfig which may be null in case of errors */ @Override public ArchivedExecutionConfig getArchivedExecutionConfig() { // create a summary of all relevant data accessed in the web interface's JobConfigHandler try { ExecutionConfig executionConfig = jobInformation.getSerializedExecutionConfig().deserializeValue(userClassLoader); if (executionConfig != null) { return executionConfig.archive(); } } catch (IOException | ClassNotFoundException e) { LOG.error("Couldn't create ArchivedExecutionConfig for job {} ", getJobID(), e); } return null; }
3.68
morf_NamedParameterPreparedStatement_getIndexesForParameter
/** * For testing only. * * @return the list of indexes at which the named * parameter can be found. */ List<Integer> getIndexesForParameter(String parameterName) { return indexMap.get(parameterName) == null ? ImmutableList.<Integer>of() : ImmutableList.copyOf(indexMap.get(parameterName)); }
3.68
hibernate-validator_ValueExtractorResolver_getValueExtractorCandidatesForContainerDetectionOfGlobalCascadedValidation
/** * Used to determine the possible value extractors that can be applied to a declared type. * <p> * Used when building cascading metadata in {@link CascadingMetaDataBuilder} to decide if it should be promoted to * {@link ContainerCascadingMetaData} with cascaded constrained type arguments. * <p> * An example could be when we need to upgrade BV 1.1 style {@code @Valid private List<SomeBean> list;} * to {@code private List<@Valid SomeBean> list;} * <p> * Searches only for maximally specific value extractors based on a type. * <p> * Types that are assignable to {@link Map} are handled as a special case - key value extractor is ignored for them. */ public Set<ValueExtractorDescriptor> getValueExtractorCandidatesForContainerDetectionOfGlobalCascadedValidation(Type enclosingType) { // if it's a Map assignable type, it gets a special treatment to conform to the Bean Validation specification boolean mapAssignable = TypeHelper.isAssignable( Map.class, enclosingType ); Class<?> enclosingClass = ReflectionHelper.getClassFromType( enclosingType ); return getRuntimeCompliantValueExtractors( enclosingClass, registeredValueExtractors ) .stream() .filter( ved -> !mapAssignable || !ved.equals( MapKeyExtractor.DESCRIPTOR ) ) .collect( Collectors.collectingAndThen( Collectors.toSet(), CollectionHelper::toImmutableSet ) ); }
3.68
flink_SimpleStreamFormat_isSplittable
/** This format is always not splittable. */ @Override public final boolean isSplittable() { return false; }
3.68
flink_MemoryManager_computeNumberOfPages
/** * Computes to how many pages the given number of bytes corresponds. If the given number of * bytes is not an exact multiple of a page size, the result is rounded down, such that a * portion of the memory (smaller than the page size) is not included. * * @param fraction the fraction of the total memory per slot * @return The number of pages to which the given fraction of memory corresponds */ public int computeNumberOfPages(double fraction) { validateFraction(fraction); return (int) (totalNumberOfPages * fraction); }
3.68
hadoop_AbstractConfigurableFederationPolicy_getPolicyContext
/** * Getter method for the {@link FederationPolicyInitializationContext}. * * @return the context for this policy. */ public FederationPolicyInitializationContext getPolicyContext() { return policyContext; }
3.68
hadoop_StoreContext_decrementGauge
/** * Decrement a gauge by a specific value. * @param statistic The operation to decrement * @param count the count to decrement */ public void decrementGauge(Statistic statistic, long count) { instrumentation.decrementGauge(statistic, count); }
3.68
hadoop_SaslInputStream_disposeSasl
/** * Disposes of any system resources or security-sensitive information Sasl * might be using. * * @exception SaslException * if a SASL error occurs. */ private void disposeSasl() throws SaslException { if (saslClient != null) { saslClient.dispose(); } if (saslServer != null) { saslServer.dispose(); } }
3.68
morf_AbstractSqlDialectTest_expectedSqlForMathOperations9
/** * @return expected SQL for math operation 9 */ protected String expectedSqlForMathOperations9() { return "a + b + (c / d) + e + 100 + f / 5"; }
3.68
flink_DefaultLeaderRetrievalService_notifyLeaderAddress
/** * Called by specific {@link LeaderRetrievalDriver} to notify leader address. * * @param leaderInformation new notified leader information address. The exception will be * handled by leader listener. */ @Override @GuardedBy("lock") public void notifyLeaderAddress(LeaderInformation leaderInformation) { final UUID newLeaderSessionID = leaderInformation.getLeaderSessionID(); final String newLeaderAddress = leaderInformation.getLeaderAddress(); synchronized (lock) { if (running) { if (!Objects.equals(newLeaderAddress, lastLeaderAddress) || !Objects.equals(newLeaderSessionID, lastLeaderSessionID)) { if (LOG.isDebugEnabled()) { if (newLeaderAddress == null && newLeaderSessionID == null) { LOG.debug( "Leader information was lost: The listener will be notified accordingly."); } else { LOG.debug( "New leader information: Leader={}, session ID={}.", newLeaderAddress, newLeaderSessionID); } } lastLeaderAddress = newLeaderAddress; lastLeaderSessionID = newLeaderSessionID; // Notify the listener only when the leader is truly changed. leaderListener.notifyLeaderAddress(newLeaderAddress, newLeaderSessionID); } } else { if (LOG.isDebugEnabled()) { LOG.debug( "Ignoring notification since the {} has already been closed.", leaderRetrievalDriver); } } } }
3.68
graphhopper_BaseGraph_withTurnCosts
// todo: maybe rename later, but for now this makes it easier to replace GraphBuilder public Builder withTurnCosts(boolean withTurnCosts) { this.withTurnCosts = withTurnCosts; return this; }
3.68
flink_Acknowledge_readResolve
/** * Read resolve to preserve the singleton object property. (per best practices, this should have * visibility 'protected') */ protected Object readResolve() throws java.io.ObjectStreamException { return INSTANCE; }
3.68
shardingsphere-elasticjob_GuaranteeService_isRegisterCompleteSuccess
/** * Judge whether sharding items are register complete success. * * @param shardingItems current sharding items * @return current sharding items are all complete success or not */ public boolean isRegisterCompleteSuccess(final Collection<Integer> shardingItems) { for (int each : shardingItems) { if (!jobNodeStorage.isJobNodeExisted(GuaranteeNode.getCompletedNode(each))) { return false; } } return true; }
3.68
hudi_HoodieMetaSyncOperations_updateLastReplicatedTimeStamp
/** * Update the timestamp of last replication. */ default void updateLastReplicatedTimeStamp(String tableName, String timeStamp) { }
3.68
hbase_CellUtil_matchingColumnFamilyAndQualifierPrefix
/** Returns True if matching column family and the qualifier starts with <code>qual</code> */ public static boolean matchingColumnFamilyAndQualifierPrefix(final Cell left, final byte[] fam, final byte[] qual) { return matchingFamily(left, fam) && PrivateCellUtil.qualifierStartsWith(left, qual); }
3.68
framework_VCalendar_getDayNames
/** * Get the names of the week days. */ public String[] getDayNames() { return dayNames; }
3.68
pulsar_PulsarConnectorConfig_getManagedLedgerCacheSizeMB
// --- ManagedLedger public long getManagedLedgerCacheSizeMB() { return managedLedgerCacheSizeMB; }
3.68
AreaShop_FileManager_getGroupNames
/** * Get a list of names of all groups. * @return A String list with all the names */ public List<String> getGroupNames() { ArrayList<String> result = new ArrayList<>(); for(RegionGroup group : getGroups()) { result.add(group.getName()); } return result; }
3.68
dubbo_SingleRouterChain_initWithRouters
/** * the resident routers must being initialized before address notification. * only for ut */ public void initWithRouters(List<Router> builtinRouters) { this.builtinRouters = builtinRouters; this.routers = new LinkedList<>(builtinRouters); }
3.68
morf_SqlDialect_md5HashHexEncoded
/** * @param toHash the String to convert * @return the md5 hash of the string. */ @SuppressWarnings("deprecation") private String md5HashHexEncoded(String toHash) { try { return CharSource.wrap(toHash).asByteSource(StandardCharsets.UTF_8).hash(Hashing.md5()).toString(); } catch (IOException e) { throw new RuntimeException("error when hashing string [" + toHash + "]", e); } }
3.68
hbase_TableMapReduceUtil_initMultiTableSnapshotMapperJob
/** * Sets up the job for reading from one or more table snapshots, with one or more scans per * snapshot. It bypasses hbase servers and read directly from snapshot files. * @param snapshotScans map of snapshot name to scans on that snapshot. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. * @param job The current job to adjust. Make sure the passed job is carrying all * necessary HBase configuration. * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via * the distributed cache (tmpjars). */ public static void initMultiTableSnapshotMapperJob(Map<String, Collection<Scan>> snapshotScans, Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?> outputValueClass, Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { MultiTableSnapshotInputFormat.setInput(job.getConfiguration(), snapshotScans, tmpRestoreDir); job.setInputFormatClass(MultiTableSnapshotInputFormat.class); if (outputValueClass != null) { job.setMapOutputValueClass(outputValueClass); } if (outputKeyClass != null) { job.setMapOutputKeyClass(outputKeyClass); } job.setMapperClass(mapper); Configuration conf = job.getConfiguration(); HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); if (addDependencyJars) { addDependencyJars(job); addDependencyJarsForClasses(job.getConfiguration(), MetricRegistry.class); } resetCacheConfig(job.getConfiguration()); }
3.68
framework_NativeSelectElement_setValue
/** * Select item of the native select element with the specified value. * * @param chars * value of the native select item will be selected */ public void setValue(CharSequence chars) throws ReadOnlyException { selectByText((String) chars); }
3.68
flink_OperationTreeBuilder_addColumns
/** Adds additional columns. Existing fields will be replaced if replaceIfExist is true. */ public QueryOperation addColumns( boolean replaceIfExist, List<Expression> fieldLists, QueryOperation child) { final List<Expression> newColumns; if (replaceIfExist) { final List<String> fieldNames = child.getResolvedSchema().getColumnNames(); newColumns = ColumnOperationUtils.addOrReplaceColumns(fieldNames, fieldLists); } else { newColumns = new ArrayList<>(fieldLists); newColumns.add(0, unresolvedRef("*")); } return project(newColumns, child, false); }
3.68
hbase_HRegionServer_reportRegionSizesForQuotas
/** * Reports the given map of Regions and their size on the filesystem to the active Master. * @param regionSizeStore The store containing region sizes * @return false if FileSystemUtilizationChore should pause reporting to master. true otherwise */ public boolean reportRegionSizesForQuotas(RegionSizeStore regionSizeStore) { RegionServerStatusService.BlockingInterface rss = rssStub; if (rss == null) { // the current server could be stopping. LOG.trace("Skipping Region size report to HMaster as stub is null"); return true; } try { buildReportAndSend(rss, regionSizeStore); } catch (ServiceException se) { IOException ioe = ProtobufUtil.getRemoteException(se); if (ioe instanceof PleaseHoldException) { LOG.trace("Failed to report region sizes to Master because it is initializing." + " This will be retried.", ioe); // The Master is coming up. Will retry the report later. Avoid re-creating the stub. return true; } if (rssStub == rss) { rssStub = null; } createRegionServerStatusStub(true); if (ioe instanceof DoNotRetryIOException) { DoNotRetryIOException doNotRetryEx = (DoNotRetryIOException) ioe; if (doNotRetryEx.getCause() != null) { Throwable t = doNotRetryEx.getCause(); if (t instanceof UnsupportedOperationException) { LOG.debug("master doesn't support ReportRegionSpaceUse, pause before retrying"); return false; } } } LOG.debug("Failed to report region sizes to Master. This will be retried.", ioe); } return true; }
3.68
hbase_HRegion_checkSplit
/**
 * Return the split point. An empty result indicates the region isn't splittable.
 */
public Optional<byte[]> checkSplit(boolean force) {
  // Can't split META
  if (this.getRegionInfo().isMetaRegion()) {
    return Optional.empty();
  }

  // Can't split a region that is closing.
  if (this.isClosing()) {
    return Optional.empty();
  }

  if (!force && !splitPolicy.shouldSplit()) {
    return Optional.empty();
  }

  byte[] ret = splitPolicy.getSplitPoint();
  if (ret != null && ret.length > 0) {
    ret = splitRestriction.getRestrictedSplitPoint(ret);
  }

  if (ret != null) {
    try {
      checkRow(ret, "calculated split");
    } catch (IOException e) {
      LOG.error("Ignoring invalid split for region {}", this, e);
      return Optional.empty();
    }
    return Optional.of(ret);
  } else {
    return Optional.empty();
  }
}
3.68
hbase_MasterObserver_postCompletedEnableTableAction
/**
 * Called after the enableTable operation has been requested. Called as part of enable table
 * procedure and it is async to the enable table RPC call.
 * @param ctx       the environment to interact with the framework and master
 * @param tableName the name of the table
 */
default void postCompletedEnableTableAction(
  final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName)
  throws IOException {
}
3.68
hbase_RegionCoprocessorHost_preStoreFileReaderOpen
/**
 * @param fs        filesystem to read from
 * @param p         path to the file
 * @param in        {@link FSDataInputStreamWrapper}
 * @param size      Full size of the file
 * @param cacheConf the cache configuration to use
 * @param r         original reference file. This will be non-null only when reading a split file.
 * @return a Reader instance to use instead of the base reader if overriding default behavior,
 *         null otherwise
 */
public StoreFileReader preStoreFileReaderOpen(final FileSystem fs, final Path p,
  final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
  final Reference r) throws IOException {
  if (coprocEnvironments.isEmpty()) {
    return null;
  }
  return execOperationWithResult(
    new ObserverOperationWithResult<RegionObserver, StoreFileReader>(regionObserverGetter, null) {
      @Override
      public StoreFileReader call(RegionObserver observer) throws IOException {
        return observer.preStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r, getResult());
      }
    });
}
3.68
framework_LayoutDependencyTree_logDependencyStatus
/**
 * Logs horizontal and vertical {@link LayoutDependency} state for the given
 * connector.
 *
 * @param connector
 *            the connector whose state to log, should not be {@code null}
 */
public void logDependencyStatus(ComponentConnector connector) {
    getLogger().info("====");
    String connectorId = connector.getConnectorId();
    getLogger().info(getDependency(connectorId, HORIZONTAL).toString());
    getLogger().info(getDependency(connectorId, VERTICAL).toString());
}
3.68
flink_StopWithSavepointTerminationManager_stopWithSavepoint
/**
 * Enforces the correct completion order of the passed {@code CompletableFuture} instances in
 * accordance to the contract of {@link StopWithSavepointTerminationHandler}.
 *
 * @param completedSavepointFuture The {@code CompletableFuture} of the savepoint creation step.
 * @param terminatedExecutionStatesFuture The {@code CompletableFuture} of the termination step.
 * @param mainThreadExecutor The executor the {@code StopWithSavepointTerminationHandler}
 *     operations run on.
 * @return A {@code CompletableFuture} containing the path to the created savepoint.
 */
public CompletableFuture<String> stopWithSavepoint(
        CompletableFuture<CompletedCheckpoint> completedSavepointFuture,
        CompletableFuture<Collection<ExecutionState>> terminatedExecutionStatesFuture,
        ComponentMainThreadExecutor mainThreadExecutor) {
    FutureUtils.assertNoException(
            completedSavepointFuture
                    // the completedSavepointFuture could also be completed by
                    // CheckpointCanceller which doesn't run in the mainThreadExecutor
                    .handleAsync(
                            (completedSavepoint, throwable) -> {
                                stopWithSavepointTerminationHandler.handleSavepointCreation(
                                        completedSavepoint, throwable);
                                return null;
                            },
                            mainThreadExecutor)
                    .thenRun(
                            () ->
                                    FutureUtils.assertNoException(
                                            // the execution termination has to run in a
                                            // separate Runnable to disconnect it from any
                                            // previous task failure handling
                                            terminatedExecutionStatesFuture.thenAcceptAsync(
                                                    stopWithSavepointTerminationHandler
                                                            ::handleExecutionsTermination,
                                                    mainThreadExecutor))));
    return stopWithSavepointTerminationHandler.getSavepointPath();
}
3.68
hbase_WALKeyImpl_setOrigLogSeqNum
/**
 * Used to set original sequenceId for WALKeyImpl during WAL replay
 */
public void setOrigLogSeqNum(final long sequenceId) {
  this.origLogSeqNum = sequenceId;
}
3.68
graphhopper_VectorTile_clearStringValue
/**
 * <pre>
 * Exactly one of these values must be present in a valid message
 * </pre>
 *
 * <code>optional string string_value = 1;</code>
 */
public Builder clearStringValue() {
  bitField0_ = (bitField0_ & ~0x00000001);
  stringValue_ = getDefaultInstance().getStringValue();
  onChanged();
  return this;
}
3.68
flink_SerializedCompositeKeyBuilder_build
/** Returns a serialized composite key, from whatever was set so far. */
@Nonnull
public byte[] build() throws IOException {
    return keyOutView.getCopyOfBuffer();
}
3.68
flink_Configuration_getValue
/**
 * Returns the value associated with the given config option as a string.
 *
 * @param configOption The configuration option
 * @return the (default) value associated with the given config option
 */
@PublicEvolving
public String getValue(ConfigOption<?> configOption) {
    return Optional.ofNullable(
                    getRawValueFromOption(configOption).orElseGet(configOption::defaultValue))
            .map(String::valueOf)
            .orElse(null);
}
3.68
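A small sketch of reading an option back as a string through the accessor above. The option key "taskmanager.host" and its default are defined locally for illustration, not taken from Flink's built-in options.

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

public class GetValueExample {

    // Locally defined option for illustration only.
    private static final ConfigOption<String> HOST =
            ConfigOptions.key("taskmanager.host").stringType().defaultValue("localhost");

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Nothing set yet, so the default value is rendered as a string.
        System.out.println(conf.getValue(HOST)); // "localhost"

        conf.setString(HOST, "worker-1");
        System.out.println(conf.getValue(HOST)); // "worker-1"
    }
}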
flink_StandardDeCompressors_getCommonSuffixes
/** Gets all common file extensions of supported file compression formats. */
public static Collection<String> getCommonSuffixes() {
    return COMMON_SUFFIXES;
}
3.68
hbase_QuotaTableUtil_createDeletesForExistingNamespaceSnapshotSizes
/**
 * Returns a list of {@code Delete} to remove all namespace snapshot entries from quota table.
 * @param connection connection to re-use
 */
static List<Delete> createDeletesForExistingNamespaceSnapshotSizes(Connection connection)
  throws IOException {
  return createDeletesForExistingSnapshotsFromScan(connection,
    createScanForNamespaceSnapshotSizes());
}
3.68
hbase_SingleColumnValueFilter_getFilterIfMissing
/**
 * Get whether entire row should be filtered if column is not found.
 * @return true if row should be skipped if column not found, false if row should be let through
 *         anyway
 */
public boolean getFilterIfMissing() {
  return filterIfMissing;
}
3.68
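A brief client-side sketch showing how the flag read by the getter above is typically set on a scan; the column family, qualifier, and value used here are placeholders.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterIfMissingExample {

    public static Scan buildScan() {
        SingleColumnValueFilter filter = new SingleColumnValueFilter(
                Bytes.toBytes("cf"), Bytes.toBytes("status"),
                CompareOperator.EQUAL, Bytes.toBytes("ACTIVE"));
        // Skip rows that do not contain cf:status at all;
        // getFilterIfMissing() now returns true for this filter.
        filter.setFilterIfMissing(true);
        return new Scan().setFilter(filter);
    }
}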
framework_GridLayout_space
/**
 * Moves the cursor forward by one. If the cursor goes out of the right grid
 * border, it is moved to the first column of the next row.
 *
 * @see #newLine()
 */
public void space() {
    cursorX++;
    if (cursorX >= getColumns()) {
        cursorX = 0;
        cursorY++;
    }
}
3.68
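A minimal layout sketch for the cursor behaviour above, assuming the classic Vaadin GridLayout API; component captions are placeholders.

import com.vaadin.ui.GridLayout;
import com.vaadin.ui.Label;

public class GridLayoutSpaceExample {

    public static GridLayout build() {
        GridLayout grid = new GridLayout(3, 2);
        grid.addComponent(new Label("A")); // placed at column 0, row 0; cursor moves to column 1
        grid.space();                      // skips column 1, row 0; cursor moves to column 2
        grid.addComponent(new Label("B")); // placed at column 2, row 0; cursor wraps to row 1
        grid.addComponent(new Label("C")); // placed at column 0, row 1
        return grid;
    }
}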
hbase_JmxCacheBuster_stop
/**
 * Stops the clearing of JMX metrics and restarting the Hadoop metrics system. This is needed for
 * some test environments where we manually inject sources or sinks dynamically.
 */
public static void stop() {
  stopped.set(true);
  ScheduledFuture future = fut.get();
  future.cancel(false);
}
3.68
rocketmq-connect_JdbcSourceConnector_start
/**
 * Start the component
 *
 * @param config component context
 */
@Override
public void start(KeyValue config) {
    originalConfig = config;
}
3.68