Dataset columns: name (string, lengths 12–178) · code_snippet (string, lengths 8–36.5k) · score (float64, range 3.26–3.68)
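Each row below is a (name, code_snippet, score) triple matching the schema above. As a minimal sketch of that shape (the record type and the sample values are illustrative assumptions, not part of the dataset), a row could be modeled in Java as:

// Hypothetical row model mirroring the columns above; Java 16+ record syntax.
public record SnippetRow(String name, String codeSnippet, double score) {}
// e.g. new SnippetRow("hadoop_NamenodeStatusReport_getWebScheme", "public String getWebScheme() { ... }", 3.68)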
framework_VScrollTable_toggleSelection
/** * Toggle the selection of the row. */ public void toggleSelection() { selected = !selected; selectionChanged = true; if (selected) { selectedRowKeys.add(String.valueOf(rowKey)); addStyleName("v-selected"); } else { removeStyleName("v-selected"); selectedRowKeys.remove(String.valueOf(rowKey)); } }
3.68
hadoop_NamenodeStatusReport_getWebScheme
/** * Get the scheme of the web address. * * @return The scheme of the web address. */ public String getWebScheme() { return this.webScheme; }
3.68
morf_MySqlMetaDataProvider_dataTypeFromSqlType
/** * SqlServerDialect maps CLOB data types to NVARCHAR(MAX) but NVARCHAR sqlTypes are mapped as Strings in the {@link DatabaseMetaDataProvider}. * This method uses the column width to determine whether a sqlType == Type.NVARCHAR should be mapped to a String * or to a CLOB data type. If the column width is large (> ~ 1G) it will be mapped to a CLOB data type. Otherwise * it will be mapped as a String. * * @see org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider#dataTypeFromSqlType(int, java.lang.String, int) */ @Override protected DataType dataTypeFromSqlType(int sqlType, String typeName, int width) { if (sqlType == Types.LONGVARCHAR && width > 1<<30) { return DataType.CLOB; } else { return super.dataTypeFromSqlType(sqlType, typeName, width); } }
3.68
flink_TypeExtractor_isHadoopWritable
// visible for testing static boolean isHadoopWritable(Class<?> typeClass) { // check if this is directly the writable interface if (typeClass.getName().equals(HADOOP_WRITABLE_CLASS)) { return false; } final HashSet<Class<?>> alreadySeen = new HashSet<>(); alreadySeen.add(typeClass); return hasHadoopWritableInterface(typeClass, alreadySeen); }
3.68
flink_EventTimeTrigger_create
/** * Creates an event-time trigger that fires once the watermark passes the end of the window. * * <p>Once the trigger fires all elements are discarded. Elements that arrive late immediately * trigger window evaluation with just this one element. */ public static EventTimeTrigger create() { return new EventTimeTrigger(); }
3.68
hadoop_FSBuilder_optDouble
/** * Set optional double parameter for the Builder. * * @param key key. * @param value value. * @return generic type B. * @see #opt(String, String) */ default B optDouble(@Nonnull String key, double value) { return opt(key, Double.toString(value)); }
3.68
hbase_HBaseTestingUtility_getHBaseCluster
/** * Get the Mini HBase cluster. * @return hbase cluster * @see #getHBaseClusterInterface() */ public MiniHBaseCluster getHBaseCluster() { return getMiniHBaseCluster(); }
3.68
flink_MutableHashTable_getPartitioningFanOutNoEstimates
/** * Gets the number of partitions to be used for an initial hash-table, when no estimates are * available. * * <p>The current logic makes sure that there are always between 10 and 127 partitions, and * close to 0.1 of the number of buffers. * * @param numBuffers The number of buffers available. * @return The number of partitions to use. */ public static int getPartitioningFanOutNoEstimates(int numBuffers) { return Math.max(10, Math.min(numBuffers / 10, MAX_NUM_PARTITIONS)); }
3.68
flink_StreamRecord_copy
/** * Creates a copy of this stream record. Uses the copied value as the value for the new record, * i.e., only copies timestamp fields. */ public StreamRecord<T> copy(T valueCopy) { StreamRecord<T> copy = new StreamRecord<>(valueCopy); copy.timestamp = this.timestamp; copy.hasTimestamp = this.hasTimestamp; return copy; }
3.68
querydsl_JTSLineStringExpression_numPoints
/** * The number of Points in this LineString. * * @return number of points */ public NumberExpression<Integer> numPoints() { if (numPoints == null) { numPoints = Expressions.numberOperation(Integer.class, SpatialOps.NUM_POINTS, mixin); } return numPoints; }
3.68
flink_CoreOptions_fileSystemConnectionLimitStreamInactivityTimeout
/** * If any connection limit is configured, this option can be optionally set to define after * which time (in milliseconds) inactive streams are reclaimed. This option can help to prevent * inactive streams from making up the full pool of limited connections, leaving no room for further * connections to be established. Unlimited timeout by default. */ public static ConfigOption<Long> fileSystemConnectionLimitStreamInactivityTimeout( String scheme) { return ConfigOptions.key("fs." + scheme + ".limit.stream-timeout") .longType() .defaultValue(0L); }
3.68
hadoop_StoreContext_submit
/** * Submit a closure for execution in the executor * returned by {@link #getExecutor()}. * @param <T> type of future * @param future future for the result. * @param call callable to invoke. * @return the future passed in */ public <T> CompletableFuture<T> submit( final CompletableFuture<T> future, final Callable<T> call) { getExecutor().submit(() -> LambdaUtils.eval(future, call)); return future; }
3.68
hadoop_OBSBlockOutputStream_getActiveBlock
/** * Synchronized accessor to the active block. * * @return the active block; null if there isn't one. */ synchronized OBSDataBlocks.DataBlock getActiveBlock() { return activeBlock; }
3.68
flink_CopyOnWriteSkipListStateMap_helpGetStateEntry
/** Returns the state entry of the node. */ private StateEntry<K, N, S> helpGetStateEntry(long node) { Node nodeStorage = getNodeSegmentAndOffset(node); MemorySegment segment = nodeStorage.nodeSegment; int offsetInSegment = nodeStorage.nodeOffset; int level = SkipListUtils.getLevel(segment, offsetInSegment); int keyDataLen = SkipListUtils.getKeyLen(segment, offsetInSegment); int keyDataOffset = offsetInSegment + SkipListUtils.getKeyDataOffset(level); K key = skipListKeySerializer.deserializeKey(segment, keyDataOffset, keyDataLen); N namespace = skipListKeySerializer.deserializeNamespace(segment, keyDataOffset, keyDataLen); long valuePointer = SkipListUtils.getValuePointer(segment, offsetInSegment); S state = helpGetState(valuePointer); return new StateEntry.SimpleStateEntry<>(key, namespace, state); }
3.68
pulsar_LoadManagerShared_removeMostServicingBrokersForNamespace
/** * Removes the brokers which have more bundles assigned to them in the same namespace as the incoming bundle than at * least one other available broker from consideration. * * @param assignedBundleName * Name of bundle to be assigned. * @param candidates * BrokersBase available for placement. * @param brokerToNamespaceToBundleRange * Map from brokers to namespaces to bundle ranges. */ public static void removeMostServicingBrokersForNamespace( final String assignedBundleName, final Set<String> candidates, final ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> brokerToNamespaceToBundleRange) { if (candidates.isEmpty()) { return; } final String namespaceName = getNamespaceNameFromBundleName(assignedBundleName); int leastBundles = Integer.MAX_VALUE; for (final String broker : candidates) { int bundles = (int) brokerToNamespaceToBundleRange .computeIfAbsent(broker, k -> ConcurrentOpenHashMap.<String, ConcurrentOpenHashSet<String>>newBuilder().build()) .computeIfAbsent(namespaceName, k -> ConcurrentOpenHashSet.<String>newBuilder().build()) .size(); leastBundles = Math.min(leastBundles, bundles); if (leastBundles == 0) { break; } } // Since `brokerToNamespaceToBundleRange` can be updated by other threads, // `leastBundles` may differ from the actual value. final int finalLeastBundles = leastBundles; candidates.removeIf( broker -> brokerToNamespaceToBundleRange.computeIfAbsent(broker, k -> ConcurrentOpenHashMap.<String, ConcurrentOpenHashSet<String>>newBuilder().build()) .computeIfAbsent(namespaceName, k -> ConcurrentOpenHashSet.<String>newBuilder().build()) .size() > finalLeastBundles); }
3.68
morf_AbstractSqlDialectTest_testSelectAndList
/** * Tests a select with a nested "and where" clause. */ @Test public void testSelectAndList() { SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE)) .where(and(ImmutableList.of( eq(new FieldReference(STRING_FIELD), "A0001"), greaterThan(new FieldReference(INT_FIELD), 20080101) ))); String value = varCharCast("'A0001'"); String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE ((stringField = " + stringLiteralPrefix() +value+") AND (intField > 20080101))"; assertEquals("Select with multiple where clauses", expectedSql, testDialect.convertStatementToSQL(stmt)); }
3.68
hbase_ProcedureEvent_suspend
/** Mark the event as not ready. */ public synchronized void suspend() { ready = false; if (LOG.isTraceEnabled()) { LOG.trace("Suspend " + toString()); } }
3.68
morf_SqlUtils_concat
/** * Returns an expression concatenating all the passed expressions. * * @param fields the expressions to concatenate. * @return the expression concatenating the passed expressions. */ public static ConcatenatedField concat(Iterable<? extends AliasedField> fields) { return new ConcatenatedField(fields); }
3.68
flink_SubtaskCommittableManager_getNumCommittables
/** * Returns the number of committables that have been received so far. * * @return number of so far received committables */ int getNumCommittables() { return requests.size() + numDrained + numFailed; }
3.68
hbase_PreviousBlockCompressionRatePredicator_shouldFinishBlock
/** * Returns <b>true</b> if the passed uncompressed size is larger than the limit calculated by * <code>updateLatestBlockSizes</code>. * @param uncompressed the uncompressed size of the current block. * @return true if the block should be finished. */ @Override public boolean shouldFinishBlock(int uncompressed) { if (uncompressed >= configuredMaxBlockSize) { return uncompressed >= adjustedBlockSize; } return false; }
3.68
hbase_RegionServerAccounting_getGlobalMemStoreHeapSize
/** Returns the global memstore on-heap size in the RegionServer */ public long getGlobalMemStoreHeapSize() { return this.globalMemStoreHeapSize.sum(); }
3.68
hbase_Increment_setReturnResults
/** * @param returnResults True (default) if the increment operation should return the results. A * client that is not interested in the result can save network bandwidth by * setting this to false. */ @Override public Increment setReturnResults(boolean returnResults) { super.setReturnResults(returnResults); return this; }
3.68
hbase_HBaseTestingUtility_getMiniHBaseCluster
/** * @return Current mini hbase cluster. Only has something in it after a call to * {@link #startMiniCluster()}. * @see #startMiniCluster() */ public MiniHBaseCluster getMiniHBaseCluster() { if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) { return (MiniHBaseCluster) this.hbaseCluster; } throw new RuntimeException( hbaseCluster + " not an instance of " + MiniHBaseCluster.class.getName()); }
3.68
hbase_RegionMover_excludeFile
/** * Path of file containing hostnames to be excluded during region movement. Exclude file should * have 'host:port' per line. Port is mandatory here as we can have many RS running on a single * host. */ public RegionMoverBuilder excludeFile(String excludefile) { this.excludeFile = excludefile; return this; }
3.68
hbase_Scan_createScanFromCursor
/** * Create a new Scan with a cursor. It only sets the position information, such as the start row key. The * others (like cfs, stop row, limit) should still be filled in by the user. * {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor} */ public static Scan createScanFromCursor(Cursor cursor) { return new Scan().withStartRow(cursor.getRow()); }
3.68
hadoop_MutableRatesWithAggregation_collectThreadLocalStates
/** * Collects states maintained in {@link ThreadLocal}, if any. */ synchronized void collectThreadLocalStates() { final ConcurrentMap<String, ThreadSafeSampleStat> localStats = threadLocalMetricsMap.get(); if (localStats != null) { aggregateLocalStatesToGlobalMetrics(localStats); } }
3.68
dubbo_ThreadlessExecutor_shutdown
/** * The following methods are still not supported */ @Override public void shutdown() { shutdownNow(); }
3.68
morf_H2Dialect_internalTableDeploymentStatements
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#tableDeploymentStatements(org.alfasoftware.morf.metadata.Table) */ @Override public Collection<String> internalTableDeploymentStatements(Table table) { List<String> statements = new ArrayList<>(); // Create the table deployment statement StringBuilder createTableStatement = new StringBuilder(); createTableStatement.append("CREATE "); if (table.isTemporary()) { createTableStatement.append("TEMPORARY "); } createTableStatement.append("TABLE "); createTableStatement.append(schemaNamePrefix()); createTableStatement.append(table.getName()); createTableStatement.append(" ("); List<String> primaryKeys = new ArrayList<>(); boolean first = true; for (Column column : table.columns()) { if (!first) { createTableStatement.append(", "); } createTableStatement.append(column.getName()).append(" "); createTableStatement.append(sqlRepresentationOfColumnType(column)); if (column.isAutoNumbered()) { int autoNumberStart = column.getAutoNumberStart() == -1 ? 1 : column.getAutoNumberStart(); createTableStatement.append(" AUTO_INCREMENT(").append(autoNumberStart) .append(") COMMENT 'AUTONUMSTART:[").append(autoNumberStart).append("]'"); } if (column.isPrimaryKey()) { primaryKeys.add(column.getName()); } first = false; } if (!primaryKeys.isEmpty()) { createTableStatement.append(", CONSTRAINT "); createTableStatement.append(table.getName()); createTableStatement.append("_PK PRIMARY KEY ("); createTableStatement.append(Joiner.on(", ").join(primaryKeys)); createTableStatement.append(")"); } createTableStatement.append(")"); statements.add(createTableStatement.toString()); return statements; }
3.68
pulsar_ClientConfiguration_setOperationTimeout
/** * Set the operation timeout <i>(default: 30 seconds)</i>. * <p> * Producer-create, subscribe and unsubscribe operations will be retried until this interval elapses, after which the * operation will be marked as failed * * @param operationTimeout * operation timeout * @param unit * time unit for {@code operationTimeout} */ public void setOperationTimeout(int operationTimeout, TimeUnit unit) { checkArgument(operationTimeout >= 0); confData.setOperationTimeoutMs(unit.toMillis(operationTimeout)); }
3.68
flink_InputGate_getPriorityEventAvailableFuture
/** * Notifies when a priority event has been enqueued. If this future is queried from the task thread, * it is guaranteed that a priority event is available and retrieved through {@link #getNext()}. */ public CompletableFuture<?> getPriorityEventAvailableFuture() { return priorityAvailabilityHelper.getAvailableFuture(); }
3.68
hadoop_LeveldbIterator_close
/** * Closes the iterator. */ @Override public void close() throws IOException { try { iter.close(); } catch (RuntimeException e) { throw new IOException(e.getMessage(), e); } }
3.68
flink_BlobLibraryCacheManager_releaseClassLoader
/** * Release the class loader to ensure any file descriptors are closed and the cached * libraries are deleted immediately. */ private void releaseClassLoader() { runReleaseHooks(); if (!wrapsSystemClassLoader) { try { ((Closeable) classLoader).close(); } catch (IOException e) { LOG.warn( "Failed to release user code class loader for " + Arrays.toString(libraries.toArray())); } } // clear potential references to user-classes in the singleton cache TypeFactory.defaultInstance().clearCache(); }
3.68
flink_HeapKeyedStateBackend_numKeyValueStateEntries
/** Returns the total number of state entries across all keys for the given namespace. */ @VisibleForTesting public int numKeyValueStateEntries(Object namespace) { int sum = 0; for (StateTable<?, ?, ?> state : registeredKVStates.values()) { sum += state.sizeOfNamespace(namespace); } return sum; }
3.68
rocketmq-connect_LocalPositionManagementServiceImpl_restorePosition
/** * restore position */ protected void restorePosition() { set(PositionChange.ONLINE, new ExtendRecordPartition(null, new HashMap<>()), new RecordOffset(new HashMap<>())); }
3.68
framework_MouseEvents_isAltKey
/** * Checks if the Alt key was down when the mouse event took place. * * @return true if Alt was down when the event occurred, false otherwise */ public boolean isAltKey() { return details.isAltKey(); }
3.68
hudi_HoodieStreamerMetrics_updateStreamerHeartbeatTimestamp
/** * Update heartbeat from deltastreamer ingestion job when active for a table. * * @param heartbeatTimestampMs the timestamp in milliseconds at which heartbeat is emitted. */ public void updateStreamerHeartbeatTimestamp(long heartbeatTimestampMs) { if (writeConfig.isMetricsOn()) { metrics.registerGauge(getMetricsName("deltastreamer", "heartbeatTimestampMs"), heartbeatTimestampMs); } }
3.68
hbase_MasterObserver_preModifyTableAction
/** * Called prior to modifying a table's properties. Called as part of modify table procedure and it * is async to the modify table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param currentDescriptor current TableDescriptor of the table * @param newDescriptor after modify operation, table will have this descriptor */ default void preModifyTableAction(final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName, final TableDescriptor currentDescriptor, final TableDescriptor newDescriptor) throws IOException { }
3.68
hadoop_IOStatisticsContext_getCurrentIOStatisticsContext
/** * Get the context's IOStatisticsContext. * * @return instance of IOStatisticsContext for the context. */ static IOStatisticsContext getCurrentIOStatisticsContext() { // the null check is just a safety check to highlight exactly where a null value would // be returned if HADOOP-18456 has resurfaced. return requireNonNull( IOStatisticsContextIntegration.getCurrentIOStatisticsContext(), "Null IOStatisticsContext"); }
3.68
framework_VErrorMessage_showAt
/** * Shows this error message next to given element. * * @param indicatorElement * * @since 7.2 */ public void showAt(Element indicatorElement) { showAt(DOM.asOld(indicatorElement)); }
3.68
AreaShop_DeletedFriendEvent_getFriend
/** * Get the OfflinePlayer that is getting deleted as friend. * @return The friend that is getting deleted */ public OfflinePlayer getFriend() { return friend; }
3.68
hudi_AvroInternalSchemaConverter_visitAvroSchemaToBuildType
/** * Converts an Avro schema into a Hudi type. * * @param schema an Avro schema. * @param visited tracks visited nodes during traversal of the Avro schema; used to check if the name of an Avro record schema is correct. * @param firstVisitRoot tracks whether the currently visited schema node is the root node. * @param nextId an initial id used to create ids for all fields. * @return a Hudi type matching the Avro schema. */ private static Type visitAvroSchemaToBuildType(Schema schema, Deque<String> visited, Boolean firstVisitRoot, AtomicInteger nextId) { switch (schema.getType()) { case RECORD: String name = schema.getFullName(); if (visited.contains(name)) { throw new HoodieSchemaException(String.format("cannot convert recursive avro record %s", name)); } visited.push(name); List<Schema.Field> fields = schema.getFields(); List<Type> fieldTypes = new ArrayList<>(fields.size()); int nextAssignId = nextId.get(); // when first visiting the root record, set nextAssignId = 0 if (firstVisitRoot) { nextAssignId = 0; } nextId.set(nextAssignId + fields.size()); fields.stream().forEach(field -> { fieldTypes.add(visitAvroSchemaToBuildType(field.schema(), visited, false, nextId)); }); visited.pop(); List<Types.Field> internalFields = new ArrayList<>(fields.size()); for (int i = 0; i < fields.size(); i++) { Schema.Field field = fields.get(i); Type fieldType = fieldTypes.get(i); internalFields.add(Types.Field.get(nextAssignId, AvroInternalSchemaConverter.isOptional(field.schema()), field.name(), fieldType, field.doc())); nextAssignId += 1; } // NOTE: We're keeping a tab of full-name here to make sure we stay // compatible across various Spark (>= 2.4) and Avro (>= 1.8.2) versions; // Avro will be properly handling fully-qualified names on its own (splitting // them up into namespace/struct-name pair) return Types.RecordType.get(internalFields, schema.getFullName()); case UNION: List<Type> fTypes = new ArrayList<>(); schema.getTypes().stream().forEach(t -> { fTypes.add(visitAvroSchemaToBuildType(t, visited, false, nextId)); }); return fTypes.get(0) == null ? fTypes.get(1) : fTypes.get(0); case ARRAY: Schema elementSchema = schema.getElementType(); int elementId = nextId.get(); nextId.set(elementId + 1); Type elementType = visitAvroSchemaToBuildType(elementSchema, visited, false, nextId); return Types.ArrayType.get(elementId, AvroInternalSchemaConverter.isOptional(schema.getElementType()), elementType); case MAP: int keyId = nextId.get(); int valueId = keyId + 1; nextId.set(valueId + 1); Type valueType = visitAvroSchemaToBuildType(schema.getValueType(), visited, false, nextId); return Types.MapType.get(keyId, valueId, Types.StringType.get(), valueType, AvroInternalSchemaConverter.isOptional(schema.getValueType())); default: return visitAvroPrimitiveToBuildInternalType(schema); } }
3.68
hbase_CompactSplit_deregisterChildren
/** * {@inheritDoc} */ @Override public void deregisterChildren(ConfigurationManager manager) { // No children to deregister }
3.68
hadoop_S3AReadOpContext_getPath
/** * Get the path of this read. * @return path. */ public Path getPath() { return path; }
3.68
hbase_ServerSideScanMetrics_getMetricsMap
/** * Get all of the values. If reset is true, we will reset all the AtomicLongs back to 0. * @param reset whether to reset the AtomicLongs to 0. * @return A Map of String -&gt; Long for metrics */ public Map<String, Long> getMetricsMap(boolean reset) { // Create a builder ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder(); for (Map.Entry<String, AtomicLong> e : this.counters.entrySet()) { long value = reset ? e.getValue().getAndSet(0) : e.getValue().get(); builder.put(e.getKey(), value); } // Build the immutable map so that people can't mess around with it. return builder.build(); }
3.68
framework_AbstractComponent_setStyleName
/* * Sets the component's style. Don't add a JavaDoc comment here, we use the * default documentation from implemented interface. */ @Override public void setStyleName(String style) { if (style == null || style.isEmpty()) { getState().styles = null; return; } if (getState().styles == null) { getState().styles = new ArrayList<>(); } List<String> styles = getState().styles; styles.clear(); StringTokenizer tokenizer = new StringTokenizer(style, " "); while (tokenizer.hasMoreTokens()) { styles.add(tokenizer.nextToken()); } }
3.68
hbase_SnapshotManifest_getTableDescriptor
/** * Get the table descriptor from the Snapshot */ public TableDescriptor getTableDescriptor() { return this.htd; }
3.68
hbase_MasterCoprocessorHost_preSplitRegion
/** * Invoked just before calling the split region procedure * @param tableName the table where the region belongs to * @param splitRow the split point */ public void preSplitRegion(final TableName tableName, final byte[] splitRow) throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { @Override public void call(MasterObserver observer) throws IOException { observer.preSplitRegion(this, tableName, splitRow); } }); }
3.68
hadoop_ExitUtil_halt
/** * Forcibly terminates the currently running Java virtual machine. * @param status status code * @param message message * @throws HaltException if {@link Runtime#halt(int)} is disabled. */ public static void halt(int status, String message) throws HaltException { halt(new HaltException(status, message)); }
3.68
flink_RegisteredRpcConnection_getTargetGateway
/** Gets the RegisteredGateway. This returns null until the registration is completed. */ public G getTargetGateway() { return targetGateway; }
3.68
flink_ActiveResourceManager_recordStartWorkerFailure
/** * Records a failure of starting a worker in the ResourceManager and returns whether the * maximum failure rate is reached. * * @return whether max failure rate is reached */ private boolean recordStartWorkerFailure() { startWorkerFailureRater.markEvent(); try { startWorkerFailureRater.checkAgainstThreshold(); } catch (ThresholdMeter.ThresholdExceedException e) { log.warn("Reaching max start worker failure rate: {}", e.getMessage()); return true; } return false; }
3.68
hmily_HmilyRepositoryEventDispatcher_getInstance
/** * Gets instance. * * @return the instance */ public static HmilyRepositoryEventDispatcher getInstance() { return INSTANCE; }
3.68
flink_FileInputFormat_getNumberOfRecords
/** * Gets the estimated number of records in the file, computed as the file size divided by * the average record width, rounded up. * * @return The estimated number of records in the file. * @see org.apache.flink.api.common.io.statistics.BaseStatistics#getNumberOfRecords() */ @Override public long getNumberOfRecords() { return (this.fileSize == SIZE_UNKNOWN || this.avgBytesPerRecord == AVG_RECORD_BYTES_UNKNOWN) ? NUM_RECORDS_UNKNOWN : (long) Math.ceil(this.fileSize / this.avgBytesPerRecord); }
3.68
morf_RenameTable_getOldTableName
/** * @return the existing table name to change from. */ public String getOldTableName() { return oldTableName; }
3.68
hudi_CompactionUtil_scheduleCompaction
/** * Schedules a new compaction instant. * * @param writeClient The write client * @param deltaTimeCompaction Whether the compaction is trigger by elapsed delta time * @param committed Whether the last instant was committed successfully */ public static void scheduleCompaction( HoodieFlinkWriteClient<?> writeClient, boolean deltaTimeCompaction, boolean committed) { if (committed) { writeClient.scheduleCompaction(Option.empty()); } else if (deltaTimeCompaction) { // if there are no new commits and the compaction trigger strategy is based on elapsed delta time, // schedules the compaction anyway. writeClient.scheduleCompaction(Option.empty()); } }
3.68
framework_AbstractSplitPanel_getMinSplitPosition
/** * Returns the current minimum position of the splitter, in * {@link #getMinSplitPositionUnit()} units. * * @return the minimum position of the splitter */ public float getMinSplitPosition() { return getSplitterState(false).minPosition; }
3.68
flink_CsvReader_includeFields
/** * Configures which fields of the CSV file should be included and which should be skipped. The * bits in the value (read from least significant to most significant) define whether the field * at the corresponding position in the CSV schema should be included. The parser will look at the * first {@code n} fields, where {@code n} is the position of the most significant non-zero bit. * The parser will skip over all fields where the bit at the corresponding position is zero, * and include the fields where the corresponding bit is one. * * <p>Examples: * * <ul> * <li>A mask of {@code 0x7} would include the first three fields. * <li>A mask of {@code 0x26} (binary {@code 100110}) would skip the first field, include * fields two and three, skip fields four and five, and include field six. * </ul> * * @param mask The bit mask defining which fields to include and which to skip. * @return The CSV reader instance itself, to allow for fluent function chaining. */ public CsvReader includeFields(long mask) { if (mask == 0) { throw new IllegalArgumentException( "The description of fields to parse excluded all fields. At least one field must be included."); } ArrayList<Boolean> fields = new ArrayList<Boolean>(); while (mask != 0) { fields.add((mask & 0x1L) != 0); mask >>>= 1; } boolean[] fieldsArray = new boolean[fields.size()]; for (int i = 0; i < fieldsArray.length; i++) { fieldsArray[i] = fields.get(i); } return includeFields(fieldsArray); }
3.68
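As a standalone illustration of the bit-mask decoding described in the includeFields Javadoc above (this sketch re-implements the snippet's decoding loop for the documented 0x26 example; it is plain Java, not Flink API code):

import java.util.ArrayList;
import java.util.List;

public class IncludeFieldsMaskDemo {
    public static void main(String[] args) {
        long mask = 0x26L; // binary 100110: include fields two, three and six
        List<Boolean> fields = new ArrayList<>();
        while (mask != 0) {
            fields.add((mask & 0x1L) != 0); // least significant bit maps to the first field
            mask >>>= 1;
        }
        System.out.println(fields); // prints [false, true, true, false, false, true]
    }
}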
hbase_HBaseCommonTestingUtility_deleteOnExit
/** Returns True if we should delete testing dirs on exit. */ boolean deleteOnExit() { String v = System.getProperty("hbase.testing.preserve.testdir"); // Let default be true, to delete on exit. return v == null ? true : !Boolean.parseBoolean(v); }
3.68
hadoop_JavaCommandLineBuilder_sysprop
/** * Add a system property definition -must be used before setting the main entry point * @param property * @param value */ public void sysprop(String property, String value) { Preconditions.checkArgument(property != null, "null property name"); Preconditions.checkArgument(value != null, "null value"); add("-D" + property + "=" + value); }
3.68
flink_StreamGraphGenerator_transformCoFeedback
/** * Transforms a {@code CoFeedbackTransformation}. * * <p>This will only transform feedback edges; the result of this transform will be wired to the * second input of a Co-Transform. The original input is wired directly to the first input of * the downstream Co-Transform. * * <p>This is responsible for creating the IterationSource and IterationSink which are used to * feed back the elements. */ private <F> Collection<Integer> transformCoFeedback(CoFeedbackTransformation<F> coIterate) { if (shouldExecuteInBatchMode) { throw new UnsupportedOperationException( "Iterations are not supported in BATCH" + " execution mode. If you want to execute such a pipeline, please set the " + "'" + ExecutionOptions.RUNTIME_MODE.key() + "'=" + RuntimeExecutionMode.STREAMING.name()); } // For Co-Iteration we don't need to transform the input and wire the input to the // head operator by returning the input IDs, the input is directly wired to the left // input of the co-operation. This transform only needs to return the ids of the feedback // edges, since they need to be wired to the second input of the co-operation. // create the fake iteration source/sink pair Tuple2<StreamNode, StreamNode> itSourceAndSink = streamGraph.createIterationSourceAndSink( coIterate.getId(), getNewIterationNodeId(), getNewIterationNodeId(), coIterate.getWaitTime(), coIterate.getParallelism(), coIterate.getMaxParallelism(), coIterate.getMinResources(), coIterate.getPreferredResources()); StreamNode itSource = itSourceAndSink.f0; StreamNode itSink = itSourceAndSink.f1; // We set the proper serializers for the sink/source streamGraph.setSerializers( itSource.getId(), null, null, coIterate.getOutputType().createSerializer(executionConfig)); streamGraph.setSerializers( itSink.getId(), coIterate.getOutputType().createSerializer(executionConfig), null, null); Collection<Integer> resultIds = Collections.singleton(itSource.getId()); // add the iterate to the already-seen-set with the result IDs, so that we can transform // the feedback edges and let them stop when encountering the iterate node alreadyTransformed.put(coIterate, resultIds); // so that we can determine the slot sharing group from all feedback edges List<Integer> allFeedbackIds = new ArrayList<>(); for (Transformation<F> feedbackEdge : coIterate.getFeedbackEdges()) { Collection<Integer> feedbackIds = transform(feedbackEdge); allFeedbackIds.addAll(feedbackIds); for (Integer feedbackId : feedbackIds) { streamGraph.addEdge(feedbackId, itSink.getId(), 0); } } String slotSharingGroup = determineSlotSharingGroup(null, allFeedbackIds); itSink.setSlotSharingGroup(slotSharingGroup); itSource.setSlotSharingGroup(slotSharingGroup); return Collections.singleton(itSource.getId()); }
3.68
hadoop_TaskId_getJobId
/** * Getter method for jobId. * @return the job identifier */ public final int getJobId() { return jobId.getID(); }
3.68
framework_VaadinSession_hasLock
/** * Checks if the current thread has exclusive access to the given * WrappedSession. * * @return true if this thread has exclusive access, false otherwise * @since 7.6 */ protected static boolean hasLock(VaadinService service, WrappedSession session) { ReentrantLock l = (ReentrantLock) service.getSessionLock(session); return l.isHeldByCurrentThread(); }
3.68
flink_BinarySegmentUtils_readDecimalData
/** Gets an instance of {@link DecimalData} from underlying {@link MemorySegment}. */ public static DecimalData readDecimalData( MemorySegment[] segments, int baseOffset, long offsetAndSize, int precision, int scale) { final int size = ((int) offsetAndSize); int subOffset = (int) (offsetAndSize >> 32); byte[] bytes = new byte[size]; copyToBytes(segments, baseOffset + subOffset, bytes, 0, size); return DecimalData.fromUnscaledBytes(bytes, precision, scale); }
3.68
open-banking-gateway_Xs2aConsentInfo_hasWrongCredentials
/** * Generic wrong credentials indicator. */ public boolean hasWrongCredentials(Xs2aContext ctx) { return null != ctx.getWrongAuthCredentials() && ctx.getWrongAuthCredentials(); }
3.68
rocketmq-connect_ExpressionBuilder_appendIdentifierDelimiter
/** * Append to this builder's expression the delimiter defined by this builder's * {@link IdentifierRules}. * * @return this builder to enable methods to be chained; never null */ public ExpressionBuilder appendIdentifierDelimiter() { sb.append(rules.identifierDelimiter()); return this; }
3.68
hbase_AsyncAdmin_getMasterCoprocessorNames
/** Returns a list of master coprocessors wrapped by {@link CompletableFuture} */ default CompletableFuture<List<String>> getMasterCoprocessorNames() { return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)) .thenApply(ClusterMetrics::getMasterCoprocessorNames); }
3.68
hbase_PrivateCellUtil_getTag
/** * Retrieve Cell's first tag, matching the passed in type * @param cell The Cell * @param type Type of the Tag to retrieve * @return Optional, empty if there is no tag of the passed in tag type */ public static Optional<Tag> getTag(Cell cell, byte type) { boolean bufferBacked = cell instanceof ByteBufferExtendedCell; int length = cell.getTagsLength(); int offset = bufferBacked ? ((ByteBufferExtendedCell) cell).getTagsPosition() : cell.getTagsOffset(); int pos = offset; while (pos < offset + length) { int tagLen; if (bufferBacked) { ByteBuffer tagsBuffer = ((ByteBufferExtendedCell) cell).getTagsByteBuffer(); tagLen = ByteBufferUtils.readAsInt(tagsBuffer, pos, TAG_LENGTH_SIZE); if (ByteBufferUtils.toByte(tagsBuffer, pos + TAG_LENGTH_SIZE) == type) { return Optional.of(new ByteBufferTag(tagsBuffer, pos, tagLen + TAG_LENGTH_SIZE)); } } else { tagLen = Bytes.readAsInt(cell.getTagsArray(), pos, TAG_LENGTH_SIZE); if (cell.getTagsArray()[pos + TAG_LENGTH_SIZE] == type) { return Optional .of(new ArrayBackedTag(cell.getTagsArray(), pos, tagLen + TAG_LENGTH_SIZE)); } } pos += TAG_LENGTH_SIZE + tagLen; } return Optional.empty(); }
3.68
hadoop_AbfsOutputStream_getWriteOperationsSize
/** * Getter to get the size of the task queue. * * @return the number of writeOperations in AbfsOutputStream. */ @VisibleForTesting public int getWriteOperationsSize() { return writeOperations.size(); }
3.68
flink_FlinkPreparingTableBase_getMonotonicity
/** * Obtains whether a given column is monotonic. * * @param columnName Column name * @return True if the given column is monotonic */ public SqlMonotonicity getMonotonicity(String columnName) { return SqlMonotonicity.NOT_MONOTONIC; }
3.68
hbase_ColumnFamilyDescriptorBuilder_setCacheDataOnWrite
/** * Set the setCacheDataOnWrite flag * @param value true if we should cache data blocks on write * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setCacheDataOnWrite(boolean value) { return setValue(CACHE_DATA_ON_WRITE_BYTES, Boolean.toString(value)); }
3.68
hadoop_S3AReadOpContext_getInputPolicy
/** * Get the IO policy. * @return the initial input policy. */ public S3AInputPolicy getInputPolicy() { return inputPolicy; }
3.68
hbase_JVMClusterUtil_waitForServerOnline
/** * Block until the region server has come online, indicating it is ready to be used. */ public void waitForServerOnline() { // The server is marked online after the init method completes inside of // the HRS#run method. HRS#init can fail for whatever reason. In those // cases, we'll jump out of the run without setting online flag. Check // stopRequested so we don't wait here on a flag that will never be flipped. regionServer.waitForServerOnline(); }
3.68
hbase_MasterObserver_postModifyTableStoreFileTracker
/** * Called after modifying a table's store file tracker. Called as part of modify table store file * tracker RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table * @param dstSFT the store file tracker */ default void postModifyTableStoreFileTracker( final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName, String dstSFT) throws IOException { }
3.68
flink_JobEdge_getPreProcessingOperationName
/** * Gets the name of the pre-processing operation for this input. * * @return The name of the pre-processing operation, or null, if none was set. */ public String getPreProcessingOperationName() { return preProcessingOperationName; }
3.68
flink_OneShotLatch_reset
/** Resets the latch so that {@link #isTriggered()} returns false. */ public void reset() { synchronized (lock) { triggered = false; } }
3.68
hadoop_TimelineEntity_getEvents
/** * Get a list of events related to the entity * * @return a list of events related to the entity */ @XmlElement(name = "events") public List<TimelineEvent> getEvents() { return events; }
3.68
framework_XhrConnection_setRequestStartTime
/** * Sets the relative time (see {@link Profiler#getRelativeTimeMillis()}) * when the request was sent. * * @param requestStartTime * the relative time when the request was sent */ private void setRequestStartTime(double requestStartTime) { this.requestStartTime = requestStartTime; }
3.68
hmily_HmilyRepositoryNode_getHmilyLockRootPath
/** * Get hmily lock root path. * * @return hmily lock root path */ public String getHmilyLockRootPath() { return Joiner.on("/").join("", ROOT_PATH_PREFIX, appName, HMILY_LOCK_GLOBAL); }
3.68
pulsar_ConcurrentLongLongPairHashMap_keys
/** * @return a new list of all keys (makes a copy). */ public List<LongPair> keys() { List<LongPair> keys = Lists.newArrayListWithExpectedSize((int) size()); forEach((key1, key2, value1, value2) -> keys.add(new LongPair(key1, key2))); return keys; }
3.68
hbase_ServerManager_checkAndRecordNewServer
/** * Check if a server with the same host and port already exists; if not, or if the existing one has a * smaller start code, record it. * @param serverName the server to check and record * @param sl the server load on the server * @return true if the server is recorded, otherwise, false */ boolean checkAndRecordNewServer(final ServerName serverName, final ServerMetrics sl) { ServerName existingServer = null; synchronized (this.onlineServers) { existingServer = findServerWithSameHostnamePortWithLock(serverName); if (existingServer != null && (existingServer.getStartcode() > serverName.getStartcode())) { LOG.info("Server serverName=" + serverName + " rejected; we already have " + existingServer.toString() + " registered with same hostname and port"); return false; } recordNewServerWithLock(serverName, sl); } // Tell our listeners that a server was added if (!this.listeners.isEmpty()) { for (ServerListener listener : this.listeners) { listener.serverAdded(serverName); } } // Note that we assume that same ts means same server, and don't expire in that case. // TODO: ts can theoretically collide due to clock shifts, so this is a bit hacky. if (existingServer != null && (existingServer.getStartcode() < serverName.getStartcode())) { LOG.info("Triggering server recovery; existingServer " + existingServer + " looks stale, new server:" + serverName); expireServer(existingServer); } return true; }
3.68
hbase_MasterObserver_postRemoveServers
/** * Called after servers are removed from rsgroup * @param ctx the environment to interact with the framework and master * @param servers set of servers to remove */ default void postRemoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx, Set<Address> servers) throws IOException { }
3.68
flink_AfterMatchSkipStrategy_getPatternName
/** Name of pattern that processing will be skipped to. */ public Optional<String> getPatternName() { return Optional.empty(); }
3.68
hbase_HBaseCommonTestingUtility_cleanupTestDir
/** * @param subdir Test subdir name. * @return True if we removed the test dir */ public boolean cleanupTestDir(final String subdir) { if (this.dataTestDir == null) { return false; } return deleteDir(new File(this.dataTestDir, subdir)); }
3.68
framework_ContainerOrderedWrapper_isFirstId
/* * Tests if the given item is the first item in the container. Don't add a * JavaDoc comment here, we use the default documentation from implemented * interface. */ @Override public boolean isFirstId(Object itemId) { if (ordered) { return ((Container.Ordered) container).isFirstId(itemId); } return first != null && first.equals(itemId); }
3.68
morf_InsertStatement_withDefaults
/** * Specifies the defaults to use when inserting new fields * * @param defaultValues the list of values to use as defaults * @return a statement with the changes applied. */ public InsertStatement withDefaults(AliasedFieldBuilder... defaultValues) { return copyOnWriteOrMutate( b -> b.withDefaults(defaultValues), () -> { for (AliasedField currentValue : Builder.Helper.buildAll(Lists.newArrayList(defaultValues))) { if (StringUtils.isBlank(currentValue.getAlias())) { throw new IllegalArgumentException("Cannot specify a blank alias for a field default"); } fieldDefaults.put(currentValue.getAlias(), currentValue); } } ); }
3.68
framework_ValidationResult_isError
/** * Checks if the result denotes an error. * <p> * <strong>Note:</strong> By default {@link ErrorLevel#INFO} and * {@link ErrorLevel#WARNING} are not considered to be errors. * * @return <code>true</code> if the result denotes an error, * <code>false</code> otherwise */ default boolean isError() { ErrorLevel errorLevel = getErrorLevel().orElse(null); return errorLevel != null && errorLevel != ErrorLevel.INFO && errorLevel != ErrorLevel.WARNING; }
3.68
hudi_HoodieAvroPayload_getRecordBytes
// for examples public byte[] getRecordBytes() { return recordBytes; }
3.68
hbase_Scan_hasFamilies
/** Returns true if familyMap is non-empty, false otherwise */ public boolean hasFamilies() { return !this.familyMap.isEmpty(); }
3.68
hbase_MasterFileSystem_createInitialFileSystemLayout
/** * Create initial layout in filesystem. * <ol> * <li>Check if the meta region exists and is readable, if not create it. Create hbase.version and * the hbase:meta directory if they do not exist.</li> * </ol> * Idempotent. */ private void createInitialFileSystemLayout() throws IOException { final String[] protectedSubDirs = new String[] { HConstants.BASE_NAMESPACE_DIR, HConstants.HFILE_ARCHIVE_DIRECTORY, HConstants.HBCK_SIDELINEDIR_NAME, MobConstants.MOB_DIR_NAME }; // With the introduction of RegionProcedureStore, // there's no need to create MasterProcWAL dir here anymore. See HBASE-23715 final String[] protectedSubLogDirs = new String[] { HConstants.HREGION_LOGDIR_NAME, HConstants.HREGION_OLDLOGDIR_NAME, HConstants.CORRUPT_DIR_NAME, ReplicationUtils.REMOTE_WAL_DIR_NAME }; // check if the root directory exists checkRootDir(this.rootdir, conf, this.fs); // Check the directories under rootdir. checkTempDir(this.tempdir, conf, this.fs); for (String subDir : protectedSubDirs) { checkSubDir(new Path(this.rootdir, subDir), HBASE_DIR_PERMS); } final String perms; if (!this.walRootDir.equals(this.rootdir)) { perms = HBASE_WAL_DIR_PERMS; } else { perms = HBASE_DIR_PERMS; } for (String subDir : protectedSubLogDirs) { checkSubDir(new Path(this.walRootDir, subDir), perms); } checkStagingDir(); // Handle the last few special files and set the final rootDir permissions // rootDir needs 'x' for all to support bulk load staging dir if (isSecurityEnabled) { fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms); fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms); } FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission(); if ( !currentRootPerms.getUserAction().implies(FsAction.EXECUTE) || !currentRootPerms.getGroupAction().implies(FsAction.EXECUTE) || !currentRootPerms.getOtherAction().implies(FsAction.EXECUTE) ) { LOG.warn("rootdir permissions do not contain 'execute' for user, group or other. " + "Automatically adding 'execute' permission for all"); fs.setPermission(this.rootdir, new FsPermission(currentRootPerms.getUserAction().or(FsAction.EXECUTE), currentRootPerms.getGroupAction().or(FsAction.EXECUTE), currentRootPerms.getOtherAction().or(FsAction.EXECUTE))); } }
3.68
hbase_BlockCacheUtil_getDataSize
/** Returns Size of data. */ public long getDataSize() { return dataSize; }
3.68
pulsar_SaslRoleToken_split
/** * Splits the string representation of a token into attributes pairs. * * @param tokenStr string representation of a token. * * @return a map with the attribute pairs of the token. * * @throws AuthenticationException thrown if the string representation of the token could not be broken into * attribute pairs. */ private static Map<String, String> split(String tokenStr) throws AuthenticationException { Map<String, String> map = new HashMap<String, String>(); StringTokenizer st = new StringTokenizer(tokenStr, ATTR_SEPARATOR); while (st.hasMoreTokens()) { String part = st.nextToken(); int separator = part.indexOf('='); if (separator == -1) { throw new AuthenticationException("Invalid authentication token"); } String key = part.substring(0, separator); String value = part.substring(separator + 1); map.put(key, value); } return map; }
3.68
AreaShop_WorldGuardRegionFlagsFeature_setFlag
/** * Set a WorldGuard region flag. * @param region The WorldGuard region to set * @param flag The flag to set * @param value The value to set the flag to * @param <V> The type of flag to set * @throws InvalidFlagFormat When the value of the flag is wrong */ private <V> void setFlag(ProtectedRegion region, Flag<V> flag, String value) throws InvalidFlagFormat { V current = region.getFlag(flag); V next = plugin.getWorldGuardHandler().parseFlagInput(flag, value); if(!Objects.equals(current, next)) { region.setFlag(flag, next); } }
3.68
hadoop_AddMountAttributes_getNewOrUpdatedMountTableEntryWithAttributes
/** * Retrieve mount table object with all attributes derived from this object. * The returned mount table can be either a new entry or the existing one with updated attributes. * * @param existingEntry Existing mount table entry. If null, a new mount table object is created, * otherwise the existing mount table object is updated. * @return MountTable object with updated attributes. * @throws IOException If mount table instantiation fails. */ public MountTable getNewOrUpdatedMountTableEntryWithAttributes(MountTable existingEntry) throws IOException { if (existingEntry == null) { return getMountTableForAddRequest(this.mount); } else { // Update the existing entry if it exists for (String nsId : this.getNss()) { if (!existingEntry.addDestination(nsId, this.getDest())) { System.err.println("Cannot add destination at " + nsId + " " + this.getDest()); return null; } } updateCommonAttributes(existingEntry); return existingEntry; } }
3.68
framework_GlobalResourceHandler_register
/** * Registers a resource to be served with a global URL. * <p> * A {@link ConnectorResource} registered for a {@link LegacyComponent} will * be set to be served with a global URL. Other resource types will be * ignored and thus not served by this handler. * * @param resource * the resource to register * @param ownerConnector * the connector to which the resource belongs */ public void register(Resource resource, ClientConnector ownerConnector) { if (resource instanceof ConnectorResource) { if (!(ownerConnector instanceof LegacyComponent)) { throw new IllegalArgumentException( "A normal ConnectorResource can only be registered for legacy components."); } ConnectorResource connectorResource = (ConnectorResource) resource; if (!legacyResourceKeys.containsKey(resource)) { String uri = LEGACY_TYPE + '/' + Integer.toString(nextLegacyId++); String filename = connectorResource.getFilename(); if (filename != null && !filename.isEmpty()) { uri += '/' + ResourceReference.encodeFileName(filename); } legacyResourceKeys.put(connectorResource, uri); legacyResources.put(uri, connectorResource); registerResourceUsage(connectorResource, ownerConnector); } } }
3.68
morf_GraphBasedUpgradeBuilder_create
/** * Creates new {@link GraphBasedUpgradeBuilder}. * * @param sourceSchema source schema * @param targetSchema target schema * @param connectionResources connection resources to be used * @param exclusiveExecutionSteps names of the upgrade step classes which should * be executed in an exclusive way * @param schemaChangeSequence to be used to build a * {@link GraphBasedUpgrade} * @param viewChanges view changes which need to be made to match * the target schema * @return new {@link GraphBasedUpgradeBuilder} instance */ GraphBasedUpgradeBuilder create( Schema sourceSchema, Schema targetSchema, ConnectionResources connectionResources, Set<String> exclusiveExecutionSteps, SchemaChangeSequence schemaChangeSequence, ViewChanges viewChanges) { return new GraphBasedUpgradeBuilder( visitorFactory, scriptGeneratorFactory, drawIOGraphPrinter, sourceSchema, targetSchema, connectionResources, exclusiveExecutionSteps, schemaChangeSequence, viewChanges); }
3.68
druid_Lexer_stringVal
/** * The value of a literal token, recorded as a string. For integers, leading 0x and 'l' suffixes are suppressed. */ public final String stringVal() { if (stringVal == null) { stringVal = subString(mark, bufPos); } return stringVal; }
3.68
framework_Highlight_show
/** * Highlight the given {@link Element} using the given color. * <p> * Pass the returned highlight {@link Element} to {@link #hide(Element)} to * remove this particular highlight. * </p> * * @param element * Element to highlight * @param color * Color of highlight * @return Highlight element */ static Element show(Element element, String color) { if (element != null) { if (highlights == null) { highlights = new HashSet<>(); } Element highlight = DOM.createDiv(); Style style = highlight.getStyle(); style.setTop(element.getAbsoluteTop(), Unit.PX); style.setLeft(element.getAbsoluteLeft(), Unit.PX); int width = element.getOffsetWidth(); if (width < MIN_WIDTH) { width = MIN_WIDTH; } style.setWidth(width, Unit.PX); int height = element.getOffsetHeight(); if (height < MIN_HEIGHT) { height = MIN_HEIGHT; } style.setHeight(height, Unit.PX); RootPanel.getBodyElement().appendChild(highlight); style.setPosition(Position.ABSOLUTE); style.setZIndex(VWindow.Z_INDEX + 1000); style.setBackgroundColor(color); style.setOpacity(DEFAULT_OPACITY); if (BrowserInfo.get().isIE()) { style.setProperty("filter", "alpha(opacity=" + (DEFAULT_OPACITY * 100) + ")"); } highlights.add(highlight); return highlight; } return null; }
3.68
hbase_CheckAndMutate_getTimeRange
/** Returns the time range to check */ public TimeRange getTimeRange() { return timeRange; }
3.68
morf_DatabaseTypeIdentifier_identifyFromMetaData
/** * Try to identify the database type from connection metadata. * * @return Database type, or null if none. */ public Optional<DatabaseType> identifyFromMetaData() { try { try (Connection connection = dataSource.getConnection()) { DatabaseMetaData metaData = connection.getMetaData(); String product = metaData.getDatabaseProductName(); String versionString = metaData.getDatabaseProductVersion().replaceAll("\n", "\\\\n"); int versionMajor = metaData.getDatabaseMajorVersion(); int versionMinor = metaData.getDatabaseMinorVersion(); log.info(String.format("Database product = [%s], version = [%s], v%d.%d", product, versionString, versionMajor, versionMinor)); return DatabaseType.Registry.findByProductName(product); } } catch (SQLException e) { throw new RuntimeException("SQL exception", e); } }
3.68
framework_VScrollTable_updateColumnProperties
/** For internal use only. May be removed or replaced in the future. */ public void updateColumnProperties(UIDL uidl) { updateColumnOrder(uidl); updateCollapsedColumns(uidl); UIDL vc = uidl.getChildByTagName("visiblecolumns"); if (vc != null) { tHead.updateCellsFromUIDL(vc); tFoot.updateCellsFromUIDL(vc); } updateHeader(uidl.getStringArrayAttribute("vcolorder")); updateFooter(uidl.getStringArrayAttribute("vcolorder")); if (uidl.hasVariable("noncollapsiblecolumns")) { noncollapsibleColumns = uidl .getStringArrayVariableAsSet("noncollapsiblecolumns"); } }
3.68
flink_ExecutionConfig_setExecutionRetryDelay
/** * Sets the delay between executions. * * @param executionRetryDelay The number of milliseconds the system will wait to retry. * @return The current execution configuration * @deprecated This method will be replaced by {@link #setRestartStrategy}. The {@link * RestartStrategies.FixedDelayRestartStrategyConfiguration} contains the delay between * successive execution attempts. */ @Deprecated public ExecutionConfig setExecutionRetryDelay(long executionRetryDelay) { if (executionRetryDelay < 0) { throw new IllegalArgumentException("The delay between retries must be non-negative."); } this.executionRetryDelay = executionRetryDelay; return this; }
3.68