Columns: name = string (lengths 12 to 178); code_snippet = string (lengths 8 to 36.5k); score = float64 (range 3.26 to 3.68)
framework_Calendar_expandStartDate
/** * Finds the first day of the week and returns a date representing the start * of that day. * * @param start * The actual date * @param expandToFullWeek * Should the returned date be moved to the start of the week * @return If expandToFullWeek is set then it returns the first day of the * week, else it returns a clone of the actual date with the time * set to the start of the day */ protected Date expandStartDate(Date start, boolean expandToFullWeek) { // If the duration is more than a week, use the monthly view and get the start and end weeks. // For example, if the view's date range runs from Tuesday to next week's Wednesday, expand it // to Monday through next week's Sunday (assuming the first day of the week is Monday). if (expandToFullWeek) { start = getFirstDateForWeek(start); } else { start = (Date) start.clone(); } // Always expand to the start of the day start = getStartOfDay(currentCalendar, start); return start; }
3.68
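A minimal, self-contained sketch of the same idea using only java.util.Calendar, for readers without the Vaadin internals (getFirstDateForWeek and getStartOfDay are not available here); the class and method names below are illustrative only, not the framework's API.

import java.util.Calendar;
import java.util.Date;

public class WeekStartSketch {
    // Returns a copy of 'start' moved to 00:00:00.000 of the same day
    // (or of the first day of its week when expandToFullWeek is true).
    static Date startOf(Date start, boolean expandToFullWeek) {
        Calendar cal = Calendar.getInstance(); // locale-dependent first day of week
        cal.setTime(start);
        if (expandToFullWeek) {
            cal.set(Calendar.DAY_OF_WEEK, cal.getFirstDayOfWeek());
        }
        cal.set(Calendar.HOUR_OF_DAY, 0);
        cal.set(Calendar.MINUTE, 0);
        cal.set(Calendar.SECOND, 0);
        cal.set(Calendar.MILLISECOND, 0);
        return cal.getTime();
    }

    public static void main(String[] args) {
        System.out.println(startOf(new Date(), false)); // start of today
        System.out.println(startOf(new Date(), true));  // start of this week
    }
}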
hadoop_Cluster_getQueue
/** * Get queue information for the specified name. * * @param name the queue name * @return object of {@link QueueInfo} * @throws IOException * @throws InterruptedException */ public QueueInfo getQueue(String name) throws IOException, InterruptedException { return client.getQueue(name); }
3.68
hbase_BalancerClusterState_wouldLowerAvailability
/** * Return true if the placement of region on server would lower the availability of the region in * question * @return true or false */ boolean wouldLowerAvailability(RegionInfo regionInfo, ServerName serverName) { if (!serversToIndex.containsKey(serverName.getAddress())) { return false; // safeguard against race between cluster.servers and servers from LB method // args } int server = serversToIndex.get(serverName.getAddress()); int region = regionsToIndex.get(regionInfo); // Region replicas for same region should better assign to different servers for (int i : regionsPerServer[server]) { RegionInfo otherRegionInfo = regions[i]; if (RegionReplicaUtil.isReplicasForSameRegion(regionInfo, otherRegionInfo)) { return true; } } int primary = regionIndexToPrimaryIndex[region]; if (primary == -1) { return false; } // there is a subset relation for server < host < rack // check server first int result = checkLocationForPrimary(server, colocatedReplicaCountsPerServer, primary); if (result != 0) { return result > 0; } // check host if (multiServersPerHost) { result = checkLocationForPrimary(serverIndexToHostIndex[server], colocatedReplicaCountsPerHost, primary); if (result != 0) { return result > 0; } } // check rack if (numRacks > 1) { result = checkLocationForPrimary(serverIndexToRackIndex[server], colocatedReplicaCountsPerRack, primary); if (result != 0) { return result > 0; } } return false; }
3.68
flink_FlinkRelBuilder_watermark
/** Build watermark assigner relational node. */ public RelBuilder watermark(int rowtimeFieldIndex, RexNode watermarkExpr) { final RelNode input = build(); final RelNode relNode = LogicalWatermarkAssigner.create(cluster, input, rowtimeFieldIndex, watermarkExpr); return push(relNode); }
3.68
morf_SqlUtils_insert
/** * Constructs an Insert Statement. * * <p>Usage is discouraged; this method will be deprecated at some point. Use * {@link InsertStatement#insert()} for preference.</p> * * @return {@link InsertStatement} */ public static InsertStatement insert() { return new InsertStatement(); }
3.68
morf_MySqlDialect_checkMaxIdAutonumberStatement
/** * Returns a statement which will check that the max id value on the table is less than the autonumber start value. */ private String checkMaxIdAutonumberStatement(Table table, Column autoIncrementColumn) { return "SELECT MAX(" + autoIncrementColumn.getName() + ") FROM " + table.getName(); }
3.68
dubbo_URLParam_hasParameter
/** * check if specified key is present in URLParam * * @param key specified key * @return present or not */ public boolean hasParameter(String key) { int keyIndex = DynamicParamTable.getKeyIndex(enableCompressed, key); if (keyIndex < 0) { return EXTRA_PARAMS.containsKey(key); } return KEY.get(keyIndex); }
3.68
framework_Compare_compareEquals
/** * Checks if this value equals the given value. Favors Comparable over * equals to better support e.g. BigDecimal where equals is stricter than * compareTo. * * @param otherValue * The value to compare to * @return true if the values are equal, false otherwise */ private boolean compareEquals(Object otherValue) { if (value == null || otherValue == null) { return (otherValue == value); } else if (value == otherValue) { return true; } else if (value instanceof Comparable && otherValue.getClass() .isAssignableFrom(getValue().getClass())) { return ((Comparable) value).compareTo(otherValue) == 0; } else { return value.equals(otherValue); } }
3.68
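The reason for favoring compareTo over equals can be seen with plain JDK BigDecimal, where equals also compares scale; a small stand-alone check:

import java.math.BigDecimal;

public class CompareVsEquals {
    public static void main(String[] args) {
        BigDecimal a = new BigDecimal("1.0");
        BigDecimal b = new BigDecimal("1.00");
        // equals() also compares scale, so these two differ...
        System.out.println(a.equals(b));         // false
        // ...while compareTo() only compares the numeric value.
        System.out.println(a.compareTo(b) == 0); // true
    }
}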
pulsar_AbstractSchema_validate
/** * Check if the message's readable bytes form a valid object for this schema. * * <p>The implementation can choose its most efficient approach to validate the schema. * If the implementation doesn't provide it, it will attempt to use {@link #decode(ByteBuf)} * to see if this schema can decode this message or not as a validation mechanism to verify * the bytes. * * @param byteBuf the message to verify * @return true if it is a valid message * @throws SchemaSerializationException if it is not a valid message */ void validate(ByteBuf byteBuf) { throw new SchemaSerializationException("This method is not supported"); }
3.68
hudi_AvroSchemaCompatibility_equals
/** * {@inheritDoc} */ @Override public boolean equals(Object other) { if ((other instanceof SchemaPairCompatibility)) { final SchemaPairCompatibility result = (SchemaPairCompatibility) other; return objectsEqual(result.mResult, mResult) && objectsEqual(result.mReader, mReader) && objectsEqual(result.mWriter, mWriter) && objectsEqual(result.mDescription, mDescription); } else { return false; } }
3.68
flink_OrCondition_getLeft
/** @return One of the {@link IterativeCondition conditions} combined in this condition. */ public IterativeCondition<T> getLeft() { return left; }
3.68
hadoop_AzureNativeFileSystemStore_addTestHookToOperationContext
/** * Add a test hook to modify the operation context we use for Azure Storage * operations. * * @param testHook * The test hook, or null to unset previous hooks. */ @VisibleForTesting void addTestHookToOperationContext(TestHookOperationContext testHook) { this.testHookOperationContext = testHook; }
3.68
framework_ContainerOrderedWrapper_addListener
/** * @deprecated As of 7.0, replaced by * {@link #addPropertySetChangeListener(Container.PropertySetChangeListener)} */ @Override @Deprecated public void addListener(Container.PropertySetChangeListener listener) { addPropertySetChangeListener(listener); }
3.68
hbase_HFileBlock_finishBlock
/** * Finish up writing of the block. Flushes the compressing stream (if using compression), fills * out the header, does any compression/encryption of bytes to flush out to disk, and manages * the cache on write content, if applicable. Sets block write state to "block ready". */ private void finishBlock() throws IOException { if (blockType == BlockType.DATA) { this.dataBlockEncoder.endBlockEncoding(dataBlockEncodingCtx, userDataStream, baosInMemory.getBuffer(), blockType); blockType = dataBlockEncodingCtx.getBlockType(); } userDataStream.flush(); prevOffset = prevOffsetByType[blockType.getId()]; // We need to cache the unencoded/uncompressed size before changing the block state int rawBlockSize = 0; if (this.getEncodingState() != null) { rawBlockSize = encodedBlockSizeWritten(); } // We need to set state before we can package the block up for cache-on-write. In a way, the // block is ready, but not yet encoded or compressed. state = State.BLOCK_READY; Bytes compressAndEncryptDat; if (blockType == BlockType.DATA || blockType == BlockType.ENCODED_DATA) { compressAndEncryptDat = dataBlockEncodingCtx.compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); } else { compressAndEncryptDat = defaultBlockEncodingCtx.compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); } if (compressAndEncryptDat == null) { compressAndEncryptDat = new Bytes(baosInMemory.getBuffer(), 0, baosInMemory.size()); } if (onDiskBlockBytesWithHeader == null) { onDiskBlockBytesWithHeader = new ByteArrayOutputStream(compressAndEncryptDat.getLength()); } onDiskBlockBytesWithHeader.reset(); onDiskBlockBytesWithHeader.write(compressAndEncryptDat.get(), compressAndEncryptDat.getOffset(), compressAndEncryptDat.getLength()); // Update raw and compressed sizes in the predicate compressedSizePredicator.updateLatestBlockSizes(fileContext, rawBlockSize, onDiskBlockBytesWithHeader.size()); // Calculate how many bytes we need for checksum on the tail of the block. int numBytes = (int) ChecksumUtil.numBytes(onDiskBlockBytesWithHeader.size(), fileContext.getBytesPerChecksum()); // Put the header for the on disk bytes; header currently is unfilled-out putHeader(onDiskBlockBytesWithHeader, onDiskBlockBytesWithHeader.size() + numBytes, baosInMemory.size(), onDiskBlockBytesWithHeader.size()); if (onDiskChecksum.length != numBytes) { onDiskChecksum = new byte[numBytes]; } ChecksumUtil.generateChecksums(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size(), onDiskChecksum, 0, fileContext.getChecksumType(), fileContext.getBytesPerChecksum()); }
3.68
framework_MenuItem_setCommand
/** * Sets the command associated with this item. * * @param cmd * the command to be associated with this item */ public void setCommand(Command cmd) { command = cmd; }
3.68
flink_RawFormatSerializationSchema_createConverter
/** Creates a runtime converter. */ private SerializationRuntimeConverter createConverter( LogicalType type, String charsetName, boolean isBigEndian) { final SerializationRuntimeConverter converter = createNotNullConverter(type, charsetName, isBigEndian); return new SerializationRuntimeConverter() { private static final long serialVersionUID = 1L; @Override public void open() { converter.open(); } @Override public byte[] convert(RowData row) throws IOException { if (row.isNullAt(0)) { return null; } return converter.convert(row); } }; }
3.68
flink_AbstractBlobCache_getPort
/** * Returns the port the BLOB server is listening on. * * @return BLOB server port or {@code -1} if no server address */ public int getPort() { final InetSocketAddress currentServerAddress = serverAddress; if (currentServerAddress != null) { return currentServerAddress.getPort(); } else { return -1; } }
3.68
morf_SqlServerDialect_indexDropStatements
/** * {@inheritDoc} * * @see org.alfasoftware.morf.jdbc.SqlDialect#indexDropStatements(org.alfasoftware.morf.metadata.Table, * org.alfasoftware.morf.metadata.Index) */ @Override public Collection<String> indexDropStatements(Table table, Index indexToBeRemoved) { return Arrays.asList("DROP INDEX " + indexToBeRemoved.getName() + " ON " + schemaNamePrefix() + table.getName()); }
3.68
hbase_AuthManager_authorizeCell
/** * Check if user has the given action privilege in cell scope. * @param user user name * @param table table name * @param cell cell to be checked * @param action one of the actions in [Read, Write, Create, Exec, Admin] * @return true if the user has the permission, false otherwise */ public boolean authorizeCell(User user, TableName table, Cell cell, Permission.Action action) { try { List<Permission> perms = PermissionStorage.getCellPermissionsForUser(user, cell); if (LOG.isTraceEnabled()) { LOG.trace("Perms for user {} in table {} in cell {}: {}", user.getShortName(), table, cell, (perms != null ? perms : "")); } if (perms != null) { for (Permission p : perms) { if (p.implies(action)) { return true; } } } } catch (IOException e) { // We failed to parse the KV tag LOG.error("Failed parse of ACL tag in cell " + cell); // Fall through to check with the table and CF perms we were able // to collect regardless } return false; }
3.68
pulsar_RangeCache_evictLEntriesBeforeTimestamp
/** * Evicts all entries whose timestamp is not greater than the given max timestamp. * * @param maxTimestamp the max timestamp of the entries to be evicted * @return a pair of (number of entries evicted, total size of the evicted entries) */ public Pair<Integer, Long> evictLEntriesBeforeTimestamp(long maxTimestamp) { long removedSize = 0; int removedCount = 0; while (true) { Map.Entry<Key, Value> entry = entries.firstEntry(); if (entry == null || timestampExtractor.getTimestamp(entry.getValue()) > maxTimestamp) { break; } Value value = entry.getValue(); boolean removeHits = entries.remove(entry.getKey(), value); if (!removeHits) { break; } removedSize += weighter.getSize(value); removedCount++; value.release(); } size.addAndGet(-removedSize); return Pair.of(removedCount, removedSize); }
3.68
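The eviction loop above relies on the firstEntry()/remove(key, value) semantics of a sorted concurrent map; a stand-alone sketch with a plain ConcurrentSkipListMap (keys standing in for timestamps, values for entry sizes; all names are illustrative, not the Pulsar API) shows the same pattern:

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;

public class TimestampEvictionSketch {
    public static void main(String[] args) {
        // key = insertion timestamp, value = payload size (stands in for the cached entry)
        ConcurrentSkipListMap<Long, Integer> entries = new ConcurrentSkipListMap<>();
        entries.put(100L, 10);
        entries.put(200L, 20);
        entries.put(300L, 30);

        long maxTimestamp = 250L;
        int removedCount = 0;
        long removedSize = 0;
        while (true) {
            Map.Entry<Long, Integer> first = entries.firstEntry();
            if (first == null || first.getKey() > maxTimestamp) {
                break; // nothing older than the cut-off remains
            }
            // remove(key, value) only succeeds if the mapping is unchanged,
            // mirroring the race-safe removal in the cache method above
            if (entries.remove(first.getKey(), first.getValue())) {
                removedCount++;
                removedSize += first.getValue();
            }
        }
        System.out.println(removedCount + " entries, " + removedSize + " bytes evicted");
    }
}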
framework_AbstractContainer_removeItemSetChangeListener
/** * Implementation of the corresponding method in * {@link ItemSetChangeNotifier}, override with the corresponding public * method and implement the interface to use this. * * @see ItemSetChangeNotifier#removeListener(Container.ItemSetChangeListener) */ protected void removeItemSetChangeListener( Container.ItemSetChangeListener listener) { if (getItemSetChangeListeners() != null) { getItemSetChangeListeners().remove(listener); } }
3.68
flink_StreamTask_getTaskNameWithSubtaskAndId
/** * Gets the name of the task, appended with the subtask indicator and execution id. * * @return The name of the task, with subtask indicator and execution id. */ String getTaskNameWithSubtaskAndId() { return getEnvironment().getTaskInfo().getTaskNameWithSubtasks() + " (" + getEnvironment().getExecutionId() + ')'; }
3.68
hbase_PermissionStorage_addUserPermission
/** * Stores a new user permission grant in the access control lists table. * @param conf the configuration * @param userPerm the details of the permission to be granted * @param t acl table instance. It is closed upon method return. * @throws IOException in the case of an error accessing the metadata table */ public static void addUserPermission(Configuration conf, UserPermission userPerm, Table t, boolean mergeExistingPermissions) throws IOException { Permission permission = userPerm.getPermission(); Permission.Action[] actions = permission.getActions(); byte[] rowKey = userPermissionRowKey(permission); Put p = new Put(rowKey); byte[] key = userPermissionKey(userPerm); if ((actions == null) || (actions.length == 0)) { String msg = "No actions associated with user '" + userPerm.getUser() + "'"; LOG.warn(msg); throw new IOException(msg); } Set<Permission.Action> actionSet = new TreeSet<Permission.Action>(); if (mergeExistingPermissions) { List<UserPermission> perms = getUserPermissions(conf, rowKey, null, null, null, false); UserPermission currentPerm = null; for (UserPermission perm : perms) { if (userPerm.equalsExceptActions(perm)) { currentPerm = perm; break; } } if (currentPerm != null && currentPerm.getPermission().getActions() != null) { actionSet.addAll(Arrays.asList(currentPerm.getPermission().getActions())); } } // merge current action with new action. actionSet.addAll(Arrays.asList(actions)); // serialize to byte array. byte[] value = new byte[actionSet.size()]; int index = 0; for (Permission.Action action : actionSet) { value[index++] = action.code(); } p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()) .setFamily(ACL_LIST_FAMILY).setQualifier(key).setTimestamp(p.getTimestamp()).setType(Type.Put) .setValue(value).build()); if (LOG.isDebugEnabled()) { LOG.debug("Writing permission with rowKey " + Bytes.toString(rowKey) + " " + Bytes.toString(key) + ": " + Bytes.toStringBinary(value)); } try { t.put(p); } finally { t.close(); } }
3.68
hbase_BitSetNode_isAllModified
/** Returns true, if all the procedures have been modified. */ public boolean isAllModified() { // TODO: cache the value for (int i = 0; i < modified.length; ++i) { if ((modified[i] | deleted[i]) != WORD_MASK) { return false; } } return true; }
3.68
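The check works because each long word tracks 64 procedures and WORD_MASK has all 64 bits set; a self-contained sketch of the same bitwise test (field and class names chosen only for illustration):

public class WordMaskSketch {
    private static final long WORD_MASK = 0xffffffffffffffffL; // all 64 bits set

    // True only when every bit position is covered by either 'modified' or 'deleted'.
    static boolean isAllCovered(long[] modified, long[] deleted) {
        for (int i = 0; i < modified.length; i++) {
            if ((modified[i] | deleted[i]) != WORD_MASK) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        long[] modified = {0x00000000ffffffffL};
        long[] deleted  = {0xffffffff00000000L};
        System.out.println(isAllCovered(modified, deleted)); // true: the two halves cover all bits
    }
}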
pulsar_ManagedCursor_skipNonRecoverableLedger
/** * If a ledger is lost, it will be skipped when "autoSkipNonRecoverableData" is enabled, and this method is * used to delete the information about that ledger from the ManagedCursor. */ default void skipNonRecoverableLedger(long ledgerId){}
3.68
hbase_BloomFilterUtil_formatStats
/** * A human-readable string with statistics for the given Bloom filter. * @param bloomFilter the Bloom filter to output statistics for; * @return a string consisting of "&lt;key&gt;: &lt;value&gt;" parts separated by * {@link #STATS_RECORD_SEP}. */ public static String formatStats(BloomFilterBase bloomFilter) { StringBuilder sb = new StringBuilder(); long k = bloomFilter.getKeyCount(); long m = bloomFilter.getMaxKeys(); sb.append("BloomSize: " + bloomFilter.getByteSize() + STATS_RECORD_SEP); sb.append("No of Keys in bloom: " + k + STATS_RECORD_SEP); sb.append("Max Keys for bloom: " + m); if (m > 0) { sb.append(STATS_RECORD_SEP + "Percentage filled: " + NumberFormat.getPercentInstance().format(k * 1.0 / m)); } return sb.toString(); }
3.68
flink_Time_of
/** * Creates a new {@link Time} of the given duration and {@link TimeUnit}. * * <p>The {@code Time} refers to the time characteristic that is set on the dataflow via {@link * org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)}. * * @param size The duration of time. * @param unit The unit of time of the duration, for example {@code TimeUnit.SECONDS}. * @return The time policy. */ public static Time of(long size, TimeUnit unit) { return new Time(size, unit); }
3.68
flink_JoinOperator_projectTuple24
/** * Projects a pair of joined elements to a {@link Tuple} with the previously selected * fields. Requires the classes of the fields of the resulting tuples. * * @return The projected data set. * @see Tuple * @see DataSet */ public < T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> ProjectJoin< I1, I2, Tuple24< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>> projectTuple24() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo< Tuple24< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>> tType = new TupleTypeInfo< Tuple24< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>>(fTypes); return new ProjectJoin< I1, I2, Tuple24< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>>( this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this); }
3.68
dubbo_AbstractConfig_isValid
/** * FIXME check @Parameter(required=true) and any conditions that need to match. */ @Parameter(excluded = true, attribute = false) public boolean isValid() { return true; }
3.68
dubbo_DubboServiceAddressURL_getAllParameters
/** * The returned parameters are imprecise regarding the override priorities of the consumer url and provider url. * This method is only used to pass the configuration in the 'client'. */ @Override public Map<String, String> getAllParameters() { Map<String, String> allParameters = new HashMap<>((int) (super.getParameters().size() / .75 + 1)); allParameters.putAll(super.getParameters()); if (consumerURL != null) { allParameters.putAll(consumerURL.getParameters()); } if (overrideURL != null) { allParameters.putAll(overrideURL.getParameters()); } return Collections.unmodifiableMap(allParameters); }
3.68
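The override behaviour here comes purely from Map.putAll: whatever is copied last wins. A plain-JDK sketch of that merge order (the map names are illustrative, not Dubbo API):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class ParameterMergeSketch {
    public static void main(String[] args) {
        Map<String, String> provider = new HashMap<>();
        provider.put("timeout", "1000");
        provider.put("retries", "2");
        Map<String, String> consumer = Collections.singletonMap("timeout", "3000");

        // Later putAll calls win, so the consumer value overrides the provider value.
        Map<String, String> merged = new HashMap<>(provider);
        merged.putAll(consumer);
        // Prints timeout=3000 and retries=2 (iteration order is not guaranteed).
        System.out.println(Collections.unmodifiableMap(merged));
    }
}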
hibernate-validator_ModCheckValidator_isCheckDigitValid
/** * Check if the input passes the Mod10 (Luhn algorithm implementation only) or Mod11 test * * @param digits the digits over which to calculate the Mod10 or Mod11 checksum * @param checkDigit the check digit * * @return {@code true} if the mod 10/11 result matches the check digit, {@code false} otherwise */ @Override public boolean isCheckDigitValid(List<Integer> digits, char checkDigit) { int modResult = -1; int checkValue = extractDigit( checkDigit ); if ( modType.equals( ModType.MOD11 ) ) { modResult = ModUtil.calculateMod11Check( digits, multiplier ); if ( modResult == 10 || modResult == 11 ) { modResult = 0; } } else { modResult = ModUtil.calculateLuhnMod10Check( digits ); } return checkValue == modResult; }
3.68
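For reference, a stand-alone Luhn (Mod10) check in plain Java; this is a simplified stand-in for ModUtil.calculateLuhnMod10Check, not the Hibernate Validator implementation itself:

import java.util.Arrays;
import java.util.List;

public class LuhnSketch {
    // digits are the payload (most significant first), checkDigit is the trailing digit.
    static boolean passesLuhn(List<Integer> digits, int checkDigit) {
        int sum = 0;
        boolean doubleIt = true; // the payload digit closest to the check digit is doubled
        for (int i = digits.size() - 1; i >= 0; i--) {
            int d = digits.get(i);
            if (doubleIt) {
                d *= 2;
                if (d > 9) {
                    d -= 9;
                }
            }
            sum += d;
            doubleIt = !doubleIt;
        }
        return (sum + checkDigit) % 10 == 0;
    }

    public static void main(String[] args) {
        // The classic example 7992739871 has Luhn check digit 3.
        System.out.println(passesLuhn(Arrays.asList(7, 9, 9, 2, 7, 3, 9, 8, 7, 1), 3)); // true
    }
}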
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations15
/** * Expression that should be wrapped implicitly, is wrapped additionally with * a bracket() method. */ @Test public void shouldGenerateCorrectSqlForMathOperations15() { String result = testDialect.getSqlFrom(field("a").plus(bracket(field("b").plus(field("c")))).divideBy(literal(2))); assertEquals(expectedSqlForMathOperations15(), result); }
3.68
flink_SqlCreateTable_getFullConstraints
/** Returns the column constraints plus the table constraints. */ public List<SqlTableConstraint> getFullConstraints() { return SqlConstraintValidator.getFullConstraints(tableConstraints, columnList); }
3.68
MagicPlugin_ExprActiveSpell_convert
// Eclipse detects the parent return type of this function as @NonNull // which is not correct. @SuppressWarnings("null") @Nullable @Override public String convert(final Player p) { assert false; return null; }
3.68
hadoop_StringValueMin_addNextValue
/** * add a value to the aggregator * * @param val * a string. * */ public void addNextValue(Object val) { String newVal = val.toString(); if (this.minVal == null || this.minVal.compareTo(newVal) > 0) { this.minVal = newVal; } }
3.68
hudi_HoodieTableFactory_setupTimestampKeygenOptions
/** * Sets up the keygen options when the partition path is datetime type. * * <p>The UTC timezone is used as default. */ public static void setupTimestampKeygenOptions(Configuration conf, DataType fieldType) { if (conf.contains(FlinkOptions.KEYGEN_CLASS_NAME)) { // the keygen clazz has been set up explicitly, skipping return; } conf.setString(FlinkOptions.KEYGEN_CLASS_NAME, TimestampBasedAvroKeyGenerator.class.getName()); LOG.info("Table option [{}] is reset to {} because datetime partitioning turns on", FlinkOptions.KEYGEN_CLASS_NAME.key(), TimestampBasedAvroKeyGenerator.class.getName()); if (DataTypeUtils.isTimestampType(fieldType)) { int precision = DataTypeUtils.precision(fieldType.getLogicalType()); if (precision == 0) { // seconds conf.setString(TIMESTAMP_TYPE_FIELD.key(), TimestampBasedAvroKeyGenerator.TimestampType.UNIX_TIMESTAMP.name()); } else if (precision == 3) { // milliseconds conf.setString(TIMESTAMP_TYPE_FIELD.key(), TimestampBasedAvroKeyGenerator.TimestampType.EPOCHMILLISECONDS.name()); } String outputPartitionFormat = conf.getOptional(FlinkOptions.PARTITION_FORMAT).orElse(FlinkOptions.PARTITION_FORMAT_HOUR); conf.setString(TIMESTAMP_OUTPUT_DATE_FORMAT.key(), outputPartitionFormat); } else { conf.setString(TIMESTAMP_TYPE_FIELD.key(), TimestampBasedAvroKeyGenerator.TimestampType.SCALAR.name()); conf.setString(INPUT_TIME_UNIT.key(), TimeUnit.DAYS.toString()); String outputPartitionFormat = conf.getOptional(FlinkOptions.PARTITION_FORMAT).orElse(FlinkOptions.PARTITION_FORMAT_DAY); conf.setString(TIMESTAMP_OUTPUT_DATE_FORMAT.key(), outputPartitionFormat); // the option is actually useless, it only works for validation conf.setString(TIMESTAMP_INPUT_DATE_FORMAT.key(), FlinkOptions.PARTITION_FORMAT_DAY); } conf.setString(TIMESTAMP_OUTPUT_TIMEZONE_FORMAT.key(), "UTC"); }
3.68
hadoop_BalancerBandwidthCommand_getBalancerBandwidthValue
/** * Get current value of the max balancer bandwidth in bytes per second. * * @return bandwidth Balancer bandwidth in bytes per second for this datanode. */ public long getBalancerBandwidthValue() { return this.bandwidth; }
3.68
hadoop_BalanceJob_finish
/** * Job finishes. It could be either success or failure. * @param exception the exception that causes the job to fail. null indicates * the job is successful. */ private synchronized void finish(Exception exception) { assert !jobDone; if (scheduler.jobDone(this)) { jobDone = true; error = exception; notifyAll(); } }
3.68
querydsl_SQLExpressions_ratioToReport
/** * computes the ratio of a value to the sum of a set of values. If expr evaluates to null, * then the ratio-to-report value also evaluates to null. * * @return ratio_to_report(expr) */ public static <T> WindowOver<T> ratioToReport(Expression<T> expr) { return new WindowOver<T>(expr.getType(), SQLOps.RATIOTOREPORT, expr); }
3.68
dubbo_AbstractConfigManager_removeConfig
/** * In some scenarios, we may need to add and remove ServiceConfig or ReferenceConfig dynamically. * * @param config the config instance to remove. * @return true if the config was removed, false otherwise */ public boolean removeConfig(AbstractConfig config) { if (config == null) { return false; } Map<String, AbstractConfig> configs = configsCache.get(getTagName(config.getClass())); if (CollectionUtils.isNotEmptyMap(configs)) { // lock by config type synchronized (configs) { return removeIfAbsent(config, configs); } } return false; }
3.68
querydsl_Expressions_asSimple
/** * Create a new SimpleExpression * * @param value constant * @return new SimpleExpression */ public static <T> SimpleExpression<T> asSimple(T value) { return asSimple(constant(value)); }
3.68
hudi_HoodiePartitionMetadata_writeMetafile
/** * Write the partition metadata in the correct format in the given file path. * * @param filePath Path of the file to write * @throws IOException */ private void writeMetafile(Path filePath) throws IOException { if (format.isPresent()) { Schema schema = HoodieAvroUtils.getRecordKeySchema(); switch (format.get()) { case PARQUET: // Since we are only interested in saving metadata to the footer, the schema, blocksizes and other // parameters are not important. MessageType type = Types.buildMessage().optional(PrimitiveTypeName.INT64).named("dummyint").named("dummy"); HoodieAvroWriteSupport writeSupport = new HoodieAvroWriteSupport(type, schema, Option.empty(), new Properties()); try (ParquetWriter writer = new ParquetWriter(filePath, writeSupport, CompressionCodecName.UNCOMPRESSED, 1024, 1024)) { for (String key : props.stringPropertyNames()) { writeSupport.addFooterMetadata(key, props.getProperty(key)); } } break; case ORC: // Since we are only interested in saving metadata to the footer, the schema, blocksizes and other // parameters are not important. OrcFile.WriterOptions writerOptions = OrcFile.writerOptions(fs.getConf()).fileSystem(fs) .setSchema(AvroOrcUtils.createOrcSchema(schema)); try (Writer writer = OrcFile.createWriter(filePath, writerOptions)) { for (String key : props.stringPropertyNames()) { writer.addUserMetadata(key, ByteBuffer.wrap(getUTF8Bytes(props.getProperty(key)))); } } break; default: throw new HoodieException("Unsupported format for partition metafiles: " + format.get()); } } else { // Backwards compatible properties file format FSDataOutputStream os = fs.create(filePath, true); props.store(os, "partition metadata"); os.hsync(); os.hflush(); os.close(); } }
3.68
flink_TableConfig_getSqlDialect
/** Returns the current SQL dialect. */ public SqlDialect getSqlDialect() { return SqlDialect.valueOf(get(TableConfigOptions.TABLE_SQL_DIALECT).toUpperCase()); }
3.68
hadoop_InterruptEscalator_lookup
/** * Look up the handler for a signal. * @param signalName signal name * @return the handler if found, otherwise null */ public synchronized IrqHandler lookup(String signalName) { for (IrqHandler irqHandler : interruptHandlers) { if (irqHandler.getName().equals(signalName)) { return irqHandler; } } return null; }
3.68
hbase_FavoredNodeAssignmentHelper_getRackOfServer
/** * Get the rack of a server from the local mapping when present; this saves a lookup via the RackManager. */ private String getRackOfServer(ServerName sn) { if (this.regionServerToRackMap.containsKey(sn.getHostname())) { return this.regionServerToRackMap.get(sn.getHostname()); } else { String rack = this.rackManager.getRack(sn); this.regionServerToRackMap.put(sn.getHostname(), rack); return rack; } }
3.68
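The cache-then-fallback pattern above can also be written with Map.computeIfAbsent; a small plain-JDK sketch where the lookup function is only a stand-in for the RackManager call (all names are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class MemoizedLookupSketch {
    public static void main(String[] args) {
        Map<String, String> hostToRack = new HashMap<>();
        // Stand-in for the expensive RackManager lookup.
        Function<String, String> expensiveLookup = host -> {
            System.out.println("resolving rack for " + host);
            return "/rack-" + Math.abs(host.hashCode() % 4);
        };
        // computeIfAbsent caches the answer, so the second call never hits the resolver.
        System.out.println(hostToRack.computeIfAbsent("host-1", expensiveLookup));
        System.out.println(hostToRack.computeIfAbsent("host-1", expensiveLookup));
    }
}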
hadoop_ConnectionPool_getNumActiveConnectionsRecently
/** * Number of active connections recently in the pool. * * @return Number of active connections recently. */ protected int getNumActiveConnectionsRecently() { int ret = 0; List<ConnectionContext> tmpConnections = this.connections; for (ConnectionContext conn : tmpConnections) { if (conn.isActiveRecently()) { ret++; } } return ret; }
3.68
hadoop_IdentityTransformer_isShortUserName
/** * Internal method to identify if the owner name returned by the ADLS backend is a short name or not. * If the name contains "@", this code assumes that whatever comes after '@' is the domain name and ignores it. * @param owner the owner name returned by the backend * @return true if the owner name is non-null and does not contain '@' */ private boolean isShortUserName(String owner) { return (owner != null) && !owner.contains(AT); }
3.68
hbase_HFileBlock_serialize
// Cacheable implementation @Override public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) { this.bufWithoutChecksum.get(destination, 0, getSerializedLength() - BLOCK_METADATA_SPACE); destination = addMetaData(destination, includeNextBlockMetadata); // Make it ready for reading. flip sets position to zero and limit to current position which // is what we want if we do not want to serialize the block plus checksums if present plus // metadata. destination.flip(); }
3.68
rocketmq-connect_KafkaSinkValueConverter_convertKafkaValue
/** * Converts a value to the given target schema, recursing into structs, arrays and maps. * * @param targetSchema the schema of the converted value * @param originalValue the value to convert * @return the converted value */ private Object convertKafkaValue(Schema targetSchema, Object originalValue) { if (targetSchema == null) { if (originalValue == null) { return null; } return originalValue; } switch (targetSchema.type()) { case INT8: case INT16: case INT32: case INT64: case FLOAT32: case FLOAT64: case BOOLEAN: case STRING: case BYTES: return originalValue; case STRUCT: Struct toStruct = new Struct(targetSchema); if (originalValue != null) { convertStructValue(toStruct, (org.apache.kafka.connect.data.Struct) originalValue); } return toStruct; case ARRAY: List<Object> array = (List<Object>) originalValue; List<Object> newArray = new ArrayList<>(); array.forEach(item -> { newArray.add(convertKafkaValue(targetSchema.valueSchema(), item)); }); return newArray; case MAP: Map mapData = (Map) originalValue; Map newMapData = new ConcurrentHashMap(); mapData.forEach((k, v) -> { newMapData.put( convertKafkaValue(targetSchema.keySchema(), k), convertKafkaValue(targetSchema.valueSchema(), v) ); }); return newMapData; default: throw new RuntimeException("Type not supported: " + targetSchema.type()); } }
3.68
rocketmq-connect_WorkerSinkTask_consumeFromOffset
/** * Consume from offset. * * @param messageQueue the message queue to read from * @param taskConfig the task configuration */ public long consumeFromOffset(MessageQueue messageQueue, ConnectKeyValue taskConfig) { // -1 when started long offset = consumer.getOffsetStore().readOffset(messageQueue, ReadOffsetType.READ_FROM_MEMORY); if (0 > offset) { // query from broker offset = consumer.getOffsetStore().readOffset(messageQueue, ReadOffsetType.READ_FROM_STORE); } if (offset < 0) { String consumeFromWhere = taskConfig.getString(ConnectorConfig.CONSUME_FROM_WHERE); if (StringUtils.isBlank(consumeFromWhere)) { consumeFromWhere = ConsumeFromWhere.CONSUME_FROM_LAST_OFFSET.name(); } try { switch (ConsumeFromWhere.valueOf(consumeFromWhere)) { case CONSUME_FROM_LAST_OFFSET: consumer.seekToEnd(messageQueue); break; case CONSUME_FROM_FIRST_OFFSET: consumer.seekToBegin(messageQueue); break; default: break; } } catch (MQClientException e) { throw new ConnectException(e); } } log.info("Consume {} from {}", messageQueue, offset); return offset < 0 ? 0 : offset; }
3.68
hbase_MemStoreCompactor_releaseResources
/** * Reset the interruption indicator and clear the pointers in order to allow good garbage collection. */ private void releaseResources() { isInterrupted.set(false); versionedList = null; }
3.68
flink_OptimizerNode_setParallelism
/** * Sets the parallelism for this optimizer node. The parallelism denotes how many parallel * instances of the operator will be spawned during the execution. * * @param parallelism The parallelism to set. If this value is {@link * ExecutionConfig#PARALLELISM_DEFAULT} then the system will take the default number of * parallel instances. * @throws IllegalArgumentException If the parallelism is smaller than one. */ public void setParallelism(int parallelism) { if (parallelism < 1 && parallelism != ExecutionConfig.PARALLELISM_DEFAULT) { throw new IllegalArgumentException("Parallelism of " + parallelism + " is invalid."); } this.parallelism = parallelism; }
3.68
hmily_JavaBeanBinder_getSupplier
/** * Gets supplier. * * @param target the target * @return the supplier */ @SuppressWarnings("unchecked") BeanSupplier<T> getSupplier(final BindData<T> target) { return new BeanSupplier<>(() -> { T instance = null; if (target.getValue() != null) { instance = target.getValue().get(); } if (instance == null) { try { instance = (T) this.type.newInstance(); } catch (InstantiationException | IllegalAccessException e) { throw new ConfigException(e); } } return instance; }); }
3.68
morf_AbstractSqlDialectTest_testMergeSimple
/** * Tests a simple merge. */ @Test public void testMergeSimple() { TableReference foo = new TableReference("foo").as("foo"); TableReference somewhere = new TableReference("somewhere"); SelectStatement sourceStmt = new SelectStatement(somewhere.field("newId").as("id"), somewhere.field("newBar").as("bar")).from(somewhere).alias("somewhere"); MergeStatement stmt = new MergeStatement().into(foo).tableUniqueKey(foo.field("id")).from(sourceStmt); assertEquals("Select scripts are not the same", expectedMergeSimple(), testDialect.convertStatementToSQL(stmt)); }
3.68
hudi_BaseHoodieWriteClient_clean
/** * Triggers clean for the table. This cleans up any stale/old files/data lying around (either on file storage or index storage) based on the * configurations and CleaningPolicy used. * @param skipLocking if this is triggered by another parent transaction, locking can be skipped. * @return instance of {@link HoodieCleanMetadata}. */ @Deprecated public HoodieCleanMetadata clean(boolean skipLocking) { return clean(createNewInstantTime()); }
3.68
flink_SqlFunctionUtils_log2
/** Returns the logarithm of {@code x} with base 2. */ public static double log2(double x) { return Math.log(x) / Math.log(2); }
3.68
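Since the identity log2(x) = ln(x) / ln(2) is evaluated in double arithmetic, results are best compared with a tolerance rather than for exact equality; a quick stand-alone check:

public class Log2Sketch {
    // log2(x) = ln(x) / ln(2); double rounding may leave a tiny error.
    static double log2(double x) {
        return Math.log(x) / Math.log(2);
    }

    public static void main(String[] args) {
        System.out.println(log2(8.0));                            // approximately 3.0
        System.out.println(Math.abs(log2(1024.0) - 10.0) < 1e-9); // true
    }
}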
pulsar_ModularLoadManagerImpl_updateLoadBalancingMetrics
/** * Updates this broker's load-balancing metrics from the given system resource usage percentages. * * @param systemResourceUsage the current system resource usage of this broker */ private void updateLoadBalancingMetrics(final SystemResourceUsage systemResourceUsage) { List<Metrics> metrics = new ArrayList<>(); Map<String, String> dimensions = new HashMap<>(); dimensions.put("broker", pulsar.getAdvertisedAddress()); dimensions.put("metric", "loadBalancing"); Metrics m = Metrics.create(dimensions); m.put("brk_lb_cpu_usage", systemResourceUsage.getCpu().percentUsage()); m.put("brk_lb_memory_usage", systemResourceUsage.getMemory().percentUsage()); m.put("brk_lb_directMemory_usage", systemResourceUsage.getDirectMemory().percentUsage()); m.put("brk_lb_bandwidth_in_usage", systemResourceUsage.getBandwidthIn().percentUsage()); m.put("brk_lb_bandwidth_out_usage", systemResourceUsage.getBandwidthOut().percentUsage()); metrics.add(m); this.loadBalancingMetrics.set(metrics); }
3.68
hudi_BaseHoodieClient_finalizeWrite
/** * Finalize Write operation. * * @param table HoodieTable * @param instantTime Instant Time * @param stats Hoodie Write Stat */ protected void finalizeWrite(HoodieTable table, String instantTime, List<HoodieWriteStat> stats) { try { final Timer.Context finalizeCtx = metrics.getFinalizeCtx(); table.finalizeWrite(context, instantTime, stats); if (finalizeCtx != null) { Option<Long> durationInMs = Option.of(metrics.getDurationInMs(finalizeCtx.stop())); durationInMs.ifPresent(duration -> { LOG.info("Finalize write elapsed time (milliseconds): " + duration); metrics.updateFinalizeWriteMetrics(duration, stats.size()); }); } } catch (HoodieIOException ioe) { throw new HoodieCommitException("Failed to complete commit " + instantTime + " due to finalize errors.", ioe); } }
3.68
zxing_MatrixUtil_embedBasicPatterns
// Embed basic patterns into the matrix; throws WriterException on failure. // The basic patterns are: // - Position detection patterns // - Timing patterns // - Dark dot at the left bottom corner // - Position adjustment patterns, if need be static void embedBasicPatterns(Version version, ByteMatrix matrix) throws WriterException { // Let's get started with embedding big squares at corners. embedPositionDetectionPatternsAndSeparators(matrix); // Then, embed the dark dot at the left bottom corner. embedDarkDotAtLeftBottomCorner(matrix); // Position adjustment patterns appear if version >= 2. maybeEmbedPositionAdjustmentPatterns(version, matrix); // Timing patterns should be embedded after position adj. patterns. embedTimingPatterns(matrix); }
3.68
flink_AccumulatorHelper_mergeSingle
/** Workaround method for type safety. */ private static <V, R extends Serializable> Accumulator<V, R> mergeSingle( Accumulator<?, ?> target, Accumulator<?, ?> toMerge) { @SuppressWarnings("unchecked") Accumulator<V, R> typedTarget = (Accumulator<V, R>) target; @SuppressWarnings("unchecked") Accumulator<V, R> typedToMerge = (Accumulator<V, R>) toMerge; typedTarget.merge(typedToMerge); return typedTarget; }
3.68
hadoop_TimelinePutResponse_getEntityType
/** * Get the entity type * * @return the entity type */ @XmlElement(name = "entitytype") public String getEntityType() { return entityType; }
3.68
pulsar_WebSocketWebResource_clientAppId
/** * Gets a caller id (IP + role). * * @return the web service caller identification */ public String clientAppId() { if (isBlank(clientId)) { try { String authMethodName = httpRequest.getHeader(AuthenticationFilter.PULSAR_AUTH_METHOD_NAME); if (authMethodName != null && service().getAuthenticationService().getAuthenticationProvider(authMethodName) != null) { authenticationDataSource = service().getAuthenticationService() .getAuthenticationProvider(authMethodName) .newHttpAuthState(httpRequest).getAuthDataSource(); clientId = service().getAuthenticationService().authenticateHttpRequest( httpRequest, authenticationDataSource); } else { clientId = service().getAuthenticationService().authenticateHttpRequest(httpRequest); authenticationDataSource = new AuthenticationDataHttps(httpRequest); } } catch (AuthenticationException e) { if (service().getConfig().isAuthenticationEnabled()) { throw new RestException(Status.UNAUTHORIZED, "Failed to get clientId from request"); } } if (isBlank(clientId) && service().getConfig().isAuthenticationEnabled()) { throw new RestException(Status.UNAUTHORIZED, "Failed to get auth data from the request"); } } return clientId; }
3.68
druid_ListDG_getPosition
/* * Return the position of ch (or -1 if it is not found). */ private int getPosition(Object ch) { for (int i = 0; i < mVexs.size(); i++) { if (mVexs.get(i).data == ch) { return i; } } return -1; }
3.68
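Note that the loop above compares data with ==, i.e. by reference; when logical equality is wanted, equals() is the safer comparison. A small stand-alone sketch of the same linear search using equals() (class and method names are illustrative, not the druid API):

import java.util.Arrays;
import java.util.List;

public class PositionLookupSketch {
    // Linear search that compares with equals() rather than ==, so logically
    // equal boxed values or strings are found even when they are distinct objects.
    static int positionOf(List<Object> data, Object target) {
        for (int i = 0; i < data.size(); i++) {
            if (data.get(i).equals(target)) {
                return i;
            }
        }
        return -1;
    }

    public static void main(String[] args) {
        List<Object> vertices = Arrays.asList("A", "B", new String("C"));
        System.out.println(positionOf(vertices, new String("C"))); // 2 with equals(); == could miss it
    }
}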
hbase_CompositeImmutableSegment_getCellsCount
/** Returns number of cells in segment */ @Override public int getCellsCount() { int result = 0; for (ImmutableSegment s : segments) { result += s.getCellsCount(); } return result; }
3.68
morf_SpreadsheetDataSetProducer_countHeadings
/** * Counts the number of headings in the given sheet. This excludes any * heading related to translations. * * @param sheet Worksheet to count headings in * @param headingRowIndex Index of the heading row * @return the number of headings */ private int countHeadings(final Sheet sheet, final int headingRowIndex) { for (int i = 0; i < sheet.getRow(headingRowIndex).length; i++) { // A blank heading is the start of additional headings such as the // translation heading if (sheet.getCell(i, headingRowIndex).getContents().length() == 0) { return i; } } return sheet.getRow(headingRowIndex).length; }
3.68
dubbo_RpcServiceContext_setLocalAddress
/** * set local address. * * @param address * @return context */ @Override public RpcServiceContext setLocalAddress(InetSocketAddress address) { this.localAddress = address; return this; }
3.68
flink_CompactFileUtils_doCompact
/** * Do compaction: if the target file already exists, do nothing; for a single input file, do an * atomic rename (there are optimizations for FileSystem); for multiple files, do reading and * writing. */ public static @Nullable <T> Path doCompact( FileSystem fileSystem, String partition, List<Path> paths, Path target, Configuration config, CompactReader.Factory<T> readerFactory, CompactWriter.Factory<T> writerFactory) throws IOException { if (paths.size() == 0) { return null; } Map<Path, Long> inputMap = new HashMap<>(); for (Path path : paths) { inputMap.put(path, fileSystem.getFileStatus(path).getLen()); } if (fileSystem.exists(target)) { return target; } checkExist(fileSystem, paths); long startMillis = System.currentTimeMillis(); boolean success = false; if (paths.size() == 1) { // optimizer for single file success = doSingleFileMove(fileSystem, paths.get(0), target); } if (!success) { doMultiFilesCompact( partition, paths, target, config, fileSystem, readerFactory, writerFactory); } Map<Path, Long> targetMap = new HashMap<>(); targetMap.put(target, fileSystem.getFileStatus(target).getLen()); double costSeconds = ((double) (System.currentTimeMillis() - startMillis)) / 1000; LOG.info( "Compaction time cost is '{}S', output per file as following format: name=size(byte), target file is '{}', input files are '{}'", costSeconds, targetMap, inputMap); return target; }
3.68
hbase_RegionInfo_createRegionName
/** * Make a region name of passed parameters. * @param startKey Can be null * @param id Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format * @return Region name made of passed tableName, startKey, id and replicaId */ static byte[] createRegionName(final TableName tableName, final byte[] startKey, final byte[] id, final int replicaId, boolean newFormat) { int len = tableName.getName().length + 2 + id.length + (startKey == null ? 0 : startKey.length); if (newFormat) { len += MD5_HEX_LENGTH + 2; } byte[] replicaIdBytes = null; // Special casing: replicaId is only appended if replicaId is greater than // 0. This is because all regions in meta would have to be migrated to the new // name otherwise if (replicaId > 0) { // use string representation for replica id replicaIdBytes = Bytes.toBytes(String.format(REPLICA_ID_FORMAT, replicaId)); len += 1 + replicaIdBytes.length; } byte[] b = new byte[len]; int offset = tableName.getName().length; System.arraycopy(tableName.getName(), 0, b, 0, offset); b[offset++] = HConstants.DELIMITER; if (startKey != null && startKey.length > 0) { System.arraycopy(startKey, 0, b, offset, startKey.length); offset += startKey.length; } b[offset++] = HConstants.DELIMITER; System.arraycopy(id, 0, b, offset, id.length); offset += id.length; if (replicaIdBytes != null) { b[offset++] = REPLICA_ID_DELIMITER; System.arraycopy(replicaIdBytes, 0, b, offset, replicaIdBytes.length); offset += replicaIdBytes.length; } if (newFormat) { // // Encoded name should be built into the region name. // // Use the region name thus far (namely, <tablename>,<startKey>,<id>_<replicaId>) // to compute a MD5 hash to be used as the encoded name, and append // it to the byte buffer. // String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset); byte[] md5HashBytes = Bytes.toBytes(md5Hash); if (md5HashBytes.length != MD5_HEX_LENGTH) { System.out.println( "MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + "; Got=" + md5HashBytes.length); } // now append the bytes '.<encodedName>.' to the end b[offset++] = ENC_SEPARATOR; System.arraycopy(md5HashBytes, 0, b, offset, MD5_HEX_LENGTH); offset += MD5_HEX_LENGTH; b[offset] = ENC_SEPARATOR; } return b; }
3.68
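The encoded-name suffix used above depends only on the fact that an MD5 digest always renders as 32 hex characters; a self-contained sketch with java.security.MessageDigest (the sample region name string is made up for illustration):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class Md5HexSketch {
    // Hex-encodes the MD5 digest of a byte range; the result is always 32 characters,
    // which is why a fixed MD5_HEX_LENGTH can be reserved in the region name.
    static String md5Hex(byte[] data, int offset, int length) throws NoSuchAlgorithmException {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        md5.update(data, offset, length);
        StringBuilder sb = new StringBuilder();
        for (byte b : md5.digest()) {
            sb.append(String.format("%02x", b));
        }
        return sb.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        byte[] name = "table,startKey,12345".getBytes(StandardCharsets.UTF_8);
        String hex = md5Hex(name, 0, name.length);
        System.out.println(hex + " (" + hex.length() + " chars)"); // always 32 chars
    }
}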
hadoop_SingleFilePerBlockCache_deleteBlockFileAndEvictCache
/** * Delete cache file as part of the block cache LRU eviction. * * @param elementToPurge Block entry to evict. */ private void deleteBlockFileAndEvictCache(Entry elementToPurge) { try (DurationTracker ignored = trackerFactory.trackDuration(STREAM_FILE_CACHE_EVICTION)) { boolean lockAcquired = elementToPurge.takeLock(Entry.LockType.WRITE, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT); if (!lockAcquired) { LOG.error("Cache file {} deletion would not be attempted as write lock could not" + " be acquired within {} {}", elementToPurge.path, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT, PrefetchConstants.PREFETCH_WRITE_LOCK_TIMEOUT_UNIT); } else { try { if (Files.deleteIfExists(elementToPurge.path)) { entryListSize--; prefetchingStatistics.blockRemovedFromFileCache(); blocks.remove(elementToPurge.blockNumber); prefetchingStatistics.blockEvictedFromFileCache(); } } catch (IOException e) { LOG.warn("Failed to delete cache file {}", elementToPurge.path, e); } finally { elementToPurge.releaseLock(Entry.LockType.WRITE); } } } }
3.68
flink_DeltaIterationBase_setInitialWorkset
/** * Sets the given input as the initial workset. * * @param input The contract to set as the initial workset. */ public void setInitialWorkset(Operator<WT> input) { setSecondInput(input); }
3.68
hbase_HRegionServer_isDataFileSystemOk
/** Returns {@code true} when the data file system is available, {@code false} otherwise. */ boolean isDataFileSystemOk() { return this.dataFsOk; }
3.68
framework_VaadinFinderLocatorStrategy_filterMatches
/** * Go through a list of potentially matching components, modifying that list * until all elements that remain in that list match the complete list of * predicates. * * @param potentialMatches * a list of component connectors. Will be changed. * @param predicates * an immutable list of predicates * @return filtered list of component connectors. */ private List<ComponentConnector> filterMatches( List<ComponentConnector> potentialMatches, List<SelectorPredicate> predicates) { for (SelectorPredicate p : predicates) { if (p.getIndex() > -1) { try { ComponentConnector v = potentialMatches.get(p.getIndex()); potentialMatches.clear(); potentialMatches.add(v); } catch (IndexOutOfBoundsException e) { potentialMatches.clear(); } continue; } for (int i = 0, l = potentialMatches.size(); i < l; ++i) { String propData = getPropertyValue(potentialMatches.get(i), p.getName()); if ((p.isWildcard() && propData == null) || (!p.isWildcard() && !p.getValue().equals(propData))) { potentialMatches.remove(i); --l; --i; } } } return eliminateDuplicates(potentialMatches); }
3.68
flink_DeletePushDownUtils_getDynamicTableSink
/** * Get the {@link DynamicTableSink} for the table to be modified. Return Optional.empty() if it * can't get the {@link DynamicTableSink}. */ public static Optional<DynamicTableSink> getDynamicTableSink( ContextResolvedTable contextResolvedTable, LogicalTableModify tableModify, CatalogManager catalogManager) { final FlinkContext context = ShortcutUtils.unwrapContext(tableModify.getCluster()); CatalogBaseTable catalogBaseTable = contextResolvedTable.getTable(); // only consider DynamicTableSink if (catalogBaseTable instanceof CatalogTable) { ResolvedCatalogTable resolvedTable = contextResolvedTable.getResolvedTable(); Optional<Catalog> optionalCatalog = contextResolvedTable.getCatalog(); ObjectIdentifier objectIdentifier = contextResolvedTable.getIdentifier(); boolean isTemporary = contextResolvedTable.isTemporary(); // only consider the CatalogTable that doesn't use legacy connector sink option if (!contextResolvedTable.isAnonymous() && !TableFactoryUtil.isLegacyConnectorOptions( catalogManager .getCatalog(objectIdentifier.getCatalogName()) .orElse(null), context.getTableConfig(), !context.isBatchMode(), objectIdentifier, resolvedTable, isTemporary)) { // create table dynamic table sink DynamicTableSink tableSink = ExecutableOperationUtils.createDynamicTableSink( optionalCatalog.orElse(null), () -> context.getModuleManager() .getFactory((Module::getTableSinkFactory)), objectIdentifier, resolvedTable, Collections.emptyMap(), context.getTableConfig(), context.getClassLoader(), contextResolvedTable.isTemporary()); return Optional.of(tableSink); } } return Optional.empty(); }
3.68
morf_WithMetaDataAdapter_open
/** * {@inheritDoc} * * @see org.alfasoftware.morf.dataset.DataSetProducer#open() */ @Override public void open() { super.open(); schemaProducer.open(); }
3.68
hbase_HRegion_shouldFlush
/** * Should the memstore be flushed now */ boolean shouldFlush(final StringBuilder whyFlush) { whyFlush.setLength(0); // This is a rough measure. if ( this.maxFlushedSeqId > 0 && (this.maxFlushedSeqId + this.flushPerChanges < this.mvcc.getReadPoint()) ) { whyFlush.append("more than max edits, " + this.flushPerChanges + ", since last flush"); return true; } long modifiedFlushCheckInterval = flushCheckInterval; if ( getRegionInfo().getTable().isSystemTable() && getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID ) { modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; } if (modifiedFlushCheckInterval <= 0) { // disabled return false; } long now = EnvironmentEdgeManager.currentTime(); // if we flushed in the recent past, we don't need to do again now if ((now - getEarliestFlushTimeForAllStores() < modifiedFlushCheckInterval)) { return false; } // since we didn't flush in the recent past, flush now if certain conditions // are met. Return true on first such memstore hit. for (HStore s : stores.values()) { if (s.timeOfOldestEdit() < now - modifiedFlushCheckInterval) { // we have an old enough edit in the memstore, flush whyFlush.append(s.toString() + " has an old edit so flush to free WALs"); return true; } } return false; }
3.68
flink_DebeziumJsonFormatFactory_validateEncodingFormatOptions
/** Validator for debezium encoding format. */ private static void validateEncodingFormatOptions(ReadableConfig tableOptions) { JsonFormatOptionsUtil.validateEncodingFormatOptions(tableOptions); // validator for {@link SCHEMA_INCLUDE} if (tableOptions.get(SCHEMA_INCLUDE)) { throw new ValidationException( String.format( "Debezium JSON serialization doesn't support '%s.%s' option been set to true.", IDENTIFIER, SCHEMA_INCLUDE.key())); } }
3.68
hadoop_ApplicationMaster_printUsage
/** * Helper function to print usage. * * @param opts parsed command line options */ private void printUsage(Options opts) { new HelpFormatter().printHelp("ApplicationMaster", opts); }
3.68
hadoop_SingleFilePerBlockCache_releaseLock
/** * Release the read or write lock. * * @param lockType type of the lock. */ private void releaseLock(LockType lockType) { if (LockType.READ == lockType) { lock.readLock().unlock(); } else if (LockType.WRITE == lockType) { lock.writeLock().unlock(); } }
3.68
hadoop_ConnectionPool_newConnection
/** * Creates a proxy wrapper for a client NN connection. Each proxy contains * context for a single user/security context. To maximize throughput it is * recommended to use multiple connection per user+server, allowing multiple * writes and reads to be dispatched in parallel. * * @param conf Configuration for the connection. * @param nnAddress Address of server supporting the ClientProtocol. * @param ugi User context. * @param proto Interface of the protocol. * @param enableMultiSocket Enable multiple socket or not. * @param socketIndex Index for FederationConnectionId. * @param alignmentContext Client alignment context. * @param <T> Input type T. * @return proto for the target ClientProtocol that contains the user's * security context. * @throws IOException If it cannot be created. */ protected static <T> ConnectionContext newConnection(Configuration conf, String nnAddress, UserGroupInformation ugi, Class<T> proto, boolean enableMultiSocket, int socketIndex, AlignmentContext alignmentContext) throws IOException { if (!PROTO_MAP.containsKey(proto)) { String msg = "Unsupported protocol for connection to NameNode: " + ((proto != null) ? proto.getName() : "null"); LOG.error(msg); throw new IllegalStateException(msg); } ProtoImpl classes = PROTO_MAP.get(proto); RPC.setProtocolEngine(conf, classes.protoPb, ProtobufRpcEngine2.class); final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(conf, HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT, HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY, HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT, HdfsConstants.SAFEMODE_EXCEPTION_CLASS_NAME); SocketFactory factory = SocketFactory.getDefault(); if (UserGroupInformation.isSecurityEnabled()) { SaslRpcServer.init(conf); } InetSocketAddress socket = NetUtils.createSocketAddr(nnAddress); final long version = RPC.getProtocolVersion(classes.protoPb); Object proxy; if (enableMultiSocket) { FederationConnectionId connectionId = new FederationConnectionId( socket, classes.protoPb, ugi, RPC.getRpcTimeout(conf), defaultPolicy, conf, socketIndex); proxy = RPC.getProtocolProxy(classes.protoPb, version, connectionId, conf, factory, alignmentContext).getProxy(); } else { proxy = RPC.getProtocolProxy(classes.protoPb, version, socket, ugi, conf, factory, RPC.getRpcTimeout(conf), defaultPolicy, null, alignmentContext).getProxy(); } T client = newProtoClient(proto, classes, proxy); Text dtService = SecurityUtil.buildTokenService(socket); ProxyAndInfo<T> clientProxy = new ProxyAndInfo<T>(client, dtService, socket); return new ConnectionContext(clientProxy, conf); }
3.68
flink_Pattern_begin
/** * Starts a new pattern sequence. The provided pattern is the initial pattern of the new * sequence. * * @param group the pattern to begin with * @return the first pattern of a pattern sequence */ public static <T, F extends T> GroupPattern<T, F> begin(Pattern<T, F> group) { return new GroupPattern<>( null, group, ConsumingStrategy.STRICT, AfterMatchSkipStrategy.noSkip()); }
3.68
hbase_MultiTableSnapshotInputFormatImpl_setSnapshotToScans
/** * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) */ public void setSnapshotToScans(Configuration conf, Map<String, Collection<Scan>> snapshotScans) throws IOException { // flatten out snapshotScans for serialization to the job conf List<Map.Entry<String, String>> snapshotToSerializedScans = Lists.newArrayList(); for (Map.Entry<String, Collection<Scan>> entry : snapshotScans.entrySet()) { String snapshotName = entry.getKey(); Collection<Scan> scans = entry.getValue(); // serialize all scans and map them to the appropriate snapshot for (Scan scan : scans) { snapshotToSerializedScans.add(new AbstractMap.SimpleImmutableEntry<>(snapshotName, TableMapReduceUtil.convertScanToString(scan))); } } ConfigurationUtil.setKeyValues(conf, SNAPSHOT_TO_SCANS_KEY, snapshotToSerializedScans); }
3.68
hbase_ServerManager_persistRegionLastFlushedSequenceIds
/** * Persist the last flushed sequence id of each region to HDFS * @throws IOException if the persist to HDFS fails */ private void persistRegionLastFlushedSequenceIds() throws IOException { if (isFlushSeqIdPersistInProgress) { return; } isFlushSeqIdPersistInProgress = true; try { Configuration conf = master.getConfiguration(); Path rootDir = CommonFSUtils.getRootDir(conf); Path lastFlushedSeqIdPath = new Path(rootDir, LAST_FLUSHED_SEQ_ID_FILE); FileSystem fs = FileSystem.get(conf); if (fs.exists(lastFlushedSeqIdPath)) { LOG.info("Rewriting .lastflushedseqids file at: " + lastFlushedSeqIdPath); if (!fs.delete(lastFlushedSeqIdPath, false)) { throw new IOException("Unable to remove existing " + lastFlushedSeqIdPath); } } else { LOG.info("Writing .lastflushedseqids file at: " + lastFlushedSeqIdPath); } FSDataOutputStream out = fs.create(lastFlushedSeqIdPath); FlushedSequenceId.Builder flushedSequenceIdBuilder = FlushedSequenceId.newBuilder(); try { for (Entry<byte[], Long> entry : flushedSequenceIdByRegion.entrySet()) { FlushedRegionSequenceId.Builder flushedRegionSequenceIdBuilder = FlushedRegionSequenceId.newBuilder(); flushedRegionSequenceIdBuilder.setRegionEncodedName(ByteString.copyFrom(entry.getKey())); flushedRegionSequenceIdBuilder.setSeqId(entry.getValue()); ConcurrentNavigableMap<byte[], Long> storeSeqIds = storeFlushedSequenceIdsByRegion.get(entry.getKey()); if (storeSeqIds != null) { for (Entry<byte[], Long> store : storeSeqIds.entrySet()) { FlushedStoreSequenceId.Builder flushedStoreSequenceIdBuilder = FlushedStoreSequenceId.newBuilder(); flushedStoreSequenceIdBuilder.setFamily(ByteString.copyFrom(store.getKey())); flushedStoreSequenceIdBuilder.setSeqId(store.getValue()); flushedRegionSequenceIdBuilder.addStores(flushedStoreSequenceIdBuilder); } } flushedSequenceIdBuilder.addRegionSequenceId(flushedRegionSequenceIdBuilder); } flushedSequenceIdBuilder.build().writeDelimitedTo(out); } finally { if (out != null) { out.close(); } } } finally { isFlushSeqIdPersistInProgress = false; } }
3.68
hibernate-validator_GetDeclaredMethod_andMakeAccessible
/** * Before using this method, you need to check the {@code HibernateValidatorPermission.ACCESS_PRIVATE_MEMBERS} * permission against the security manager. */ public static GetDeclaredMethod andMakeAccessible(Class<?> clazz, String methodName, Class<?>... parameterTypes) { return new GetDeclaredMethod( clazz, methodName, true, parameterTypes ); }
3.68
hbase_BucketEntry_access
/**
 * Block has been accessed. Update its local access counter.
 */
void access(long accessCounter) {
  this.accessCounter = accessCounter;
  if (this.priority == BlockPriority.SINGLE) {
    this.priority = BlockPriority.MULTI;
  }
}
3.68
flink_Tuple11_equals
/**
 * Deep equality for tuples by calling equals() on the tuple members.
 *
 * @param o the object checked for equality
 * @return true if this is equal to o.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (!(o instanceof Tuple11)) {
        return false;
    }
    @SuppressWarnings("rawtypes")
    Tuple11 tuple = (Tuple11) o;
    if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
        return false;
    }
    if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
        return false;
    }
    if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
        return false;
    }
    if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
        return false;
    }
    if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
        return false;
    }
    if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
        return false;
    }
    if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
        return false;
    }
    if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
        return false;
    }
    if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
        return false;
    }
    if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
        return false;
    }
    if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
        return false;
    }
    return true;
}
3.68
graphhopper_PrepareLandmarks_setMinimumNodes
/**
 * @see LandmarkStorage#setMinimumNodes(int)
 */
public void setMinimumNodes(int nodes) {
    if (nodes < 2)
        throw new IllegalArgumentException("minimum node count must be at least 2");
    lms.setMinimumNodes(nodes);
}
3.68
hadoop_StageConfig_withWriterQueueCapacity
/**
 * Set writer queue capacity.
 * @param value new value
 * @return the builder
 */
public StageConfig withWriterQueueCapacity(final int value) {
  writerQueueCapacity = value;
  return this;
}
3.68
hudi_HoodieCombineHiveInputFormat_getStartOffsets
/**
 * Returns an array containing the start offsets of the files in the split.
 */
@Override
public long[] getStartOffsets() {
  return inputSplitShim.getStartOffsets();
}
3.68
flink_DateTimeUtils_monthly
/** Whether this is in the YEAR-TO-MONTH family of intervals. */
public boolean monthly() {
    return ordinal() <= MONTH.ordinal();
}
3.68
hadoop_TypedBytesInput_readRawLong
/**
 * Reads the raw bytes following a <code>Type.LONG</code> code.
 * @return the obtained bytes sequence
 * @throws IOException
 */
public byte[] readRawLong() throws IOException {
  byte[] bytes = new byte[9];
  bytes[0] = (byte) Type.LONG.code;
  in.readFully(bytes, 1, 8);
  return bytes;
}
3.68
dubbo_AbstractClusterInvoker_reselect
/**
 * Reselect: prefer invokers not in `selected`; if all invokers are already in `selected`,
 * just pick an available one using the load balance policy.
 *
 * @param loadbalance load balance policy
 * @param invocation invocation
 * @param invokers invoker candidates
 * @param selected invokers already selected in previous attempts (excluded where possible)
 * @param availableCheck check invoker available if true
 * @return the reselect result to do invoke
 * @throws RpcException exception
 */
private Invoker<T> reselect(
        LoadBalance loadbalance,
        Invocation invocation,
        List<Invoker<T>> invokers,
        List<Invoker<T>> selected,
        boolean availableCheck)
        throws RpcException {

    // Allocating one in advance, this list is certain to be used.
    List<Invoker<T>> reselectInvokers = new ArrayList<>(Math.min(invokers.size(), reselectCount));

    // 1. Try picking some invokers not in `selected`.
    // 1.1. If the number of selectable invokers is smaller than reselectCount, just add them all.
    // 1.2. Otherwise, randomly sample reselectCount of them.
    // The result may be smaller than reselectCount (possibly zero) because of unavailable or
    // duplicate invokers, so reselectInvokers can be empty even though the full invoker list
    // still contains available invokers.
    // Capping the work at reselectCount keeps retries cheap when the invoker list is huge,
    // which could otherwise cause a long hang.
    if (reselectCount >= invokers.size()) {
        for (Invoker<T> invoker : invokers) {
            // check if available
            if (availableCheck && !invoker.isAvailable()) {
                // add to invalidate invoker
                invalidateInvoker(invoker);
                continue;
            }
            if (selected == null || !selected.contains(invoker)) {
                reselectInvokers.add(invoker);
            }
        }
    } else {
        for (int i = 0; i < reselectCount; i++) {
            // select one randomly
            Invoker<T> invoker = invokers.get(ThreadLocalRandom.current().nextInt(invokers.size()));
            // check if available
            if (availableCheck && !invoker.isAvailable()) {
                // add to invalidate invoker
                invalidateInvoker(invoker);
                continue;
            }
            // de-duplication
            if (selected == null
                    || !selected.contains(invoker)
                    || !reselectInvokers.contains(invoker)) {
                reselectInvokers.add(invoker);
            }
        }
    }

    // 2. Use loadBalance to select one (all the reselectInvokers are available)
    if (!reselectInvokers.isEmpty()) {
        return loadbalance.select(reselectInvokers, getUrl(), invocation);
    }

    // 3. reselectInvokers is empty. Unable to find at least one available invoker.
    // Re-check all the selected invokers. If some in the selected list are available,
    // add them to reselectInvokers.
    if (selected != null) {
        for (Invoker<T> invoker : selected) {
            if ((invoker.isAvailable()) // available first
                    && !reselectInvokers.contains(invoker)) {
                reselectInvokers.add(invoker);
            }
        }
    }

    // 4. If reselectInvokers is not empty after the re-check,
    // pick an available invoker using the loadBalance policy.
    if (!reselectInvokers.isEmpty()) {
        return loadbalance.select(reselectInvokers, getUrl(), invocation);
    }

    // 5. No invoker matched, return null.
    return null;
}
3.68
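A self-contained sketch (plain JDK, not the Dubbo API) of the random sampling step described above: pick up to reselectCount candidates at random, skipping unavailable ones. Unlike the snippet's condition, this sketch de-duplicates strictly against both lists.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Predicate;

public class ReselectSamplingSketch {

    /**
     * Sample up to {@code reselectCount} candidates at random, skipping unavailable ones
     * and avoiding entries that were already tried or already picked.
     */
    public static <T> List<T> sample(List<T> candidates, List<T> alreadyTried,
                                     Predicate<T> isAvailable, int reselectCount) {
        List<T> picked = new ArrayList<>(Math.min(candidates.size(), reselectCount));
        for (int i = 0; i < reselectCount; i++) {
            T candidate = candidates.get(ThreadLocalRandom.current().nextInt(candidates.size()));
            if (!isAvailable.test(candidate)) {
                continue; // drop unavailable candidates, mirroring invalidateInvoker()
            }
            if (!alreadyTried.contains(candidate) && !picked.contains(candidate)) {
                picked.add(candidate);
            }
        }
        return picked; // may be empty; the caller then falls back to the selected list
    }
}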
hudi_TableServicePipeline_add
/**
 * Add a table service task to the end of the table service pipeline. Tasks are executed in
 * FIFO order.
 *
 * @param task table service task to run in pipeline.
 */
public void add(TableServiceTask task) {
  tableServiceTasks.add(task);
}
3.68
hbase_LockProcedure_unlock
// Can be called before procedure gets scheduled, in which case, the execute() will finish
// immediately and release the underlying locks.
public void unlock(final MasterProcedureEnv env) {
  unlock.set(true);
  locked.set(false);
  // Maybe timeout already awakened the event and the procedure has finished.
  synchronized (event) {
    if (!event.isReady() && suspended) {
      setState(ProcedureProtos.ProcedureState.RUNNABLE);
      event.wake(env.getProcedureScheduler());
      suspended = false;
    }
  }
}
3.68
flink_AsyncSinkWriter_createNextAvailableBatch
/**
 * Creates the next batch of request entries while respecting the {@code maxBatchSize} and
 * {@code maxBatchSizeInBytes}. Also adds these to the metrics counters.
 */
private List<RequestEntryT> createNextAvailableBatch(RequestInfo requestInfo) {
    List<RequestEntryT> batch = new ArrayList<>(requestInfo.getBatchSize());

    long batchSizeBytes = 0;
    for (int i = 0; i < requestInfo.getBatchSize(); i++) {
        long requestEntrySize = bufferedRequestEntries.peek().getSize();
        if (batchSizeBytes + requestEntrySize > maxBatchSizeInBytes) {
            break;
        }
        RequestEntryWrapper<RequestEntryT> elem = bufferedRequestEntries.remove();
        batch.add(elem.getRequestEntry());
        bufferedRequestEntriesTotalSizeInBytes -= requestEntrySize;
        batchSizeBytes += requestEntrySize;
    }

    numRecordsOutCounter.inc(batch.size());
    numBytesOutCounter.inc(batchSizeBytes);

    return batch;
}
3.68
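A self-contained sketch (plain JDK, not the Flink API) of the batching idea above: drain entries from the head of a queue until either the count limit or the byte limit would be exceeded.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.function.ToLongFunction;

public class ByteCappedBatchSketch {

    /** Drain up to maxCount entries from the head of the queue without exceeding maxBytes. */
    public static <E> List<E> nextBatch(Deque<E> buffered, ToLongFunction<E> sizeOf,
                                        int maxCount, long maxBytes) {
        List<E> batch = new ArrayList<>(maxCount);
        long batchBytes = 0;
        while (batch.size() < maxCount && !buffered.isEmpty()) {
            long entrySize = sizeOf.applyAsLong(buffered.peek());
            if (batchBytes + entrySize > maxBytes) {
                break; // the next entry would push the batch over the byte limit
            }
            batch.add(buffered.remove());
            batchBytes += entrySize;
        }
        return batch;
    }

    public static void main(String[] args) {
        Deque<String> queue = new ArrayDeque<>(List.of("aa", "bbbb", "cccccc"));
        // With a 7-byte cap, only "aa" and "bbbb" fit into the first batch.
        System.out.println(nextBatch(queue, String::length, 10, 7)); // [aa, bbbb]
    }
}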
hadoop_CallableSupplier_submit
/**
 * Submit a callable into a completable future.
 * RTEs are rethrown.
 * Non-RTEs are caught and wrapped; IOExceptions become
 * {@code RuntimeIOException} instances.
 * @param executor executor.
 * @param auditSpan audit span (or null)
 * @param call call to invoke
 * @param <T> type
 * @return the future to wait for
 */
@SuppressWarnings("unchecked")
public static <T> CompletableFuture<T> submit(
    final Executor executor,
    final AuditSpan auditSpan,
    final Callable<T> call) {
  return CompletableFuture.supplyAsync(
      new CallableSupplier<T>(auditSpan, call),
      executor);
}
3.68
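A plain-JDK sketch (not Hadoop's CallableSupplier, and without the audit-span parameter) of the same pattern: adapt a Callable to CompletableFuture.supplyAsync, rethrowing runtime exceptions and wrapping checked ones.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CallableToFutureSketch {

    /** Wrap a Callable as a Supplier so it can feed CompletableFuture.supplyAsync. */
    public static <T> CompletableFuture<T> submit(Executor executor, Callable<T> call) {
        return CompletableFuture.supplyAsync(() -> {
            try {
                return call.call();
            } catch (RuntimeException e) {
                throw e;                           // runtime exceptions are rethrown as-is
            } catch (IOException e) {
                throw new UncheckedIOException(e); // IOExceptions get an unchecked wrapper
            } catch (Exception e) {
                throw new RuntimeException(e);     // other checked exceptions are wrapped too
            }
        }, executor);
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletableFuture<String> f = submit(pool, () -> "done");
        System.out.println(f.join());
        pool.shutdown();
    }
}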
pulsar_Topics_createShadowTopic
/**
 * Create a new shadow topic; see {@link #createShadowTopic(String, String, Map)} for details.
 */
default void createShadowTopic(String shadowTopic, String sourceTopic) throws PulsarAdminException {
  createShadowTopic(shadowTopic, sourceTopic, null);
}
3.68
morf_OracleDialect_getSqlForRandomString
/**
 * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForRandomString(org.alfasoftware.morf.sql.element.Function)
 */
@Override
protected String getSqlForRandomString(Function function) {
  return String.format("dbms_random.string('A', %s)", getSqlFrom(function.getArguments().get(0)));
}
3.68
rocketmq-connect_ExpressionBuilder_setQuoteIdentifiers
/**
 * Set when this expression builder should quote identifiers, such as table and column names.
 *
 * @param method the quoting method; may be null if the default method
 *               ({@link QuoteMethod#ALWAYS always}) should be used
 * @return this expression builder; never null
 */
public ExpressionBuilder setQuoteIdentifiers(QuoteMethod method) {
  this.quoteSqlIdentifiers = method != null ? method : DEFAULT_QUOTE_METHOD;
  return this;
}
3.68
flink_Pattern_allowCombinations
/**
 * Applicable only to {@link Quantifier#looping(ConsumingStrategy)} and {@link
 * Quantifier#times(ConsumingStrategy)} patterns, this option allows more flexibility to the
 * matching events.
 *
 * <p>If {@code allowCombinations()} is not applied for a pattern {@code
 * A.oneOrMore().followedBy(B)} and a sequence of events {@code A1 A2 B} appears, this will
 * generate patterns: {@code A1 B} and {@code A1 A2 B}. If this method is applied, we will have
 * {@code A1 B}, {@code A2 B} and {@code A1 A2 B}.
 *
 * @return The same pattern with the updated quantifier.
 * @throws MalformedPatternException if the quantifier is not applicable to this pattern.
 */
public Pattern<T, F> allowCombinations() {
    quantifier.combinations();
    return this;
}
3.68
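A usage sketch (not part of the dataset) matching the Javadoc's example: a looping pattern followed by B, with allowCombinations() applied; the Event type and its field are assumptions for illustration.

import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;

public class AllowCombinationsSketch {

    /** Hypothetical event type used only for this sketch. */
    public static class Event {
        public String type;
    }

    public static Pattern<Event, ?> build() {
        // For events A1 A2 B this matches A1 B, A2 B and A1 A2 B instead of only
        // A1 B and A1 A2 B.
        return Pattern.<Event>begin("as")
                .where(new SimpleCondition<Event>() {
                    @Override
                    public boolean filter(Event e) {
                        return "A".equals(e.type);
                    }
                })
                .oneOrMore()
                .allowCombinations()
                .followedBy("b")
                .where(new SimpleCondition<Event>() {
                    @Override
                    public boolean filter(Event e) {
                        return "B".equals(e.type);
                    }
                });
    }
}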
cron-utils_TimeNode_getNearestForwardValue
/**
 * Returns the reference value itself if it matches, or the next matching value if it does not.
 * Shifts are then applied, so the same value is returned when no shift is requested.
 *
 * @param reference     - reference value
 * @param shiftsToApply - shifts to apply
 * @return NearestValue instance, never null. Holds information on nearest (forward) value and shifts performed.
 */
@VisibleForTesting
NearestValue getNearestForwardValue(final int reference, int shiftsToApply) {
    final List<Integer> temporaryValues = new ArrayList<>(this.values);
    int index = 0;
    boolean foundGreater = false;
    final AtomicInteger shift = new AtomicInteger(0);
    if (!temporaryValues.contains(reference)) {
        for (final Integer value : temporaryValues) {
            if (value > reference) {
                index = temporaryValues.indexOf(value);
                shiftsToApply--; // we just moved a position!
                foundGreater = true;
                break;
            }
        }
        if (!foundGreater) {
            shift.incrementAndGet();
        }
    } else {
        index = temporaryValues.indexOf(reference);
    }
    int value = temporaryValues.get(index);
    for (int j = 0; j < shiftsToApply; j++) {
        value = getValueFromList(temporaryValues, index + 1, shift);
        index = temporaryValues.indexOf(value);
    }
    return new NearestValue(value, shift.get());
}
3.68
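A plain-JDK sketch (not the cron-utils class, and ignoring the additional shiftsToApply handling) of the forward lookup described above, with a small worked example.

import java.util.List;

public class NearestForwardSketch {

    /**
     * Find the first value >= reference in an ascending list, wrapping to the first element
     * (and counting one "shift") when every value is smaller, as the snippet above does.
     */
    public static int[] nearestForward(List<Integer> values, int reference) {
        for (int v : values) {
            if (v >= reference) {
                return new int[] {v, 0};
            }
        }
        return new int[] {values.get(0), 1}; // wrapped around: one shift performed
    }

    public static void main(String[] args) {
        List<Integer> minutes = List.of(5, 20, 35, 50);
        System.out.println(nearestForward(minutes, 22)[0]); // 35, same cycle
        System.out.println(nearestForward(minutes, 55)[1]); // 1, wrapped to the next cycle
    }
}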