name | code_snippet | score |
---|---|---|
hudi_PartialUpdateAvroPayload_overwriteField | /**
* Return true if the value is null, in which case the incoming field does not overwrite the existing one; otherwise false.
*/
public Boolean overwriteField(Object value, Object defaultValue) {
return value == null;
} | 3.68 |
dubbo_AbstractJSONImpl_checkStringList | /**
* Casts a list of unchecked JSON values to a list of String. If the given list
* contains a value that is not a String, throws an exception.
*/
@SuppressWarnings("unchecked")
@Override
public List<String> checkStringList(List<?> rawList) {
assert rawList != null;
for (int i = 0; i < rawList.size(); i++) {
if (!(rawList.get(i) instanceof String)) {
throw new ClassCastException(
String.format("value '%s' for idx %d in '%s' is not string", rawList.get(i), i, rawList));
}
}
return (List<String>) rawList;
} | 3.68 |
querydsl_ComparableExpression_nullif | /**
* Create a {@code nullif(this, other)} expression
*
* @param other the value to compare against
* @return nullif(this, other)
*/
@Override
public ComparableExpression<T> nullif(T other) {
return nullif(ConstantImpl.create(other));
} | 3.68 |
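A minimal usage sketch of `nullif` as documented above. This is a fragment, not a full program; the path name `status` and the sentinel value `"UNKNOWN"` are illustrative only.

```java
// Fragment; assumes com.querydsl.core.types.dsl.{Expressions, StringPath, ComparableExpression}.
// nullif(a, b) evaluates to SQL NULL when both arguments are equal, otherwise to a.
StringPath status = Expressions.stringPath("status");
ComparableExpression<String> normalized = status.nullif("UNKNOWN");
// renders roughly as: nullif(status, 'UNKNOWN')
```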
hadoop_NMClientAsync_onContainerReInitializeError | /**
* Error Callback for container re-initialization request.
*
* @param containerId the Id of the container to be Re-Initialized.
* @param t a Throwable.
*/
public void onContainerReInitializeError(ContainerId containerId,
Throwable t) {} | 3.68 |
hbase_OrderedBytes_isEncodedValue | /**
* Returns true when {@code src} appears to be positioned at an encoded value, false otherwise.
*/
public static boolean isEncodedValue(PositionedByteRange src) {
return isNull(src) || isNumeric(src) || isFixedInt8(src) || isFixedInt16(src)
|| isFixedInt32(src) || isFixedInt64(src) || isFixedFloat32(src) || isFixedFloat64(src)
|| isText(src) || isBlobCopy(src) || isBlobVar(src);
} | 3.68 |
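A hedged usage sketch of the check above: encode a value into a positioned range, rewind, and verify it is recognised before decoding. The classes and calls are taken from the same OrderedBytes/PositionedByteRange API as the snippet, but treat the exact signatures as an approximation.

```java
// Fragment; assumes org.apache.hadoop.hbase.util.{OrderedBytes, Order,
// PositionedByteRange, SimplePositionedMutableByteRange}.
PositionedByteRange buf = new SimplePositionedMutableByteRange(16);
OrderedBytes.encodeInt32(buf, 42, Order.ASCENDING);
buf.setPosition(0);                              // rewind to the start of the encoding
if (OrderedBytes.isEncodedValue(buf)) {          // true: a fixed-int32 header is present
  int decoded = OrderedBytes.decodeInt32(buf);   // 42
}
```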
zxing_HybridBinarizer_calculateThresholdForBlock | /**
* For each block in the image, calculate the average black point using a 5x5 grid
* of the blocks around it. Also handles the corner cases (fractional blocks are computed based
* on the last pixels in the row/column which are also used in the previous block).
*/
private static void calculateThresholdForBlock(byte[] luminances,
int subWidth,
int subHeight,
int width,
int height,
int[][] blackPoints,
BitMatrix matrix) {
int maxYOffset = height - BLOCK_SIZE;
int maxXOffset = width - BLOCK_SIZE;
for (int y = 0; y < subHeight; y++) {
int yoffset = y << BLOCK_SIZE_POWER;
if (yoffset > maxYOffset) {
yoffset = maxYOffset;
}
int top = cap(y, subHeight - 3);
for (int x = 0; x < subWidth; x++) {
int xoffset = x << BLOCK_SIZE_POWER;
if (xoffset > maxXOffset) {
xoffset = maxXOffset;
}
int left = cap(x, subWidth - 3);
int sum = 0;
for (int z = -2; z <= 2; z++) {
int[] blackRow = blackPoints[top + z];
sum += blackRow[left - 2] + blackRow[left - 1] + blackRow[left] + blackRow[left + 1] + blackRow[left + 2];
}
int average = sum / 25;
thresholdBlock(luminances, xoffset, yoffset, average, width, matrix);
}
}
} | 3.68 |
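The loop above relies on a `cap(...)` helper that is not part of this snippet. A sketch consistent with how it is used here (clamping the block index so the 5x5 neighbourhood `top-2..top+2` / `left-2..left+2` stays inside `blackPoints`) would be:

```java
// Assumption: mirrors the helper referenced above; the real implementation may differ.
// Clamps value into [2, max] so a 5x5 window centered on the result never leaves the grid.
private static int cap(int value, int max) {
  return value < 2 ? 2 : Math.min(value, max);
}
```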
flink_ExecutionConfig_getClosureCleanerLevel | /** Returns the configured {@link ClosureCleanerLevel}. */
public ClosureCleanerLevel getClosureCleanerLevel() {
return configuration.get(PipelineOptions.CLOSURE_CLEANER_LEVEL);
} | 3.68 |
querydsl_DateTimeExpression_nullif | /**
* Create a {@code nullif(this, other)} expression
*
* @param other
* @return nullif(this, other)
*/
@Override
public DateTimeExpression<T> nullif(T other) {
return nullif(ConstantImpl.create(other));
} | 3.68 |
flink_HiveParserBaseSemanticAnalyzer_getUnescapedUnqualifiedTableName | /**
* Get the unqualified name from a table node. This method works for table names qualified with
* their schema (e.g., "catalog.db.table") and table names without schema qualification. In both
* cases, it returns the table name without the schema.
*
* @param node the table node
* @return the table name without schema qualification (i.e., if name is "catalog.db.table" or
* "table", returns "table")
*/
public static String getUnescapedUnqualifiedTableName(HiveParserASTNode node)
throws SemanticException {
assert node.getChildCount() <= 3;
node = (HiveParserASTNode) node.getChild(node.getChildCount() - 1);
return getUnescapedName(node);
} | 3.68 |
hbase_HttpServer_setName | /**
* @see #setAppDir(String)
* @deprecated Since 0.99.0. Use {@link #setAppDir(String)} instead.
*/
@Deprecated
public Builder setName(String name) {
this.name = name;
return this;
} | 3.68 |
hadoop_WritableName_getName | /**
* Return the name for a class.
* Default is {@link Class#getName()}.
* @param writableClass input writableClass.
* @return name for a class.
*/
public static synchronized String getName(Class<?> writableClass) {
String name = CLASS_TO_NAME.get(writableClass);
if (name != null)
return name;
return writableClass.getName();
} | 3.68 |
hbase_StoreFileTrackerValidationUtils_checkForModifyTable | /**
* Pre check when modifying a table.
* <p/>
* The basic idea is that when you want to change the store file tracker implementation, you should first
* switch to {@link Trackers#MIGRATION} and then change to the destination store file tracker
* implementation.
* <p/>
* There are several rules:
* <ul>
* <li>For newly added family, you should not use {@link Trackers#MIGRATION}.</li>
* <li>For modifying a family:
* <ul>
* <li>If old tracker is {@link Trackers#MIGRATION}, then:
* <ul>
* <li>The new tracker is also {@link Trackers#MIGRATION}, then they must have the same src and
* dst tracker.</li>
* <li>The new tracker is not {@link Trackers#MIGRATION}, then the new tracker must be the dst
* tracker of the old tracker.</li>
* </ul>
* </li>
* <li>If the old tracker is not {@link Trackers#MIGRATION}, then:
* <ul>
* <li>If the new tracker is {@link Trackers#MIGRATION}, then the old tracker must be the src
* tracker of the new tracker.</li>
* <li>If the new tracker is not {@link Trackers#MIGRATION}, then the new tracker must be the same
* as the old tracker.</li>
* </ul>
* </li>
* </ul>
* </li>
* </ul>
* @throws IOException when there are check errors, the upper layer should fail the
* {@code ModifyTableProcedure}.
*/
public static void checkForModifyTable(Configuration conf, TableDescriptor oldTable,
TableDescriptor newTable, boolean isTableDisabled) throws IOException {
for (ColumnFamilyDescriptor newFamily : newTable.getColumnFamilies()) {
ColumnFamilyDescriptor oldFamily = oldTable.getColumnFamily(newFamily.getName());
if (oldFamily == null) {
checkForNewFamily(conf, newTable, newFamily);
continue;
}
Configuration oldConf = StoreUtils.createStoreConfiguration(conf, oldTable, oldFamily);
Configuration newConf = StoreUtils.createStoreConfiguration(conf, newTable, newFamily);
Class<? extends StoreFileTracker> oldTracker =
StoreFileTrackerFactory.getTrackerClass(oldConf);
Class<? extends StoreFileTracker> newTracker =
StoreFileTrackerFactory.getTrackerClass(newConf);
if (MigrationStoreFileTracker.class.isAssignableFrom(oldTracker)) {
Class<? extends StoreFileTracker> oldSrcTracker =
MigrationStoreFileTracker.getSrcTrackerClass(oldConf);
Class<? extends StoreFileTracker> oldDstTracker =
MigrationStoreFileTracker.getDstTrackerClass(oldConf);
if (oldTracker.equals(newTracker)) {
// confirm that we have the same src tracker and dst tracker
Class<? extends StoreFileTracker> newSrcTracker =
MigrationStoreFileTracker.getSrcTrackerClass(newConf);
if (!oldSrcTracker.equals(newSrcTracker)) {
throw new DoNotRetryIOException("The src tracker has been changed from "
+ StoreFileTrackerFactory.getStoreFileTrackerName(oldSrcTracker) + " to "
+ StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family "
+ newFamily.getNameAsString() + " of table " + newTable.getTableName());
}
Class<? extends StoreFileTracker> newDstTracker =
MigrationStoreFileTracker.getDstTrackerClass(newConf);
if (!oldDstTracker.equals(newDstTracker)) {
throw new DoNotRetryIOException("The dst tracker has been changed from "
+ StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " to "
+ StoreFileTrackerFactory.getStoreFileTrackerName(newDstTracker) + " for family "
+ newFamily.getNameAsString() + " of table " + newTable.getTableName());
}
} else {
// do not allow changing from MIGRATION to its dst SFT implementation while the table is
// disabled. We need to open the HRegion to migrate the tracking information while the SFT
// implementation is MIGRATION, otherwise we may lose data. See HBASE-26611 for more
// details.
if (isTableDisabled) {
throw new TableNotEnabledException(
"Should not change store file tracker implementation from "
+ StoreFileTrackerFactory.Trackers.MIGRATION.name() + " while table "
+ newTable.getTableName() + " is disabled");
}
// we can only change to the dst tracker
if (!newTracker.equals(oldDstTracker)) {
throw new DoNotRetryIOException("Should migrate tracker to "
+ StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " but got "
+ StoreFileTrackerFactory.getStoreFileTrackerName(newTracker) + " for family "
+ newFamily.getNameAsString() + " of table " + newTable.getTableName());
}
}
} else {
if (!oldTracker.equals(newTracker)) {
// can only change to MigrationStoreFileTracker and the src tracker should be the old
// tracker
if (!MigrationStoreFileTracker.class.isAssignableFrom(newTracker)) {
throw new DoNotRetryIOException(
"Should change to " + Trackers.MIGRATION + " first when migrating from "
+ StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family "
+ newFamily.getNameAsString() + " of table " + newTable.getTableName());
}
// here we do not check whether the table is disabled, as after changing to MIGRATION, we
// still rely on the src SFT implementation to actually load the store files, so there
// will be no data loss problem.
Class<? extends StoreFileTracker> newSrcTracker =
MigrationStoreFileTracker.getSrcTrackerClass(newConf);
if (!oldTracker.equals(newSrcTracker)) {
throw new DoNotRetryIOException("Should use src tracker "
+ StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " first but got "
+ StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker)
+ " when migrating from "
+ StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family "
+ newFamily.getNameAsString() + " of table " + newTable.getTableName());
}
Class<? extends StoreFileTracker> newDstTracker =
MigrationStoreFileTracker.getDstTrackerClass(newConf);
// the src and dst tracker should not be the same
if (newSrcTracker.equals(newDstTracker)) {
throw new DoNotRetryIOException("The src tracker and dst tracker are both "
+ StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family "
+ newFamily.getNameAsString() + " of table " + newTable.getTableName());
}
}
}
}
} | 3.68 |
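A hedged sketch of the two-step alter that the rules above enforce (switch to MIGRATION first, then to the destination tracker). The property keys below are assumptions based on the store file tracker factory naming and may differ between HBase versions; `admin` and `existing` are illustrative placeholders.

```java
// Step 1: move the table onto the MIGRATION tracker, naming the old (src) and
// new (dst) implementations. Property keys are assumptions.
TableDescriptor step1 = TableDescriptorBuilder.newBuilder(existing)
    .setValue("hbase.store.file-tracker.impl", "MIGRATION")
    .setValue("hbase.store.file-tracker.migration.src.impl", "DEFAULT")
    .setValue("hbase.store.file-tracker.migration.dst.impl", "FILE")
    .build();
admin.modifyTable(step1);

// Step 2: once migration has run, switch to the dst implementation
// (the only transition out of MIGRATION that the checks above allow).
TableDescriptor step2 = TableDescriptorBuilder.newBuilder(step1)
    .setValue("hbase.store.file-tracker.impl", "FILE")
    .build();
admin.modifyTable(step2);
```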
hmily_HmilyLogicalOperator_valueFrom | /**
* Get logical operator value from text.
*
* @param text text
* @return logical operator value
*/
public static Optional<HmilyLogicalOperator> valueFrom(final String text) {
return Arrays.stream(values()).filter(each -> each.texts.contains(text)).findFirst();
} | 3.68 |
hadoop_TypedBytesInput_readRaw | /**
* Reads a typed bytes sequence. The first byte is interpreted as a type code,
* and then the right number of subsequent bytes are read depending on the
* obtained type.
*
* @return the obtained typed bytes sequence or null when the end of the file
* is reached
* @throws IOException
*/
public byte[] readRaw() throws IOException {
int code = -1;
try {
code = in.readUnsignedByte();
} catch (EOFException eof) {
return null;
}
if (code == Type.BYTES.code) {
return readRawBytes();
} else if (code == Type.BYTE.code) {
return readRawByte();
} else if (code == Type.BOOL.code) {
return readRawBool();
} else if (code == Type.INT.code) {
return readRawInt();
} else if (code == Type.LONG.code) {
return readRawLong();
} else if (code == Type.FLOAT.code) {
return readRawFloat();
} else if (code == Type.DOUBLE.code) {
return readRawDouble();
} else if (code == Type.STRING.code) {
return readRawString();
} else if (code == Type.VECTOR.code) {
return readRawVector();
} else if (code == Type.LIST.code) {
return readRawList();
} else if (code == Type.MAP.code) {
return readRawMap();
} else if (code == Type.MARKER.code) {
return null;
} else if (50 <= code && code <= 200) { // application-specific typecodes
return readRawBytes(code);
} else {
throw new RuntimeException("unknown type");
}
} | 3.68 |
morf_UpgradePathFinder_determinePath | /**
* Determines the upgrade path between two schemas. If no path can be found a
* {@link NoUpgradePathExistsException} is thrown.
*
* @param current The current schema to be upgraded.
* @param target The target schema to upgrade to.
* @param exceptionRegexes Regular expressions for the table exceptions.
* @return An ordered list of upgrade steps between the two schemas.
* @throws NoUpgradePathExistsException if no upgrade path exists between the database schema and the application schema.
*/
public SchemaChangeSequence determinePath(Schema current, Schema target, Collection<String> exceptionRegexes) throws NoUpgradePathExistsException {
SchemaChangeSequence schemaChangeSequence = getSchemaChangeSequence();
// We have changes to make. Apply them against the current schema to see whether they get us the right position
Schema trialUpgradedSchema = schemaChangeSequence.applyToSchema(current);
if (schemasNotMatch(target, trialUpgradedSchema, APPLICATION_SCHEMA, UPGRADED_SCHEMA, exceptionRegexes)) {
throw new NoUpgradePathExistsException();
}
// Now reverse-apply those changes to see whether they get us back to where we started
Schema reversal = schemaChangeSequence.applyInReverseToSchema(trialUpgradedSchema);
if (schemasNotMatch(reversal, current, REVERSED_SCHEMA, CURRENT_SCHEMA, exceptionRegexes)) {
throw new IllegalStateException("Upgrade reversals are invalid");
}
return schemaChangeSequence;
} | 3.68 |
hbase_QuotaCache_getNamespaceLimiter | /**
* Returns the limiter associated to the specified namespace.
* @param namespace the namespace to limit
* @return the limiter associated to the specified namespace
*/
public QuotaLimiter getNamespaceLimiter(final String namespace) {
return getQuotaState(this.namespaceQuotaCache, namespace).getGlobalLimiter();
} | 3.68 |
hadoop_RenameOperation_completeActiveCopiesAndDeleteSources | /**
* Block waiting for any active copies to finish
* then delete all queued keys + paths to delete.
* @param reason reason for logs
* @throws IOException failure.
*/
@Retries.RetryTranslated
private void completeActiveCopiesAndDeleteSources(String reason)
throws IOException {
completeActiveCopies(reason);
removeSourceObjects(
keysToDelete
);
// now reset the lists.
keysToDelete.clear();
} | 3.68 |
flink_SchedulerBase_computeVertexParallelismStore | /**
* Compute the {@link VertexParallelismStore} for all vertices of a given job graph, which will
* set defaults and ensure that the returned store contains valid parallelisms.
*
* @param jobGraph the job graph to retrieve vertices from
* @return the computed parallelism store
*/
public static VertexParallelismStore computeVertexParallelismStore(JobGraph jobGraph) {
return computeVertexParallelismStore(jobGraph.getVertices());
} | 3.68 |
morf_UpgradeTestHelper_instantiateAndValidateUpgradeSteps | /**
* Turn the list of classes into a list of objects.
*/
private List<UpgradeStep> instantiateAndValidateUpgradeSteps(Iterable<Class<? extends UpgradeStep>> stepClasses) {
return Streams.stream(stepClasses)
.map(stepClass -> {
UpgradeStep upgradeStep;
try {
Constructor<? extends UpgradeStep> constructor = stepClass.getDeclaredConstructor();
// Permit package-protected classes
constructor.setAccessible(true);
upgradeStep = constructor.newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
validateUpgradeStep(upgradeStep);
return upgradeStep;
})
.collect(Collectors.toList());
} | 3.68 |
hbase_OrderedBytes_normalize | /**
* Strip all trailing zeros (so that the last digit is non-zero) and round using our default
* context to ensure precision doesn't exceed max allowed. From Phoenix's {@code NumberUtil}.
* @return new {@link BigDecimal} instance
*/
static BigDecimal normalize(BigDecimal val) {
return null == val ? null : val.stripTrailingZeros().round(DEFAULT_MATH_CONTEXT);
} | 3.68 |
hudi_HoodieTableConfig_storeProperties | /**
* Write the properties to the given output stream and return the table checksum.
*
* @param props - properties to be written
* @param outputStream - output stream to which properties will be written
* @return the table checksum
* @throws IOException
*/
private static String storeProperties(Properties props, FSDataOutputStream outputStream) throws IOException {
final String checksum;
if (isValidChecksum(props)) {
checksum = props.getProperty(TABLE_CHECKSUM.key());
props.store(outputStream, "Updated at " + Instant.now());
} else {
Properties propsWithChecksum = getOrderedPropertiesWithTableChecksum(props);
propsWithChecksum.store(outputStream, "Properties saved on " + Instant.now());
checksum = propsWithChecksum.getProperty(TABLE_CHECKSUM.key());
props.setProperty(TABLE_CHECKSUM.key(), checksum);
}
return checksum;
} | 3.68 |
hudi_HoodieInputFormatUtils_filterInstantsTimeline | /**
* Filter any specific instants that we do not want to process.
* example timeline:
* <p>
* t0 -> create bucket1.parquet
* t1 -> create and append updates bucket1.log
* t2 -> request compaction
* t3 -> create bucket2.parquet
* <p>
* if compaction at t2 takes a long time, incremental readers on RO tables can move to t3 and would skip updates in t1
* <p>
* To work around this problem, we want to stop returning data belonging to commits > t2.
* After compaction is complete, incremental reader would see updates in t2, t3, so on.
*
* @param timeline the write timeline to filter
* @return the filtered timeline, excluding instants at or after the earliest pending compaction
*/
public static HoodieDefaultTimeline filterInstantsTimeline(HoodieDefaultTimeline timeline) {
HoodieDefaultTimeline commitsAndCompactionTimeline = timeline.getWriteTimeline();
Option<HoodieInstant> pendingCompactionInstant = commitsAndCompactionTimeline
.filterPendingCompactionTimeline().firstInstant();
if (pendingCompactionInstant.isPresent()) {
HoodieDefaultTimeline instantsTimeline = commitsAndCompactionTimeline
.findInstantsBefore(pendingCompactionInstant.get().getTimestamp());
int numCommitsFilteredByCompaction = commitsAndCompactionTimeline.getCommitsTimeline().countInstants()
- instantsTimeline.getCommitsTimeline().countInstants();
LOG.info("Earliest pending compaction instant is: " + pendingCompactionInstant.get().getTimestamp()
+ " skipping " + numCommitsFilteredByCompaction + " commits");
return instantsTimeline;
} else {
return timeline;
}
} | 3.68 |
hbase_ZKAuthentication_isSecureZooKeeper | /**
* Returns {@code true} when secure authentication is enabled, i.e. when
* {@code hbase.security.authentication} is set to "{@code kerberos}".
*/
public static boolean isSecureZooKeeper(Configuration conf) {
// Detection for embedded HBase client with jaas configuration
// defined for third party programs.
try {
javax.security.auth.login.Configuration testConfig =
javax.security.auth.login.Configuration.getConfiguration();
if (
testConfig.getAppConfigurationEntry("Client") == null
&& testConfig
.getAppConfigurationEntry(JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME) == null
&& testConfig
.getAppConfigurationEntry(JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME) == null
&& conf.get(HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL) == null
&& conf.get(HConstants.ZK_SERVER_KERBEROS_PRINCIPAL) == null
) {
return false;
}
} catch (Exception e) {
// No Jaas configuration defined.
return false;
}
// Master & RSs uses hbase.zookeeper.client.*
return "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
} | 3.68 |
hudi_HiveSchemaUtil_convertParquetSchemaToHiveSchema | /**
* Returns equivalent Hive table schema read from a parquet file.
*
* @param messageType parquet schema
* @param supportTimestamp whether the Hive timestamp type is supported
* @return Hive table schema read from the parquet file, as a Map of column name to type
*/
public static Map<String, String> convertParquetSchemaToHiveSchema(MessageType messageType, boolean supportTimestamp) throws IOException {
return convertMapSchemaToHiveSchema(parquetSchemaToMapSchema(messageType, supportTimestamp, true));
} | 3.68 |
hudi_HoodieRealtimeRecordReaderUtils_addPartitionFields | /**
* Hive's implementation of ParquetRecordReader causes partition columns not present in the original parquet file to
* also be part of the projected schema. Hive expects the record reader implementation to return the row in its
* entirety (with un-projected columns having null values). As we use writerSchema for this, make sure the writer schema
* also includes partition columns
*
* @param schema Schema to be changed
* @param partitioningFields partition columns to append to the schema when missing
*/
public static Schema addPartitionFields(Schema schema, List<String> partitioningFields) {
final Set<String> firstLevelFieldNames =
schema.getFields().stream().map(Schema.Field::name).map(String::toLowerCase).collect(Collectors.toSet());
List<String> fieldsToAdd = partitioningFields.stream().map(String::toLowerCase)
.filter(x -> !firstLevelFieldNames.contains(x)).collect(Collectors.toList());
return appendNullSchemaFields(schema, fieldsToAdd);
} | 3.68 |
querydsl_MetaDataExporter_setConfiguration | /**
* Override the configuration
*
* @param configuration override configuration for custom type mappings etc
*/
public void setConfiguration(Configuration configuration) {
module.bind(Configuration.class, configuration);
} | 3.68 |
morf_AbstractSqlDialectTest_testAlterPrimaryKeyColumnCompositeKey | /**
* Test changing a column which is part of a composite primary key.
*/
@Test
public void testAlterPrimaryKeyColumnCompositeKey() {
testAlterTableColumn(COMPOSITE_PRIMARY_KEY_TABLE, AlterationType.ALTER, getColumn(COMPOSITE_PRIMARY_KEY_TABLE, SECOND_PRIMARY_KEY),
column(SECOND_PRIMARY_KEY, DataType.STRING, 5).primaryKey(), expectedAlterPrimaryKeyColumnCompositeKeyStatements());
} | 3.68 |
dubbo_ScopeClusterInvoker_invoke | /**
* Checks if the current ScopeClusterInvoker is exported to the local JVM and invokes the corresponding Invoker.
* If it's not exported locally, then it delegates the invocation to the original Invoker.
*
* @param invocation the invocation to be performed
* @return the result of the invocation
* @throws RpcException if there was an error during the invocation
*/
@Override
public Result invoke(Invocation invocation) throws RpcException {
// When broadcasting, it should be called remotely.
if (isBroadcast()) {
if (logger.isDebugEnabled()) {
logger.debug("Performing broadcast call for method: " + RpcUtils.getMethodName(invocation)
+ " of service: " + getUrl().getServiceKey());
}
return invoker.invoke(invocation);
}
if (peerFlag) {
if (logger.isDebugEnabled()) {
logger.debug("Performing point-to-point call for method: " + RpcUtils.getMethodName(invocation)
+ " of service: " + getUrl().getServiceKey());
}
// If it's a point-to-point direct connection, invoke the original Invoker
return invoker.invoke(invocation);
}
if (isInjvmExported()) {
if (logger.isDebugEnabled()) {
logger.debug("Performing local JVM call for method: " + RpcUtils.getMethodName(invocation)
+ " of service: " + getUrl().getServiceKey());
}
// If it's exported to the local JVM, invoke the corresponding Invoker
return injvmInvoker.invoke(invocation);
}
if (logger.isDebugEnabled()) {
logger.debug("Performing remote call for method: " + RpcUtils.getMethodName(invocation) + " of service: "
+ getUrl().getServiceKey());
}
// Otherwise, delegate the invocation to the original Invoker
return invoker.invoke(invocation);
} | 3.68 |
querydsl_ComparableExpression_lt | /**
* Create a {@code this < right} expression
*
* @param right rhs of the comparison
* @return this < right
* @see java.lang.Comparable#compareTo(Object)
*/
public BooleanExpression lt(Expression<T> right) {
return Expressions.booleanOperation(Ops.LT, mixin, right);
} | 3.68 |
dubbo_BasicJsonWriter_println | /**
* Write a new line.
*/
public IndentingWriter println() {
String separator = System.lineSeparator();
try {
this.out.write(separator.toCharArray(), 0, separator.length());
} catch (IOException ex) {
throw new IllegalStateException(ex);
}
this.prependIndent = true;
return this;
} | 3.68 |
hbase_AtomicUtils_updateMax | /**
* Updates an AtomicLong which is supposed to maintain the maximum value. This method is not
* synchronized but is thread-safe.
*/
public static void updateMax(AtomicLong max, long value) {
while (true) {
long cur = max.get();
if (value <= cur) {
break;
}
if (max.compareAndSet(cur, value)) {
break;
}
}
} | 3.68 |
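A standalone sketch of the same compare-and-set retry pattern (the class below is illustrative, not part of HBase): concurrent writers publish observed values and the AtomicLong converges on the maximum without locking.

```java
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.LongStream;

public class UpdateMaxDemo {
  // Same retry loop as updateMax above: give up if the current value is already
  // at least as large, otherwise CAS and retry on contention.
  static void updateMax(AtomicLong max, long value) {
    long cur;
    while ((cur = max.get()) < value && !max.compareAndSet(cur, value)) {
      // another thread won the race; re-read and try again
    }
  }

  public static void main(String[] args) {
    AtomicLong max = new AtomicLong(Long.MIN_VALUE);
    LongStream.of(3, 42, 7, 19, 42).parallel().forEach(v -> updateMax(max, v));
    System.out.println(max.get()); // 42
  }
}
```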
hadoop_RPCUtil_unwrapAndThrowException | /**
* Utility method that unwraps and throws the appropriate underlying exception.
*
* @param se
* ServiceException
* @return An instance of the actual exception, which will be a subclass of
* {@link YarnException} or {@link IOException}
* @throws IOException if an IO error occurred.
* @throws YarnException exceptions from yarn servers.
*/
public static Void unwrapAndThrowException(ServiceException se)
throws IOException, YarnException {
Throwable cause = se.getCause();
if (cause == null) {
// SE generated by the RPC layer itself.
throw new IOException(se);
} else {
if (cause instanceof RemoteException) {
RemoteException re = (RemoteException) cause;
Class<?> realClass = null;
try {
realClass = Class.forName(re.getClassName());
} catch (ClassNotFoundException cnf) {
// Assume this to be a new exception type added to YARN. This isn't
// absolutely correct since the RPC layer could add an exception as
// well.
throw instantiateYarnException(YarnException.class, re);
}
if (YarnException.class.isAssignableFrom(realClass)) {
throw instantiateYarnException(
realClass.asSubclass(YarnException.class), re);
} else if (IOException.class.isAssignableFrom(realClass)) {
throw instantiateIOException(realClass.asSubclass(IOException.class),
re);
} else if (RuntimeException.class.isAssignableFrom(realClass)) {
throw instantiateRuntimeException(
realClass.asSubclass(RuntimeException.class), re);
} else {
throw re;
}
// RemoteException contains useful information as against the
// java.lang.reflect exceptions.
} else if (cause instanceof IOException) {
// RPC Client exception.
throw (IOException) cause;
} else if (cause instanceof RuntimeException) {
// RPC RuntimeException
throw (RuntimeException) cause;
} else {
// Should not be generated.
throw new IOException(se);
}
}
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectHavingScript | /**
* Tests a select with a "having" clause.
*/
@Test
public void testSelectHavingScript() {
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD))
.from(new TableReference(ALTERNATE_TABLE))
.groupBy(new FieldReference(STRING_FIELD))
.having(eq(new FieldReference("blah"), "X"));
String value = varCharCast("'X'");
String expectedSql = "SELECT stringField FROM " + tableName(ALTERNATE_TABLE) + " GROUP BY stringField HAVING (blah = " + stringLiteralPrefix() + value+")";
assertEquals("Select with having clause", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_Tuple8_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple8)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple8 tuple = (Tuple8) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
return true;
} | 3.68 |
framework_Embedded_removeClickListener | /**
* Remove a click listener from the component. The listener should earlier
* have been added using {@link #addClickListener(ClickListener)}.
*
* @param listener
* The listener to remove
*
* @deprecated As of 8.0, replaced by {@link Registration#remove()} in the
* registration object returned from
* {@link #addClickListener(ClickListener)}.
*/
@Deprecated
public void removeClickListener(ClickListener listener) {
removeListener(EventId.CLICK_EVENT_IDENTIFIER, ClickEvent.class,
listener);
} | 3.68 |
hadoop_FederationStateStoreHeartbeat_updateClusterState | /**
* Update the cached cluster state with a JSON string representation of the
* current {@link ClusterMetricsInfo}.
*/
private void updateClusterState() {
try {
// get the current state
currentClusterState.getBuffer().setLength(0);
ClusterMetricsInfo clusterMetricsInfo = new ClusterMetricsInfo(rs);
marshaller.marshallToJSON(clusterMetricsInfo, currentClusterState);
capability = currentClusterState.toString();
} catch (Exception e) {
LOG.warn("Exception while trying to generate cluster state,"
+ " so reverting to last know state.", e);
}
} | 3.68 |
pulsar_ServiceUrlProvider_close | /**
* Close the resource that the provider allocated.
*
*/
@Override
default void close() {
// do nothing
} | 3.68 |
hadoop_BlockRecoveryCommand_add | /**
* Add recovering block to the command.
*/
public void add(RecoveringBlock block) {
recoveringBlocks.add(block);
} | 3.68 |
dubbo_SlidingWindow_list | /**
* Get valid pane list for entire sliding window at the specified time.
* The list will only contain "valid" panes.
*
* @param timeMillis the specified time.
* @return valid pane list for entire sliding window.
*/
public List<Pane<T>> list(long timeMillis) {
if (timeMillis < 0) {
return new ArrayList<>();
}
List<Pane<T>> result = new ArrayList<>(paneCount);
for (int idx = 0; idx < paneCount; idx++) {
Pane<T> pane = referenceArray.get(idx);
if (pane == null || isPaneDeprecated(timeMillis, pane)) {
continue;
}
result.add(pane);
}
return result;
} | 3.68 |
morf_TableReference_isTemporary | /**
* Indicates whether the table is temporary.
*
* @return true if the table is a temporary table.
*/
public boolean isTemporary() {
return temporary;
} | 3.68 |
hmily_MetricsReporter_register | /**
* Register.
*
* @param metricsRegister metrics register
*/
public static void register(final MetricsRegister metricsRegister) {
MetricsReporter.metricsRegister = metricsRegister;
} | 3.68 |
hbase_WALEdit_isCompactionMarker | /**
* Returns true if the given cell is a serialized {@link CompactionDescriptor}
* @see #getCompaction(Cell)
*/
public static boolean isCompactionMarker(Cell cell) {
return CellUtil.matchingColumn(cell, METAFAMILY, COMPACTION);
} | 3.68 |
framework_VTabsheet_hideTabs | /**
* Makes tab bar invisible.
*
* @since 7.2
*/
public void hideTabs() {
tb.setVisible(false);
addStyleName(CLASSNAME + "-hidetabs");
} | 3.68 |
hbase_RpcExecutor_getHandler | /**
* Override if providing an alternate Handler implementation.
*/
protected RpcHandler getHandler(final String name, final double handlerFailureThreshhold,
final int handlerCount, final BlockingQueue<CallRunner> q,
final AtomicInteger activeHandlerCount, final AtomicInteger failedHandlerCount,
final Abortable abortable) {
return new RpcHandler(name, handlerFailureThreshhold, handlerCount, q, activeHandlerCount,
failedHandlerCount, abortable);
} | 3.68 |
morf_Oracle_formatJdbcUrl | /**
*
* @see org.alfasoftware.morf.jdbc.DatabaseType#formatJdbcUrl(org.alfasoftware.morf.jdbc.JdbcUrlElements)
*/
@Override
public String formatJdbcUrl(JdbcUrlElements jdbcUrlElements) {
return "jdbc:oracle:thin:@" + jdbcUrlElements.getHostName() + (jdbcUrlElements.getPort() == 0 ? "" : ":" + jdbcUrlElements.getPort()) + "/" + jdbcUrlElements.getInstanceName();
} | 3.68 |
morf_SqlScriptExecutorProvider_executionEnd | /**
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.SqlScriptVisitor#executionEnd()
*/
@Override
public void executionEnd() {
// Defaults to no-op
} | 3.68 |
hbase_StochasticLoadBalancer_updateStochasticCosts | /**
* Update stochastic balancer costs to JMX.
*/
private void updateStochasticCosts(TableName tableName, double overall, double[] subCosts) {
if (tableName == null) {
return;
}
// check if the metricsBalancer is MetricsStochasticBalancer before casting
if (metricsBalancer instanceof MetricsStochasticBalancer) {
MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer;
// overall cost
balancer.updateStochasticCost(tableName.getNameAsString(), OVERALL_COST_FUNCTION_NAME,
"Overall cost", overall);
// each cost function
for (int i = 0; i < costFunctions.size(); i++) {
CostFunction costFunction = costFunctions.get(i);
String costFunctionName = costFunction.getClass().getSimpleName();
double costPercent = (overall == 0) ? 0 : (subCosts[i] / overall);
// TODO: cost function may need a specific description
balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
"The percent of " + costFunctionName, costPercent);
}
}
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectMultipleNestedWhereScript | /**
* Tests a select with multiple nested where clauses.
*/
@Test
public void testSelectMultipleNestedWhereScript() {
SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE))
.where(and(
eq(new FieldReference(STRING_FIELD), "A0001"),
or(
greaterThan(new FieldReference(INT_FIELD), 20080101),
lessThan(new FieldReference(DATE_FIELD), 20090101)
)
));
String value = varCharCast("'A0001'");
String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE ((stringField = " + stringLiteralPrefix() + value+") AND ((intField > 20080101) OR (dateField < 20090101)))";
assertEquals("Select with nested where clause", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
hudi_HoodieBloomIndex_loadColumnRangesFromMetaIndex | /**
* Load the column stats index as BloomIndexFileInfo for all the involved files in the partition.
*
* @param partitions - List of partitions for which column stats need to be loaded
* @param context - Engine context
* @param hoodieTable - Hoodie table
* @return List of partition and file column range info pairs
*/
protected List<Pair<String, BloomIndexFileInfo>> loadColumnRangesFromMetaIndex(
List<String> partitions, final HoodieEngineContext context, final HoodieTable<?, ?, ?, ?> hoodieTable) {
// also obtain file ranges, if range pruning is enabled
context.setJobStatus(this.getClass().getName(), "Load meta index key ranges for file slices: " + config.getTableName());
String keyField = HoodieRecord.HoodieMetadataField.RECORD_KEY_METADATA_FIELD.getFieldName();
List<Pair<String, HoodieBaseFile>> baseFilesForAllPartitions = HoodieIndexUtils.getLatestBaseFilesForAllPartitions(partitions, context, hoodieTable);
// Partition and file name pairs
List<Pair<String, String>> partitionFileNameList = new ArrayList<>(baseFilesForAllPartitions.size());
Map<Pair<String, String>, String> partitionAndFileNameToFileId = new HashMap<>(baseFilesForAllPartitions.size(), 1);
baseFilesForAllPartitions.forEach(pair -> {
Pair<String, String> partitionAndFileName = Pair.of(pair.getKey(), pair.getValue().getFileName());
partitionFileNameList.add(partitionAndFileName);
partitionAndFileNameToFileId.put(partitionAndFileName, pair.getValue().getFileId());
});
if (partitionFileNameList.isEmpty()) {
return Collections.emptyList();
}
Map<Pair<String, String>, HoodieMetadataColumnStats> fileToColumnStatsMap =
hoodieTable.getMetadataTable().getColumnStats(partitionFileNameList, keyField);
List<Pair<String, BloomIndexFileInfo>> result = new ArrayList<>(fileToColumnStatsMap.size());
for (Map.Entry<Pair<String, String>, HoodieMetadataColumnStats> entry : fileToColumnStatsMap.entrySet()) {
result.add(Pair.of(entry.getKey().getLeft(),
new BloomIndexFileInfo(
partitionAndFileNameToFileId.get(entry.getKey()),
// NOTE: Here we assume that the type of the primary key field is string
(String) unwrapAvroValueWrapper(entry.getValue().getMinValue()),
(String) unwrapAvroValueWrapper(entry.getValue().getMaxValue())
)));
}
return result;
} | 3.68 |
dubbo_AbstractClusterInvoker_setRemote | /**
* Set the remoteAddress and remoteApplicationName so that filter can get them.
*
*/
private void setRemote(Invoker<?> invoker, Invocation invocation) {
invocation.addInvokedInvoker(invoker);
RpcServiceContext serviceContext = RpcContext.getServiceContext();
serviceContext.setRemoteAddress(invoker.getUrl().toInetSocketAddress());
serviceContext.setRemoteApplicationName(invoker.getUrl().getRemoteApplication());
} | 3.68 |
framework_WebBrowser_getCurrentDate | /**
* Returns the current date and time of the browser. This will not be
* entirely accurate due to varying network latencies, but should provide a
* close-enough value for most cases. Also note that the returned Date
* object uses servers default time zone, not the clients.
* <p>
* To get the actual date and time shown in the end users computer, you can
* do something like:
*
* <pre>
* WebBrowser browser = ...;
* SimpleTimeZone timeZone = new SimpleTimeZone(browser.getTimezoneOffset(), "Fake client time zone");
* DateFormat format = DateFormat.getDateTimeInstance();
* format.setTimeZone(timeZone);
* myLabel.setValue(format.format(browser.getCurrentDate()));
* </pre>
*
* @return the current date and time of the browser.
* @see #isDSTInEffect()
* @see #getDSTSavings()
* @see #getTimezoneOffset()
*/
public Date getCurrentDate() {
return new Date(new Date().getTime() + clientServerTimeDelta);
} | 3.68 |
hbase_CommonFSUtils_getWrongWALRegionDir | /**
* For backward compatibility with HBASE-20734, where we store recovered edits in a wrong
* directory without BASE_NAMESPACE_DIR. See HBASE-22617 for more details.
* @deprecated For compatibility, will be removed in 4.0.0.
*/
@Deprecated
public static Path getWrongWALRegionDir(final Configuration conf, final TableName tableName,
final String encodedRegionName) throws IOException {
Path wrongTableDir = new Path(new Path(getWALRootDir(conf), tableName.getNamespaceAsString()),
tableName.getQualifierAsString());
return new Path(wrongTableDir, encodedRegionName);
} | 3.68 |
hbase_Reference_toByteArray | /**
* Use this when writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the
* delimiter, pb reads to EOF which may not be what you want).
* @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
*/
byte[] toByteArray() throws IOException {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
} | 3.68 |
rocketmq-connect_WorkerTask_getState | /**
* Get the current worker task state.
*
* @return the current state of the worker task
*/
public WorkerTaskState getState() {
return this.state.get();
} | 3.68 |
hbase_BitSetNode_getBitmapIndex | // ========================================================================
// Bitmap Helpers
// ========================================================================
private int getBitmapIndex(final long procId) {
return (int) (procId - start);
} | 3.68 |
hadoop_BlockRecoveryCommand_getNewGenerationStamp | /**
* Return the new generation stamp of the block,
* which also plays role of the recovery id.
*/
public long getNewGenerationStamp() {
return newGenerationStamp;
} | 3.68 |
hbase_BloomFilterMetrics_getNegativeResultsCount | /** Returns Current value for bloom negative results count */
public long getNegativeResultsCount() {
return negativeResults.sum();
} | 3.68 |
hudi_AvroSchemaCompatibility_getDescription | /**
* Gets a human readable description of this validation result.
*
* @return a human readable description of this validation result.
*/
public String getDescription() {
return mDescription;
} | 3.68 |
querydsl_JTSGeometryExpressions_asEWKT | /**
* Return the Extended Well-Known Text (EWKT) representation of the given geometry.
*
* @param expr geometry
* @return EWKT form
*/
public static StringExpression asEWKT(JTSGeometryExpression<?> expr) {
return Expressions.stringOperation(SpatialOps.AS_EWKT, expr);
} | 3.68 |
framework_VAbstractCalendarPanel_getDateField | /**
* Returns the date field which this panel is attached to.
*
* @return the "parent" date field
*/
protected VDateField<R> getDateField() {
return parent;
} | 3.68 |
hadoop_ServiceLauncher_serviceMain | /**
* Varargs version of the entry point for testing and other in-JVM use.
* Hands off to {@link #serviceMain(List)}
* @param args command line arguments.
*/
public static void serviceMain(String... args) {
serviceMain(Arrays.asList(args));
} | 3.68 |
hbase_MetaTableMetrics_getRegionIdFromOp | /**
* Get regionId from Ops such as: get, put, delete.
* @param op such as get, put or delete.
* @return the region id parsed from the operation's row key, or null if it cannot be determined
*/
private String getRegionIdFromOp(Row op) {
final String tableRowKey = Bytes.toString(op.getRow());
if (StringUtils.isEmpty(tableRowKey)) {
return null;
}
final String[] splits = tableRowKey.split(",");
return splits.length > 2 ? splits[2] : null;
} | 3.68 |
flink_RpcSerializedValue_getSerializedDataLength | /** Return length of serialized data, zero if no serialized data. */
public int getSerializedDataLength() {
return serializedData == null ? 0 : serializedData.length;
} | 3.68 |
framework_TableConnector_showSavedContextMenu | /**
* Shows a saved row context menu if the row for the context menu is still
* visible. Does nothing if a context menu has not been saved.
*
* @param savedContextMenu the stored context menu details to show again
*/
public void showSavedContextMenu(ContextMenuDetails savedContextMenu) {
if (isEnabled() && savedContextMenu != null) {
for (Widget w : getWidget().scrollBody) {
VScrollTableRow row = (VScrollTableRow) w;
if (row.getKey().equals(savedContextMenu.rowKey)) {
row.showContextMenu(savedContextMenu.left,
savedContextMenu.top);
}
}
}
} | 3.68 |
flink_ListViewSerializer_transformLegacySerializerSnapshot | /**
* We need to override this as a {@link LegacySerializerSnapshotTransformer} because in Flink
* 1.6.x and below, this serializer was incorrectly returning directly the snapshot of the
* nested list serializer as its own snapshot.
*
* <p>This method transforms the incorrect list serializer snapshot to be a proper {@link
* ListViewSerializerSnapshot}.
*/
@Override
public <U> TypeSerializerSnapshot<ListView<T>> transformLegacySerializerSnapshot(
TypeSerializerSnapshot<U> legacySnapshot) {
if (legacySnapshot instanceof ListViewSerializerSnapshot) {
return (TypeSerializerSnapshot<ListView<T>>) legacySnapshot;
} else {
throw new UnsupportedOperationException(
legacySnapshot.getClass().getCanonicalName() + " is not supported.");
}
} | 3.68 |
hadoop_CommonCallableSupplier_submit | /**
* Submit a callable into a completable future.
* RTEs are rethrown.
* Non-RTEs are caught and wrapped; IOExceptions become
* {@code RuntimeIOException} instances.
* @param executor executor.
* @param call call to invoke
* @param <T> type
* @return the future to wait for
*/
@SuppressWarnings("unchecked")
public static <T> CompletableFuture<T> submit(final Executor executor,
final Callable<T> call) {
return CompletableFuture
.supplyAsync(new CommonCallableSupplier<T>(call), executor);
} | 3.68 |
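A hedged usage sketch of `submit(...)`: the file path and pool size are made up, and only the behaviour stated in the javadoc above (RTEs rethrown, checked exceptions wrapped) is assumed.

```java
// Fragment; assumes a static import of submit(...) from the class above plus
// java.util.concurrent.* and java.nio.file.*.
ExecutorService pool = Executors.newFixedThreadPool(2);
CompletableFuture<Long> size =
    submit(pool, () -> Files.size(Paths.get("/tmp/example.bin"))); // path is illustrative
try {
  long bytes = size.join(); // failures surface as unchecked exceptions from join()
} finally {
  pool.shutdown();
}
```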
graphhopper_MinHeapWithUpdate_push | /**
* Adds an element to the heap; the given id must not exceed the size specified in the constructor. It is illegal
* to push the same id twice (unless it was polled/removed before). To update the value of an id contained in the
* heap use the {@link #update} method.
*/
public void push(int id, float value) {
checkIdInRange(id);
if (size == max)
throw new IllegalStateException("Cannot push anymore, the heap is already full. size: " + size);
if (contains(id))
throw new IllegalStateException("Element with id: " + id + " was pushed already, you need to use the update method if you want to change its value");
size++;
tree[size] = id;
positions[id] = size;
vals[size] = value;
percolateUp(size);
} | 3.68 |
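A hedged usage sketch; the `update()` and poll-style methods are referenced by the javadoc above but are not shown in this snippet, so their exact signatures are assumptions.

```java
// Fragment; ids must lie in [0, size) given to the constructor.
MinHeapWithUpdate heap = new MinHeapWithUpdate(10);
heap.push(3, 7.5f);
heap.push(5, 2.0f);
heap.update(3, 1.0f);        // decrease id 3's value; it now sorts first
int smallest = heap.poll();  // assumed to return the id with the minimum value -> 3
```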
hadoop_FSBuilderSupport_getPositiveLong | /**
* Get a long value with resilience to unparseable values.
* Negative values are replaced with the default.
* @param key configuration key to look up
* @param defVal default value
* @return long value
*/
public long getPositiveLong(String key, long defVal) {
long l = getLong(key, defVal);
if (l < 0) {
LOG.debug("The option {} has a negative value {}, replacing with the default {}",
key, l, defVal);
l = defVal;
}
return l;
} | 3.68 |
hbase_HtmlQuoting_quoteOutputStream | /**
* Return an output stream that quotes all of the output.
* @param out the stream to write the quoted output to
* @return a new stream that the application should write to
*/
public static OutputStream quoteOutputStream(final OutputStream out) {
return new OutputStream() {
private byte[] data = new byte[1];
@Override
public void write(byte[] data, int off, int len) throws IOException {
quoteHtmlChars(out, data, off, len);
}
@Override
public void write(int b) throws IOException {
data[0] = (byte) b;
quoteHtmlChars(out, data, 0, 1);
}
@Override
public void flush() throws IOException {
out.flush();
}
@Override
public void close() throws IOException {
out.close();
}
};
} | 3.68 |
querydsl_ExpressionUtils_path | /**
* Create a new Path expression
*
* @param type type of expression
* @param metadata path metadata
* @param <T> type of expression
* @return path expression
*/
public static <T> Path<T> path(Class<? extends T> type, PathMetadata metadata) {
return new PathImpl<T>(type, metadata);
} | 3.68 |
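A short usage sketch; the variable and property names are illustrative, and `PathMetadataFactory` is assumed to be the usual companion for building `PathMetadata` instances.

```java
// Fragment; assumes com.querydsl.core.types.{Path, PathMetadataFactory, ExpressionUtils}.
// Builds the property path "person.name" by hand.
Path<Object> person = ExpressionUtils.path(Object.class, "person");
Path<String> name =
    ExpressionUtils.path(String.class, PathMetadataFactory.forProperty(person, "name"));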
MagicPlugin_Base64Coder_decodeLines | /**
* Decodes a byte array from Base64 format and ignores line separators, tabs and blanks.
* CR, LF, Tab and Space characters are ignored in the input data.
* This method is compatible with <code>sun.misc.BASE64Decoder.decodeBuffer(String)</code>.
*
* @param s A Base64 String to be decoded.
* @return An array containing the decoded data bytes.
* @throws IllegalArgumentException If the input is not valid Base64 encoded data.
*/
public static byte[] decodeLines(String s) {
char[] buf = new char[s.length()];
int p = 0;
for (int ip = 0; ip < s.length(); ip++) {
char c = s.charAt(ip);
if (c != ' ' && c != '\r' && c != '\n' && c != '\t')
buf[p++] = c;
}
return decode(buf, 0, p);
} | 3.68 |
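For comparison, the JDK's MIME decoder is the closest standard-library equivalent of `decodeLines()` (both ignore line separators in the input); a self-contained sketch:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class DecodeLinesDemo {
  public static void main(String[] args) {
    String wrapped = "SGVsbG8g\r\nV29ybGQh\n";                 // "Hello World!" split across lines
    byte[] decoded = Base64.getMimeDecoder().decode(wrapped);  // ignores CR/LF like decodeLines
    System.out.println(new String(decoded, StandardCharsets.UTF_8)); // Hello World!
  }
}
```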
framework_VGridLayout_calcColumnUsedSpace | /**
* Calculates the horizontal space used by visible columns.
*/
private int calcColumnUsedSpace() {
int usedSpace = 0;
int horizontalSpacing = getHorizontalSpacing();
boolean visibleFound = false;
for (int i = 0; i < minColumnWidths.length; i++) {
if (minColumnWidths[i] > 0 || !hiddenEmptyColumn(i)) {
if (visibleFound) {
// only include spacing if there already is a visible column
// before this one
usedSpace += horizontalSpacing + minColumnWidths[i];
} else {
usedSpace += minColumnWidths[i];
visibleFound = true;
}
}
}
return usedSpace;
} | 3.68 |
hbase_HRegion_shouldFlushStore | /**
* Should the store be flushed because it is old enough.
* <p>
* Every FlushPolicy should call this to determine whether a store is old enough to flush (except
* that you always flush all stores). Otherwise the method will always returns true which will
* make a lot of flush requests.
*/
boolean shouldFlushStore(HStore store) {
long earliest = this.wal.getEarliestMemStoreSeqNum(getRegionInfo().getEncodedNameAsBytes(),
store.getColumnFamilyDescriptor().getName()) - 1;
if (earliest > 0 && earliest + flushPerChanges < mvcc.getReadPoint()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Flush column family " + store.getColumnFamilyName() + " of "
+ getRegionInfo().getEncodedName() + " because unflushed sequenceid=" + earliest
+ " is > " + this.flushPerChanges + " from current=" + mvcc.getReadPoint());
}
return true;
}
if (this.flushCheckInterval <= 0) {
return false;
}
long now = EnvironmentEdgeManager.currentTime();
if (store.timeOfOldestEdit() < now - this.flushCheckInterval) {
if (LOG.isDebugEnabled()) {
LOG.debug("Flush column family: " + store.getColumnFamilyName() + " of "
+ getRegionInfo().getEncodedName() + " because time of oldest edit="
+ store.timeOfOldestEdit() + " is > " + this.flushCheckInterval + " from now =" + now);
}
return true;
}
return false;
} | 3.68 |
AreaShop_CommandAreaShop_getTabCompleteList | /**
* Get a list of strings to complete a command with (raw list; entries that do not match are not yet filtered out).
* @param toComplete The number of the argument that has to be completed
* @param start The already given start of the command
* @param sender The CommandSender that wants to tab complete
* @return A collection with all the possibilities for the argument to complete
*/
public List<String> getTabCompleteList(int toComplete, String[] start, CommandSender sender) {
return new ArrayList<>();
} | 3.68 |
morf_UpgradePath_hasStepsToApply | /**
* Returns whether this contains an upgrade path to apply, i.e. whether either
* {@link #getSteps()} or {@link #getSql()} is non-empty.
*
* @return true if there is a path.
*/
public boolean hasStepsToApply() {
return !getSteps().isEmpty() || !sql.isEmpty();
} | 3.68 |
framework_TablePushStreaming_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final Table t = new Table("The table");
t.setContainerDataSource(generateContainer(10, 10, iteration++));
t.setSizeFull();
Runnable r = () -> {
for (int i = 0; i < 99; i++) {
try {
Thread.sleep(200);
} catch (InterruptedException e) {
e.printStackTrace();
}
access(() -> t.setContainerDataSource(generateContainer(
t.getVisibleColumns().length, t.size(), iteration++)));
}
};
Thread tr = new Thread(r);
tr.start();
setContent(t);
} | 3.68 |
flink_UserDefinedFunction_close | /**
* Tear-down method for user-defined function. It can be used for clean up work. By default,
* this method does nothing.
*/
public void close() throws Exception {
// do nothing
} | 3.68 |
pulsar_BrokerService_registerConfigurationListener | /**
* Allows a listener to be notified of {@link ServiceConfiguration} updates, so the listener can take appropriate
* action when a specific config-field value has changed.
*
* On notification, the listener should first check whether the config value has changed and, after taking the
* appropriate action, update its own copy of the value (so that it can compare against the new value the next
* time the configMap changes).
* @param <T> type of the configuration value
*
* @param configKey
* : configuration field name
* @param listener
* : listener which takes appropriate action on config-value change
*/
public <T> void registerConfigurationListener(String configKey, Consumer<T> listener) {
validateConfigKey(configKey);
configRegisteredListeners.put(configKey, listener);
} | 3.68 |
hbase_ScannerContext_hasMoreValues | /**
* @return true when the state indicates that more values may follow those that have been
* returned
*/
public boolean hasMoreValues() {
return this.moreValues;
} | 3.68 |
flink_ResourceCounter_add | /**
* Adds increment to the count of resourceProfile and returns the new value.
*
* @param resourceProfile resourceProfile to which to add increment
* @param increment the number by which to increase the count of resourceProfile
* @return new ResourceCounter containing the result of the addition
*/
public ResourceCounter add(ResourceProfile resourceProfile, int increment) {
final Map<ResourceProfile, Integer> newValues = new HashMap<>(resources);
final int newValue = resources.getOrDefault(resourceProfile, 0) + increment;
updateNewValue(newValues, resourceProfile, newValue);
return new ResourceCounter(newValues);
} | 3.68 |
hbase_MemStoreFlusher_setGlobalMemStoreLimit | /**
* Sets the global memstore limit to a new size.
*/
@Override
public void setGlobalMemStoreLimit(long globalMemStoreSize) {
this.server.getRegionServerAccounting().setGlobalMemStoreLimits(globalMemStoreSize);
reclaimMemStoreMemory();
} | 3.68 |
hbase_NamespacesInstanceResource_processUpdate | // Check that POST or PUT is valid and then update namespace.
private Response processUpdate(NamespacesInstanceModel model, final boolean updateExisting,
final UriInfo uriInfo) {
if (LOG.isTraceEnabled()) {
LOG.trace((updateExisting ? "PUT " : "POST ") + uriInfo.getAbsolutePath());
}
if (model == null) {
try {
model = new NamespacesInstanceModel(namespace);
} catch (IOException ioe) {
servlet.getMetrics().incrementFailedPutRequests(1);
throw new RuntimeException("Cannot retrieve info for '" + namespace + "'.");
}
}
servlet.getMetrics().incrementRequests(1);
if (servlet.isReadOnly()) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
.entity("Forbidden" + CRLF).build();
}
Admin admin = null;
boolean namespaceExists = false;
try {
admin = servlet.getAdmin();
namespaceExists = doesNamespaceExist(admin, namespace);
} catch (IOException e) {
servlet.getMetrics().incrementFailedPutRequests(1);
return processException(e);
}
// Do not allow creation if namespace already exists.
if (!updateExisting && namespaceExists) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Namespace '"
+ namespace + "' already exists. Use REST PUT " + "to alter the existing namespace.")
.build();
}
// Do not allow altering if namespace does not exist.
if (updateExisting && !namespaceExists) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity(
"Namespace '" + namespace + "' does not exist. Use " + "REST POST to create the namespace.")
.build();
}
return createOrUpdate(model, uriInfo, admin, updateExisting);
} | 3.68 |
hbase_HRegionFileSystem_mergeStoreFile | /**
* Write out a merge reference under the given merges directory.
* @param mergingRegion {@link RegionInfo} for one of the regions being merged.
* @param familyName Column Family Name
* @param f File to create reference.
* @return Path to created reference.
* @throws IOException if the merge write fails.
*/
public Path mergeStoreFile(RegionInfo mergingRegion, String familyName, HStoreFile f)
throws IOException {
Path referenceDir = new Path(getMergesDir(regionInfoForFs), familyName);
// A whole reference to the store file.
Reference r = Reference.createTopReference(mergingRegion.getStartKey());
// Add the referred-to regions name as a dot separated suffix.
// See REF_NAME_REGEX regex above. The referred-to regions name is
// up in the path of the passed in <code>f</code> -- parentdir is family,
// then the directory above is the region name.
String mergingRegionName = mergingRegion.getEncodedName();
// Write reference with same file id only with the other region name as
// suffix and into the new region location (under same family).
Path p = new Path(referenceDir, f.getPath().getName() + "." + mergingRegionName);
return r.write(fs, p);
} | 3.68 |
hadoop_ReencryptionHandler_restoreFromLastProcessedFile | /**
* Restore the re-encryption from the progress inside ReencryptionStatus.
* This means start from exactly the lastProcessedFile (LPF), skipping all
* earlier paths in lexicographic order. Lexicographically-later directories
* on the LPF parent paths are added to subdirs.
*/
private void restoreFromLastProcessedFile(final long zoneId,
final ZoneReencryptionStatus zs)
throws IOException, InterruptedException {
final INodeDirectory parent;
final byte[] startAfter;
final INodesInPath lpfIIP =
dir.getINodesInPath(zs.getLastCheckpointFile(), FSDirectory.DirOp.READ);
parent = lpfIIP.getLastINode().getParent();
startAfter = lpfIIP.getLastINode().getLocalNameBytes();
traverser.traverseDir(parent, zoneId, startAfter,
new ZoneTraverseInfo(zs.getEzKeyVersionName()));
} | 3.68 |
framework_TreeTable_removeExpandListener | /**
* Removes an expand listener.
*
* @param listener
* the Listener to be removed.
*/
public void removeExpandListener(ExpandListener listener) {
removeListener(ExpandEvent.class, listener,
ExpandListener.EXPAND_METHOD);
} | 3.68 |
hbase_TableInputFormatBase_initializeTable | /**
* Allows subclasses to initialize the table information.
* @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close.
* @param tableName The {@link TableName} of the table to process.
*/
protected void initializeTable(Connection connection, TableName tableName) throws IOException {
if (this.table != null || this.connection != null) {
LOG.warn("initializeTable called multiple times. Overwriting connection and table "
+ "reference; TableInputFormatBase will not close these old references when done.");
}
this.table = connection.getTable(tableName);
this.regionLocator = connection.getRegionLocator(tableName);
this.connection = connection;
} | 3.68 |
hbase_FavoredNodeAssignmentHelper_multiRackCase | /**
* Place secondary and tertiary nodes in a multi rack case. If there are only two racks, then we
* try the place the secondary and tertiary on different rack than primary. But if the other rack
* has only one region server, then we place primary and tertiary on one rack and secondary on
* another. The aim is two distribute the three favored nodes on >= 2 racks. TODO: see how we can
* use generateMissingFavoredNodeMultiRack API here
* @param primaryRS The primary favored node.
* @param primaryRack The rack of the primary favored node.
* @return Array containing secondary and tertiary favored nodes.
* @throws IOException Signals that an I/O exception has occurred.
*/
private ServerName[] multiRackCase(ServerName primaryRS, String primaryRack) throws IOException {
List<ServerName> favoredNodes = Lists.newArrayList(primaryRS);
// Create the secondary and tertiary pair
ServerName secondaryRS = generateMissingFavoredNodeMultiRack(favoredNodes);
favoredNodes.add(secondaryRS);
String secondaryRack = getRackOfServer(secondaryRS);
ServerName tertiaryRS;
if (primaryRack.equals(secondaryRack)) {
tertiaryRS = generateMissingFavoredNode(favoredNodes);
} else {
// Try to place tertiary in secondary RS rack else place on primary rack.
tertiaryRS = getOneRandomServer(secondaryRack, Sets.newHashSet(secondaryRS));
if (tertiaryRS == null) {
tertiaryRS = getOneRandomServer(primaryRack, Sets.newHashSet(primaryRS));
}
// We couldn't find anything in secondary rack, get any FN
if (tertiaryRS == null) {
tertiaryRS = generateMissingFavoredNode(Lists.newArrayList(primaryRS, secondaryRS));
}
}
return new ServerName[] { secondaryRS, tertiaryRS };
} | 3.68 |
flink_HsFileDataManager_tryRead | /** @return number of buffers read. */
private int tryRead() {
Queue<HsSubpartitionFileReader> availableReaders = prepareAndGetAvailableReaders();
if (availableReaders.isEmpty()) {
return 0;
}
Queue<MemorySegment> buffers;
try {
buffers = allocateBuffers();
} catch (Exception exception) {
// fail all pending subpartition readers immediately if any exception occurs
failSubpartitionReaders(availableReaders, exception);
LOG.error("Failed to request buffers for data reading.", exception);
return 0;
}
int numBuffersAllocated = buffers.size();
if (numBuffersAllocated <= 0) {
return 0;
}
readData(availableReaders, buffers);
int numBuffersRead = numBuffersAllocated - buffers.size();
releaseBuffers(buffers);
return numBuffersRead;
} | 3.68 |
hudi_TableOptionProperties_loadFromProperties | /**
* Read table options map from the given table base path.
*/
public static Map<String, String> loadFromProperties(String basePath, Configuration hadoopConf) {
Path propertiesFilePath = getPropertiesFilePath(basePath);
Map<String, String> options = new HashMap<>();
Properties props = new Properties();
FileSystem fs = FSUtils.getFs(basePath, hadoopConf);
try (FSDataInputStream inputStream = fs.open(propertiesFilePath)) {
props.load(inputStream);
for (final String name : props.stringPropertyNames()) {
options.put(name, props.getProperty(name));
}
} catch (IOException e) {
throw new HoodieIOException(String.format("Could not load table option properties from %s", propertiesFilePath), e);
}
LOG.info(String.format("Loading table option properties from %s success.", propertiesFilePath));
return options;
} | 3.68 |
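A minimal usage sketch for the method above; the base path is an illustrative value, and the import package is an assumption that may need adjusting to where TableOptionProperties lives in your Hudi version.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hudi.table.catalog.TableOptionProperties; // package assumed; adjust to your Hudi version

public class LoadTableOptionsExample {
  public static void main(String[] args) {
    Configuration hadoopConf = new Configuration();
    // Reads the properties file stored under the table base path (illustrative path).
    Map<String, String> options =
        TableOptionProperties.loadFromProperties("hdfs:///tmp/hudi/demo_table", hadoopConf);
    options.forEach((k, v) -> System.out.println(k + " = " + v));
  }
}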
AreaShop_CancellableRegionEvent_allow | /**
	 * Let the event continue, possibly overwriting a cancel() call from another plugin.
*/
public void allow() {
this.cancelled = false;
this.reason = null;
} | 3.68 |
dubbo_DubboProtocol_getSharedClient | /**
* Get shared connection
*
     * @param url        the provider URL; its address is used as the key of the shared connection
     * @param connectNum the number of connections to share; must be greater than or equal to 1
*/
@SuppressWarnings("unchecked")
private SharedClientsProvider getSharedClient(URL url, int connectNum) {
String key = url.getAddress();
// connectNum must be greater than or equal to 1
int expectedConnectNum = Math.max(connectNum, 1);
return referenceClientMap.compute(key, (originKey, originValue) -> {
if (originValue != null && originValue.increaseCount()) {
return originValue;
} else {
return new SharedClientsProvider(
this, originKey, buildReferenceCountExchangeClientList(url, expectedConnectNum));
}
});
} | 3.68 |
querydsl_AbstractSQLQuery_endContext | /**
* Called to end a SQL listener context
*
* @param context the listener context to end
*/
protected void endContext(SQLListenerContext context) {
listeners.end(context);
} | 3.68 |
flink_TableEnvironment_create | /**
* Creates a table environment that is the entry point and central context for creating Table
* and SQL API programs.
*
* <p>It is unified both on a language level for all JVM-based languages (i.e. there is no
* distinction between Scala and Java API) and for bounded and unbounded data processing.
*
* <p>A table environment is responsible for:
*
* <ul>
* <li>Connecting to external systems.
* <li>Registering and retrieving {@link Table}s and other meta objects from a catalog.
* <li>Executing SQL statements.
* <li>Offering further configuration options.
* </ul>
*
* <p>Note: This environment is meant for pure table programs. If you would like to convert from
* or to other Flink APIs, it might be necessary to use one of the available language-specific
* table environments in the corresponding bridging modules.
*
* @param configuration The specified options are used to instantiate the {@link
* TableEnvironment}.
*/
static TableEnvironment create(Configuration configuration) {
return TableEnvironmentImpl.create(configuration);
} | 3.68 |
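A short usage sketch of the factory method above; the runtime-mode option and the query are illustrative and not taken from the snippet.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.TableEnvironment;

public class TableEnvExample {
  public static void main(String[] args) {
    Configuration configuration = new Configuration();
    // Illustrative option: run the table program in batch mode.
    configuration.setString("execution.runtime-mode", "batch");
    TableEnvironment tEnv = TableEnvironment.create(configuration);
    tEnv.executeSql("SELECT 1 AS answer").print();
  }
}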
pulsar_MultiTopicsConsumerImpl_getPartitionedTopics | // get the names of the partitioned topics
public List<String> getPartitionedTopics() {
return partitionedTopics.keySet().stream().collect(Collectors.toList());
} | 3.68 |
hbase_Import_addFilterAndArguments | /**
* Add a Filter to be instantiated on import
* @param conf Configuration to update (will be passed to the job)
* @param clazz {@link Filter} subclass to instantiate on the server.
* @param filterArgs List of arguments to pass to the filter on instantiation
*/
public static void addFilterAndArguments(Configuration conf, Class<? extends Filter> clazz,
List<String> filterArgs) throws IOException {
conf.set(Import.FILTER_CLASS_CONF_KEY, clazz.getName());
conf.setStrings(Import.FILTER_ARGS_CONF_KEY, filterArgs.toArray(new String[filterArgs.size()]));
} | 3.68 |
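A hedged usage sketch: it registers a PrefixFilter with an illustrative prefix argument so that the Import job instantiates the filter for each imported cell; the resulting configuration would then be passed on to the actual Import job setup.

import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.mapreduce.Import;

public class ImportFilterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "row-" is an illustrative prefix argument for the filter.
    Import.addFilterAndArguments(conf, PrefixFilter.class, Collections.singletonList("row-"));
    // conf now carries FILTER_CLASS_CONF_KEY / FILTER_ARGS_CONF_KEY for the job to pick up.
  }
}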
framework_FieldBinder_resolveFields | /**
* Resolves the fields of the design class instance.
*/
private void resolveFields(Class<?> classWithFields) {
for (Field memberField : getFields(classWithFields)) {
if (Component.class.isAssignableFrom(memberField.getType())) {
fieldMap.put(memberField.getName().toLowerCase(Locale.ROOT),
memberField);
}
}
} | 3.68 |
dubbo_ConfigUtils_mergeValues | /**
     * Inserts the default extensions into an extension list.
     * <p>
     * The extension list supports:<ul>
     * <li>The special value <code><strong>default</strong></code>, which marks the position where the default extensions are inserted.
     * <li>The special prefix <code><strong>-</strong></code>, which means remove. <code>-foo1</code> will remove the extension 'foo1'; <code>-default</code> will remove all default extensions.
* </ul>
*
* @param type Extension type
* @param cfg Extension name list
* @param def Default extension list
* @return result extension list
*/
public static List<String> mergeValues(
ExtensionDirector extensionDirector, Class<?> type, String cfg, List<String> def) {
List<String> defaults = new ArrayList<String>();
if (def != null) {
for (String name : def) {
if (extensionDirector.getExtensionLoader(type).hasExtension(name)) {
defaults.add(name);
}
}
}
List<String> names = new ArrayList<String>();
// add initial values
String[] configs = (cfg == null || cfg.trim().length() == 0) ? new String[0] : COMMA_SPLIT_PATTERN.split(cfg);
for (String config : configs) {
if (config != null && config.trim().length() > 0) {
names.add(config);
}
}
// -default is not included
if (!names.contains(REMOVE_VALUE_PREFIX + DEFAULT_KEY)) {
// add default extension
int i = names.indexOf(DEFAULT_KEY);
if (i > 0) {
names.addAll(i, defaults);
} else {
names.addAll(0, defaults);
}
names.remove(DEFAULT_KEY);
} else {
names.remove(DEFAULT_KEY);
}
        // process the '-' removal entries
for (String name : new ArrayList<String>(names)) {
if (name.startsWith(REMOVE_VALUE_PREFIX)) {
names.remove(name);
names.remove(name.substring(1));
}
}
return names;
} | 3.68 |
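To make the merge rules concrete, here is a standalone sketch that mirrors the documented semantics on plain lists. It is not the Dubbo API: the extension-existence check performed via ExtensionDirector is intentionally skipped, and the names used are made up.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class MergeValuesSketch {
  // Mirrors the merge rules above: "default" marks where defaults are spliced in,
  // "-name" removes an extension, and "-default" drops all default extensions.
  static List<String> merge(String cfg, List<String> defaults) {
    List<String> names = new ArrayList<>();
    if (cfg != null && !cfg.trim().isEmpty()) {
      names.addAll(Arrays.asList(cfg.split(",")));
    }
    if (!names.contains("-default")) {
      int i = names.indexOf("default");
      names.addAll(i > 0 ? i : 0, defaults);
    }
    names.remove("default");
    for (String name : new ArrayList<>(names)) {
      if (name.startsWith("-")) {
        names.remove(name);
        names.remove(name.substring(1));
      }
    }
    return names;
  }

  public static void main(String[] args) {
    List<String> defaults = Arrays.asList("filterA", "filterB");
    System.out.println(merge("custom,default", defaults));   // [custom, filterA, filterB]
    System.out.println(merge("default,-filterB", defaults)); // [filterA]
    System.out.println(merge("-default,custom", defaults));  // [custom]
  }
}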
hibernate-validator_ClassHierarchyHelper_getHierarchy | /**
* Retrieves all superclasses and interfaces recursively.
*
* @param clazz the class to start the search with
* @param classes list of classes to which to add all found super types matching
* the given filters
* @param filters filters applying for the search
*/
private static <T> void getHierarchy(Class<? super T> clazz, List<Class<? super T>> classes, Iterable<Filter> filters) {
for ( Class<? super T> current = clazz; current != null; current = current.getSuperclass() ) {
if ( classes.contains( current ) ) {
return;
}
if ( acceptedByAllFilters( current, filters ) ) {
classes.add( current );
}
for ( Class<?> currentInterface : current.getInterfaces() ) {
//safe since interfaces are super-types
@SuppressWarnings("unchecked")
Class<? super T> currentInterfaceCasted = (Class<? super T>) currentInterface;
getHierarchy( currentInterfaceCasted, classes, filters );
}
}
} | 3.68 |
framework_DesignAttributeHandler_removeSubsequentUppercase | /**
     * Replaces runs of two or more UPPERCASE letters that are followed either
     * by another uppercase letter or by the end of the string. This is to
     * generalise handling of method names like <tt>showISOWeekNumbers</tt>.
     *
     * @param param
     *            Input string.
     * @return Input string with such uppercase runs converted to capitalized
     *         form (or all-lowercase when the run starts the string).
*/
private static String removeSubsequentUppercase(String param) {
StringBuffer result = new StringBuffer();
        // match all runs of two or more capital letters preceded by a
        // non-uppercase letter (or the string start) and followed by either a
        // capital letter or the string end
Pattern pattern = Pattern.compile("(^|[^A-Z])([A-Z]{2,})([A-Z]|$)");
Matcher matcher = pattern.matcher(param);
while (matcher.find()) {
String matched = matcher.group(2);
// if this is a beginning of the string, the whole matched group is
// written in lower case
if (matcher.group(1).isEmpty()) {
matcher.appendReplacement(result,
matched.toLowerCase(Locale.ROOT) + matcher.group(3));
// otherwise the first character of the group stays uppercase,
// while the others are lower case
} else {
matcher.appendReplacement(result,
matcher.group(1) + matched.substring(0, 1)
+ matched.substring(1).toLowerCase(Locale.ROOT)
+ matcher.group(3));
}
// in both cases the uppercase letter of the next word (or string's
// end) is added
// this implies there is at least one extra lowercase letter after
// it to be caught by the next call to find()
}
matcher.appendTail(result);
return result.toString();
} | 3.68 |
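To make the transformation above concrete, here is a self-contained demo that applies the same pattern and replacement logic to a couple of sample names; the class name and inputs are illustrative.

import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class UppercaseRunDemo {
  // Same pattern as above: a run of two or more capitals, preceded by a
  // non-uppercase character (or string start) and followed by a capital or the end.
  private static final Pattern RUN = Pattern.compile("(^|[^A-Z])([A-Z]{2,})([A-Z]|$)");

  static String normalize(String param) {
    StringBuffer result = new StringBuffer();
    Matcher matcher = RUN.matcher(param);
    while (matcher.find()) {
      String matched = matcher.group(2);
      if (matcher.group(1).isEmpty()) {
        matcher.appendReplacement(result,
            matched.toLowerCase(Locale.ROOT) + matcher.group(3));
      } else {
        matcher.appendReplacement(result, matcher.group(1) + matched.substring(0, 1)
            + matched.substring(1).toLowerCase(Locale.ROOT) + matcher.group(3));
      }
    }
    matcher.appendTail(result);
    return result.toString();
  }

  public static void main(String[] args) {
    System.out.println(normalize("showISOWeekNumbers")); // showIsoWeekNumbers
    System.out.println(normalize("URLPattern"));         // urlPattern
  }
}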
framework_VLayoutSlot_setExpandRatio | /**
* Set how the slot should be expanded relative to the other slots.
*
* @param expandRatio
* The ratio of the space the slot should occupy
*
* @deprecated this value isn't used for anything by default
*/
@Deprecated
public void setExpandRatio(double expandRatio) {
this.expandRatio = expandRatio;
} | 3.68 |