name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
flink_ExecutionConfig_getParallelism | /**
 * Gets the parallelism with which operations are executed by default. Operations can
* individually override this value to use a specific parallelism.
*
* <p>Other operations may need to run with a different parallelism - for example calling a
* reduce operation over the entire data set will involve an operation that runs with a
* parallelism of one (the final reduce to the single result value).
*
* @return The parallelism used by operations, unless they override that value. This method
* returns {@link #PARALLELISM_DEFAULT} if the environment's default parallelism should be
* used.
*/
public int getParallelism() {
return configuration.get(CoreOptions.DEFAULT_PARALLELISM);
} | 3.68 |
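As an illustrative usage sketch (assuming a standard Flink streaming setup, not part of the snippet above), the default parallelism returned by this getter can be set and read through the execution environment:

```java
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ParallelismExample {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Set the default parallelism for all operators that do not override it.
        env.setParallelism(4);

        ExecutionConfig config = env.getConfig();
        // Returns 4 here; returns ExecutionConfig.PARALLELISM_DEFAULT (-1) if the
        // environment's default parallelism should be used instead.
        int parallelism = config.getParallelism();
        System.out.println("Default parallelism: " + parallelism);
    }
}
```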
flink_InputTypeStrategies_commonMultipleArrayType | /**
* An {@link InputTypeStrategy} that expects {@code minCount} arguments that have a common array
* type.
*/
public static InputTypeStrategy commonMultipleArrayType(int minCount) {
return new CommonArrayInputTypeStrategy(ConstantArgumentCount.from(minCount));
} | 3.68 |
hadoop_AbstractS3ACommitter_updateCommonContext | /**
* Add jobID to current context.
*/
protected final void updateCommonContext() {
currentAuditContext().put(AuditConstants.PARAM_JOB_ID, uuid);
} | 3.68 |
hadoop_LocalJobOutputFiles_getOutputIndexFileForWrite | /**
* Create a local map output index file name.
*
* @param size the size of the file
*/
public Path getOutputIndexFileForWrite(long size) throws IOException {
String path = String.format(OUTPUT_FILE_INDEX_FORMAT_STRING, TASKTRACKER_OUTPUT);
return lDirAlloc.getLocalPathForWrite(path, size, conf);
} | 3.68 |
flink_RestfulGateway_disposeSavepoint | /**
* Dispose the given savepoint.
*
* @param savepointPath identifying the savepoint to dispose
* @param timeout RPC timeout
 * @return A future acknowledging that the disposal succeeded
*/
default CompletableFuture<Acknowledge> disposeSavepoint(
final String savepointPath, @RpcTimeout final Time timeout) {
throw new UnsupportedOperationException();
} | 3.68 |
flink_DynamicSinkUtils_createRequiredMetadataColumns | /**
* Returns a list of required metadata columns. Ordered by the iteration order of {@link
* SupportsWritingMetadata#listWritableMetadata()}.
*
* <p>This method assumes that sink and schema have been validated via {@link
* #prepareDynamicSink}.
*/
private static List<MetadataColumn> createRequiredMetadataColumns(
ResolvedSchema schema, DynamicTableSink sink) {
final List<Column> tableColumns = schema.getColumns();
final List<Integer> metadataColumns = extractPersistedMetadataColumns(schema);
Map<String, MetadataColumn> metadataKeysToMetadataColumns = new HashMap<>();
for (Integer columnIndex : metadataColumns) {
MetadataColumn metadataColumn = (MetadataColumn) tableColumns.get(columnIndex);
String metadataKey = metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
 // After resolving, every metadata column has a unique metadata key.
metadataKeysToMetadataColumns.put(metadataKey, metadataColumn);
}
final Map<String, DataType> metadataMap = extractMetadataMap(sink);
return metadataMap.keySet().stream()
.filter(metadataKeysToMetadataColumns::containsKey)
.map(metadataKeysToMetadataColumns::get)
.collect(Collectors.toList());
} | 3.68 |
framework_BeanItemContainer_setBeanIdResolver | /**
* Unsupported in BeanItemContainer.
*/
@Override
protected void setBeanIdResolver(
AbstractBeanContainer.BeanIdResolver<BEANTYPE, BEANTYPE> beanIdResolver)
throws UnsupportedOperationException {
throw new UnsupportedOperationException(
"BeanItemContainer always uses an IdentityBeanIdResolver");
} | 3.68 |
hadoop_RegexMountPoint_getRegexGroupValueFromMather | /**
* Get matched capture group value from regex matched string. E.g.
 * Regex: ^/user/(?<username>\\w+), regexGroupNameOrIndexStr: username
 * then /user/hadoop should return hadoop when calling
 * getRegexGroupValueFromMather(matcher, "username")
 * or getRegexGroupValueFromMather(matcher, "1")
 *
 * @param srcMatcher - the matcher to be used
* @param regexGroupNameOrIndexStr - the regex group name or index
* @return - Null if no matched group named regexGroupNameOrIndexStr found.
*/
private String getRegexGroupValueFromMather(
Matcher srcMatcher, String regexGroupNameOrIndexStr) {
if (regexGroupNameOrIndexStr.matches("\\d+")) {
// group index
int groupIndex = Integer.parseUnsignedInt(regexGroupNameOrIndexStr);
if (groupIndex >= 0 && groupIndex <= srcMatcher.groupCount()) {
return srcMatcher.group(groupIndex);
}
} else {
// named group in regex
return srcMatcher.group(regexGroupNameOrIndexStr);
}
return null;
} | 3.68 |
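The behaviour described in the Javadoc maps directly onto java.util.regex; a minimal, self-contained sketch using only the JDK:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class NamedGroupExample {
    public static void main(String[] args) {
        // The group can be addressed by name or by index, as in the helper above.
        Pattern pattern = Pattern.compile("^/user/(?<username>\\w+)");
        Matcher matcher = pattern.matcher("/user/hadoop");
        if (matcher.find()) {
            System.out.println(matcher.group("username")); // prints "hadoop"
            System.out.println(matcher.group(1));          // also prints "hadoop"
        }
    }
}
```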
flink_LogicalTypeDataTypeConverter_fromDataTypeToLogicalType | /** Converts {@link LegacyTypeInformationType} to planner types. */
@Deprecated
public static LogicalType fromDataTypeToLogicalType(DataType dataType) {
return PlannerTypeUtils.removeLegacyTypes(dataType.getLogicalType());
} | 3.68 |
flink_StateDescriptor_getDefaultValue | /** Returns the default value. */
public T getDefaultValue() {
if (defaultValue != null) {
TypeSerializer<T> serializer = serializerAtomicReference.get();
if (serializer != null) {
return serializer.copy(defaultValue);
} else {
throw new IllegalStateException("Serializer not yet initialized.");
}
} else {
return null;
}
} | 3.68 |
flink_StrategyUtils_findDataTypeOfRoot | /**
* Returns a data type for the given data type and expected root.
*
* <p>This method is aligned with {@link LogicalTypeCasts#supportsImplicitCast(LogicalType,
* LogicalType)}.
*
* <p>The "fallback" data type for each root represents the default data type for a NULL
* literal. NULL literals will receive the smallest precision possible for having little impact
* when finding a common type. The output of this method needs to be checked again if an
* implicit cast is supported.
*/
private static @Nullable DataType findDataTypeOfRoot(
DataType actualDataType, LogicalTypeRoot expectedRoot) {
final LogicalType actualType = actualDataType.getLogicalType();
if (actualType.is(expectedRoot)) {
return actualDataType;
}
switch (expectedRoot) {
case CHAR:
return DataTypes.CHAR(CharType.DEFAULT_LENGTH);
case VARCHAR:
if (actualType.is(CHAR)) {
return DataTypes.VARCHAR(getLength(actualType));
}
return DataTypes.VARCHAR(VarCharType.DEFAULT_LENGTH);
case BOOLEAN:
return DataTypes.BOOLEAN();
case BINARY:
return DataTypes.BINARY(BinaryType.DEFAULT_LENGTH);
case VARBINARY:
if (actualType.is(BINARY)) {
return DataTypes.VARBINARY(getLength(actualType));
}
return DataTypes.VARBINARY(VarBinaryType.DEFAULT_LENGTH);
case DECIMAL:
if (actualType.is(EXACT_NUMERIC)) {
return DataTypes.DECIMAL(getPrecision(actualType), getScale(actualType));
} else if (actualType.is(APPROXIMATE_NUMERIC)) {
final int precision = getPrecision(actualType);
// we don't know where the precision occurs (before or after the dot)
return DataTypes.DECIMAL(precision * 2, precision);
}
return DataTypes.DECIMAL(DecimalType.MIN_PRECISION, DecimalType.MIN_SCALE);
case TINYINT:
return DataTypes.TINYINT();
case SMALLINT:
return DataTypes.SMALLINT();
case INTEGER:
return DataTypes.INT();
case BIGINT:
return DataTypes.BIGINT();
case FLOAT:
return DataTypes.FLOAT();
case DOUBLE:
return DataTypes.DOUBLE();
case DATE:
return DataTypes.DATE();
case TIME_WITHOUT_TIME_ZONE:
if (actualType.is(TIMESTAMP_WITHOUT_TIME_ZONE)) {
return DataTypes.TIME(getPrecision(actualType));
}
return DataTypes.TIME();
case TIMESTAMP_WITHOUT_TIME_ZONE:
return DataTypes.TIMESTAMP();
case TIMESTAMP_WITH_TIME_ZONE:
return DataTypes.TIMESTAMP_WITH_TIME_ZONE();
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
return DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE();
case INTERVAL_YEAR_MONTH:
return DataTypes.INTERVAL(DataTypes.MONTH());
case INTERVAL_DAY_TIME:
return DataTypes.INTERVAL(DataTypes.SECOND());
case NULL:
return DataTypes.NULL();
case ARRAY:
case MULTISET:
case MAP:
case ROW:
case DISTINCT_TYPE:
case STRUCTURED_TYPE:
case RAW:
case SYMBOL:
case UNRESOLVED:
default:
return null;
}
} | 3.68 |
hibernate-validator_ConstraintDefinitionContribution_getConstraintType | /**
* Returns the constraint annotation type for which this instance provides constraint validator instances.
*/
public Class<A> getConstraintType() {
return constraintType;
} | 3.68 |
flink_AvroSchemaConverter_nullableSchema | /** Returns a nullable version of the given schema, i.e. a union with the null type. */
private static Schema nullableSchema(Schema schema) {
return schema.isNullable()
? schema
: Schema.createUnion(SchemaBuilder.builder().nullType(), schema);
} | 3.68 |
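A minimal sketch of the same idea with the Avro API (assuming Avro 1.9+, where Schema#isNullable is available): a non-nullable schema is wrapped in a union with the null type.

```java
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;

public class NullableSchemaExample {
    public static void main(String[] args) {
        Schema stringSchema = SchemaBuilder.builder().stringType();
        // Wrap in a union with null, mirroring the helper above.
        Schema nullable = stringSchema.isNullable()
                ? stringSchema
                : Schema.createUnion(SchemaBuilder.builder().nullType(), stringSchema);
        System.out.println(nullable); // ["null","string"]
    }
}
```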
hbase_VerifyReplication_createSubmittableJob | /**
* Sets up the actual job.
* @param conf The current configuration.
* @param args The command line parameters.
* @return The newly created job.
* @throws java.io.IOException When setting up the job fails.
*/
public Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
if (!doCommandLine(args)) {
return null;
}
conf.set(NAME + ".tableName", tableName);
conf.setLong(NAME + ".startTime", startTime);
conf.setLong(NAME + ".endTime", endTime);
conf.setInt(NAME + ".sleepMsBeforeReCompare", sleepMsBeforeReCompare);
conf.set(NAME + ".delimiter", delimiter);
conf.setInt(NAME + ".batch", batch);
conf.setBoolean(NAME + ".verbose", verbose);
conf.setBoolean(NAME + ".includeDeletedCells", includeDeletedCells);
if (families != null) {
conf.set(NAME + ".families", families);
}
if (rowPrefixes != null) {
conf.set(NAME + ".rowPrefixes", rowPrefixes);
}
String peerQuorumAddress;
Pair<ReplicationPeerConfig, Configuration> peerConfigPair = null;
if (peerId != null) {
peerConfigPair = getPeerQuorumConfig(conf, peerId);
ReplicationPeerConfig peerConfig = peerConfigPair.getFirst();
peerQuorumAddress = peerConfig.getClusterKey();
LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: "
+ peerConfig.getConfiguration());
conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX,
peerConfig.getConfiguration().entrySet());
} else {
assert this.peerQuorumAddress != null;
peerQuorumAddress = this.peerQuorumAddress;
LOG.info("Peer Quorum Address: " + peerQuorumAddress);
conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
}
if (peerTableName != null) {
LOG.info("Peer Table Name: " + peerTableName);
conf.set(NAME + ".peerTableName", peerTableName);
}
conf.setInt(NAME + ".versions", versions);
LOG.info("Number of version: " + versions);
conf.setInt(NAME + ".recompareTries", reCompareTries);
conf.setInt(NAME + ".recompareBackoffExponent", reCompareBackoffExponent);
conf.setInt(NAME + ".recompareThreads", reCompareThreads);
// Set Snapshot specific parameters
if (peerSnapshotName != null) {
conf.set(NAME + ".peerSnapshotName", peerSnapshotName);
// for verifyRep by snapshot, choose a unique sub-directory under peerSnapshotTmpDir to
// restore snapshot.
Path restoreDir = new Path(peerSnapshotTmpDir, UUID.randomUUID().toString());
peerSnapshotTmpDir = restoreDir.toString();
conf.set(NAME + ".peerSnapshotTmpDir", peerSnapshotTmpDir);
conf.set(NAME + ".peerFSAddress", peerFSAddress);
conf.set(NAME + ".peerHBaseRootAddress", peerHBaseRootAddress);
// This is to create HDFS delegation token for peer cluster in case of secured
conf.setStrings(MRJobConfig.JOB_NAMENODES, peerFSAddress, conf.get(HConstants.HBASE_DIR));
}
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
job.setJarByClass(VerifyReplication.class);
Scan scan = new Scan();
scan.setTimeRange(startTime, endTime);
scan.setRaw(includeDeletedCells);
scan.setCacheBlocks(false);
if (batch > 0) {
scan.setBatch(batch);
}
if (versions >= 0) {
scan.readVersions(versions);
LOG.info("Number of versions set to " + versions);
}
if (families != null) {
String[] fams = families.split(",");
for (String fam : fams) {
scan.addFamily(Bytes.toBytes(fam));
}
}
setRowPrefixFilter(scan, rowPrefixes);
if (sourceSnapshotName != null) {
Path snapshotTempPath = new Path(sourceSnapshotTmpDir);
LOG.info(
"Using source snapshot-" + sourceSnapshotName + " with temp dir:" + sourceSnapshotTmpDir);
TableMapReduceUtil.initTableSnapshotMapperJob(sourceSnapshotName, scan, Verifier.class, null,
null, job, true, snapshotTempPath);
restoreSnapshotForPeerCluster(conf, peerQuorumAddress);
} else {
TableMapReduceUtil.initTableMapperJob(tableName, scan, Verifier.class, null, null, job);
}
Configuration peerClusterConf;
if (peerId != null) {
assert peerConfigPair != null;
peerClusterConf = peerConfigPair.getSecond();
} else {
peerClusterConf =
HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX);
}
// Obtain the auth token from peer cluster
TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf);
job.setOutputFormatClass(NullOutputFormat.class);
job.setNumReduceTasks(0);
return job;
} | 3.68 |
pulsar_Producer_isSuccessorTo | /**
* Method to determine if this producer can replace another producer.
* @param other - producer to compare to this one
* @return true if this producer is a subsequent instantiation of the same logical producer. Otherwise, false.
*/
public boolean isSuccessorTo(Producer other) {
return Objects.equals(producerName, other.producerName)
&& Objects.equals(topic, other.topic)
&& producerId == other.producerId
&& Objects.equals(cnx, other.cnx)
&& other.getEpoch() < epoch;
} | 3.68 |
hbase_RegionCoprocessorHost_postWALRestore | /**
* @deprecated Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced with
 * something that doesn't expose InterfaceAudience.Private classes.
*/
@Deprecated
public void postWALRestore(final RegionInfo info, final WALKey logKey, final WALEdit logEdit)
throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postWALRestore(this, info, logKey, logEdit);
}
});
} | 3.68 |
hadoop_UpdateContainerTokenEvent_isExecTypeUpdate | /**
* Is this update an ExecType Update.
*
* @return isExecTypeUpdate.
*/
public boolean isExecTypeUpdate() {
return isExecTypeUpdate;
} | 3.68 |
rocketmq-connect_AbstractPositionManagementService_mergeOffset | /**
* Merge new received position info with local store.
*
 * @param partition the record partition
 * @param offset the newly received offset for the partition
 * @return true if the local store was updated
*/
private boolean mergeOffset(ExtendRecordPartition partition, RecordOffset offset) {
if (null == partition || partition.getPartition().isEmpty()) {
return false;
}
if (positionStore.getKVMap().containsKey(partition)) {
RecordOffset existedOffset = positionStore.getKVMap().get(partition);
// update
if (!offset.equals(existedOffset)) {
positionStore.put(partition, offset);
return true;
}
} else {
// add new position
positionStore.put(partition, offset);
return true;
}
return false;
} | 3.68 |
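The merge semantics (insert when absent, overwrite only on change) can be expressed compactly with the standard Map API; a hypothetical sketch using plain String keys and values rather than the connect runtime types:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public class MergeOffsetExample {
    private final Map<String, String> store = new HashMap<>();

    /** Returns true if the store changed, mirroring the mergeOffset contract above. */
    public boolean merge(String partition, String offset) {
        if (partition == null || partition.isEmpty()) {
            return false;
        }
        String previous = store.get(partition);
        if (Objects.equals(previous, offset)) {
            return false; // nothing to update
        }
        store.put(partition, offset);
        return true;
    }

    public static void main(String[] args) {
        MergeOffsetExample example = new MergeOffsetExample();
        System.out.println(example.merge("queue-0", "100")); // true: new entry
        System.out.println(example.merge("queue-0", "100")); // false: unchanged
        System.out.println(example.merge("queue-0", "200")); // true: updated
    }
}
```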
hbase_HFileCorruptionChecker_checkTables | /**
* Check the specified table dirs for bad hfiles.
*/
public void checkTables(Collection<Path> tables) throws IOException {
for (Path t : tables) {
checkTableDir(t);
}
} | 3.68 |
hadoop_FilterFileSystem_setReplication | /**
* Set replication for an existing file.
*
* @param src file name
* @param replication new replication
* @throws IOException raised on errors performing I/O.
* @return true if successful;
* false if file does not exist or is a directory
*/
@Override
public boolean setReplication(Path src, short replication) throws IOException {
return fs.setReplication(src, replication);
} | 3.68 |
morf_AbstractSqlDialectTest_setUp | /**
* Initialise the fixture state.
*/
@Before
public void setUp() {
// Get the candidate dialect to test
testDialect = createTestDialect();
// Main test table
testTable = table(TEST_TABLE)
.columns(
idColumn(),
versionColumn(),
column(STRING_FIELD, DataType.STRING, 3).nullable(),
column(INT_FIELD, DataType.INTEGER).nullable(),
column(FLOAT_FIELD, DataType.DECIMAL, 13, 2),
column(DATE_FIELD, DataType.DATE).nullable(),
column(BOOLEAN_FIELD, DataType.BOOLEAN).nullable(),
column(CHAR_FIELD, DataType.STRING, 1).nullable(),
column(BLOB_FIELD, DataType.BLOB).nullable(),
column(BIG_INTEGER_FIELD, DataType.BIG_INTEGER).nullable().defaultValue("12345"),
column(CLOB_FIELD, DataType.CLOB).nullable()
).indexes(
index(TEST_NK).unique().columns(STRING_FIELD),
index(TEST_1).columns(INT_FIELD, FLOAT_FIELD).unique()
);
// Temporary version of the main test table
testTempTable = table(testDialect.decorateTemporaryTableName(TEMP_TEST_TABLE)).temporary()
.columns(
idColumn(),
versionColumn(),
column(STRING_FIELD, DataType.STRING, 3).nullable(),
column(INT_FIELD, DataType.INTEGER).nullable(),
column(FLOAT_FIELD, DataType.DECIMAL, 13, 2),
column(DATE_FIELD, DataType.DATE).nullable(),
column(BOOLEAN_FIELD, DataType.BOOLEAN).nullable(),
column(CHAR_FIELD, DataType.STRING, 1).nullable(),
column(BLOB_FIELD, DataType.BLOB).nullable(),
column(BIG_INTEGER_FIELD, DataType.BIG_INTEGER).nullable().defaultValue("12345"),
column(CLOB_FIELD, DataType.CLOB).nullable()
).indexes(
index(TEMP_TEST_NK).unique().columns(STRING_FIELD),
index(TEMP_TEST_1).columns(INT_FIELD, FLOAT_FIELD)
);
// Simple alternate test table
Table alternateTestTable = table(ALTERNATE_TABLE)
.columns(
idColumn(),
versionColumn(),
column(STRING_FIELD, DataType.STRING, 3).nullable()
).indexes(
index("Alternate_1").columns(STRING_FIELD)
);
// Temporary version of the alternate test table
alternateTestTempTable = table(testDialect.decorateTemporaryTableName("TempAlternate")).temporary()
.columns(
idColumn(),
versionColumn(),
column(STRING_FIELD, DataType.STRING, 3).nullable()
).indexes(
index("TempAlternate_1").columns(STRING_FIELD)
);
// Third test table
Table otherTable = table(OTHER_TABLE)
.columns(
idColumn(),
versionColumn(),
column(STRING_FIELD, DataType.STRING, 3).nullable(),
column(INT_FIELD, DataType.DECIMAL, 8).nullable(),
column(FLOAT_FIELD, DataType.DECIMAL, 13, 2)
);
// Test table with a very long name
Table testTableLongName = table(TABLE_WITH_VERY_LONG_NAME)
.columns(
idColumn(),
versionColumn(),
column(STRING_FIELD, DataType.STRING, 3).nullable(),
column(INT_FIELD, DataType.DECIMAL, 8).nullable(),
column(FLOAT_FIELD, DataType.DECIMAL, 13, 2),
column(DATE_FIELD, DataType.DATE).nullable(),
column(BOOLEAN_FIELD, DataType.BOOLEAN).nullable(),
column(CHAR_FIELD, DataType.STRING, 1).nullable()
).indexes(
index(TEST_NK).unique().columns(STRING_FIELD),
index(TEST_1).columns(INT_FIELD, FLOAT_FIELD)
);
Table testTableAllUpperCase = table(UPPER_TABLE)
.columns(
idColumn(),
versionColumn(),
column(FIELDA, DataType.STRING, 4)
);
Table testTableMixedCase = table(MIXED_TABLE)
.columns(
idColumn(),
versionColumn(),
column(FIELDA, DataType.STRING, 4)
);
// Test table with non null columns
Table nonNullTable = table(NON_NULL_TABLE)
.columns(
idColumn(),
versionColumn(),
column(STRING_FIELD, DataType.STRING, 3),
column(INT_FIELD, DataType.DECIMAL, 8),
column(BOOLEAN_FIELD, DataType.BOOLEAN),
column(DATE_FIELD, DataType.DATE),
column(BLOB_FIELD, DataType.BLOB)
);
// Temporary version of the test table with non null columns
nonNullTempTable = table(testDialect.decorateTemporaryTableName("TempNonNull")).temporary()
.columns(
idColumn(),
versionColumn(),
column(STRING_FIELD, DataType.STRING, 3),
column(INT_FIELD, DataType.DECIMAL, 8),
column(BOOLEAN_FIELD, DataType.BOOLEAN),
column(DATE_FIELD, DataType.DATE),
column(BLOB_FIELD, DataType.BLOB)
);
// Test table with composite primary key
Table compositePrimaryKey = table(COMPOSITE_PRIMARY_KEY_TABLE)
.columns(
idColumn(),
versionColumn(),
column(STRING_FIELD, DataType.STRING, 3),
column(SECOND_PRIMARY_KEY, DataType.STRING, 3).primaryKey()
);
// Test table with a database-supplied unique id
Table autoNumber = table(AUTO_NUMBER_TABLE)
.columns(
autonumber(INT_FIELD, 5)
);
// Test view
TableReference tr = new TableReference(TEST_TABLE);
FieldReference f = new FieldReference(STRING_FIELD);
testView = view("TestView", select(f).from(tr).where(eq(f, new FieldLiteral("blah"))));
TableReference tr1 = new TableReference(OTHER_TABLE);
testViewWithUnion = view("TestView", select(f).from(tr).where(eq(f, new FieldLiteral("blah")))
.unionAll(select(f).from(tr1).where(eq(f, new FieldLiteral("blah")))));
Table inner = table("Inner")
.columns(
column(INNER_FIELD_A, DataType.STRING, 3),
column(INNER_FIELD_B, DataType.STRING, 3)
);
Table insertAB = table("InsertAB")
.columns(
column(INNER_FIELD_A, DataType.STRING, 3),
column(INNER_FIELD_B, DataType.STRING, 3)
);
Table insertA = table("InsertA")
.columns(
column(INNER_FIELD_A, DataType.STRING, 3)
);
// Builds a test schema
metadata = schema(testTable, testTempTable, testTableLongName, alternateTestTable, alternateTestTempTable, otherTable,
testTableAllUpperCase, testTableMixedCase, nonNullTable, nonNullTempTable, compositePrimaryKey, autoNumber,
inner, insertAB, insertA);
} | 3.68 |
framework_StatusChangeEvent_hasValidationErrors | /**
* Gets the associated validation status.
*
* @return {@code true} if the change that triggered this event caused
* validation errors, {@code false} otherwise
*/
public boolean hasValidationErrors() {
return hasValidationErrors;
} | 3.68 |
hadoop_IOStatisticsStoreImpl_addTimedOperation | /**
* Add a duration to the min/mean/max statistics, using the
* given prefix and adding a suffix for each specific value.
* <p>
 * The update is non-atomic, even though each individual statistic
* is updated thread-safely. If two threads update the values
* simultaneously, at the end of each operation the state will
* be correct. It is only during the sequence that the statistics
* may be observably inconsistent.
* </p>
* @param prefix statistic prefix
* @param durationMillis duration in milliseconds.
*/
@Override
public void addTimedOperation(String prefix, long durationMillis) {
addMeanStatisticSample(prefix + SUFFIX_MEAN, durationMillis);
addMinimumSample(prefix + SUFFIX_MIN, durationMillis);
addMaximumSample(prefix + SUFFIX_MAX, durationMillis);
} | 3.68 |
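A minimal single-class sketch (not the Hadoop statistics API) of the same min/mean/max bookkeeping; the store above updates each statistic independently, which is why its combined update is not atomic.

```java
public class DurationStats {
    private long count;
    private long sum;
    private long min = Long.MAX_VALUE;
    private long max = Long.MIN_VALUE;

    // For simplicity this sketch guards all three statistics with one lock; the
    // Hadoop store updates each statistic independently and thread-safely, so its
    // combined update is a non-atomic sequence.
    public synchronized void addSample(long durationMillis) {
        count++;
        sum += durationMillis;
        min = Math.min(min, durationMillis);
        max = Math.max(max, durationMillis);
    }

    public synchronized String summary() {
        double mean = count == 0 ? 0.0 : (double) sum / count;
        return String.format("mean=%.1f min=%d max=%d", mean, min, max);
    }

    public static void main(String[] args) {
        DurationStats stats = new DurationStats();
        stats.addSample(120);
        stats.addSample(80);
        System.out.println(stats.summary()); // mean=100.0 min=80 max=120
    }
}
```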
framework_AbsoluteLayout_getBottomUnits | /**
* Gets the unit for the 'bottom' attribute.
*
* @return See {@link Sizeable} UNIT_SYMBOLS for a description of the
* available units.
*/
public Unit getBottomUnits() {
return bottomUnits;
} | 3.68 |
hadoop_ApplicationConstants_$ | /**
* Expand the environment variable based on client OS environment variable
* expansion syntax (e.g. $VAR for Linux and %VAR% for Windows).
* <p>
* Note: Use $$() method for cross-platform practice i.e. submit an
* application from a Windows client to a Linux/Unix server or vice versa.
* </p>
* @return expanded environment variable.
*/
public String $() {
if (Shell.WINDOWS) {
return "%" + variable + "%";
} else {
return "$" + variable;
}
} | 3.68 |
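The same cross-platform expansion can be sketched with plain JDK OS detection (a hypothetical helper, not the YARN API itself):

```java
public class EnvVarExpansion {
    private static final boolean WINDOWS =
            System.getProperty("os.name").toLowerCase().startsWith("windows");

    /** Expands a variable name using client OS syntax: %VAR% on Windows, $VAR elsewhere. */
    static String expand(String variable) {
        return WINDOWS ? "%" + variable + "%" : "$" + variable;
    }

    public static void main(String[] args) {
        System.out.println(expand("JAVA_HOME")); // $JAVA_HOME on Linux, %JAVA_HOME% on Windows
    }
}
```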
hadoop_WeakReferenceThreadMap_setForCurrentThread | /**
* Set the new value for the current thread.
* @param newVal new reference to set for the active thread.
* @return the previously set value, possibly null
*/
public V setForCurrentThread(V newVal) {
requireNonNull(newVal);
long id = currentThreadId();
// if the same object is already in the map, just return it.
WeakReference<V> existingWeakRef = lookup(id);
// The looked up reference could be one of
// 1. null: nothing there
// 2. valid but get() == null : reference lost by GC.
// 3. different from the new value
// 4. the same as the old value
if (resolve(existingWeakRef) == newVal) {
// case 4: do nothing, return the new value
return newVal;
} else {
// cases 1, 2, 3: update the map and return the old value
return put(id, newVal);
}
} | 3.68 |
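The four lookup cases listed in the comments reduce to "resolve the weak reference, compare with the new value"; a small self-contained sketch of that pattern with the JDK's WeakReference (hypothetical, using String values instead of the Hadoop types):

```java
import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.Map;

public class WeakValueMapExample {
    private final Map<Long, WeakReference<String>> map = new HashMap<>();

    /** Returns the previously resolvable value for this thread, or null. */
    public String setForCurrentThread(String newVal) {
        long id = Thread.currentThread().getId();
        WeakReference<String> existing = map.get(id);
        String resolved = existing == null ? null : existing.get(); // null if absent or collected
        if (resolved == newVal) {
            return newVal; // same object already stored: nothing to do
        }
        map.put(id, new WeakReference<>(newVal));
        return resolved;
    }

    public static void main(String[] args) {
        WeakValueMapExample example = new WeakValueMapExample();
        System.out.println(example.setForCurrentThread("first"));  // null
        System.out.println(example.setForCurrentThread("second")); // first
    }
}
```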
framework_TwinColSelectElement_getValue | /**
 * Returns the first selected item (an item in the right-hand part of the component).
*
* @return the option text for the item
*/
public String getValue() {
String value = "";
WebElement selectedElement = findElement(
By.className("v-select-twincol-selections"));
List<WebElement> optionElements = selectedElement
.findElements(By.tagName("option"));
if (!optionElements.isEmpty()) {
value = optionElements.get(0).getText();
}
return value;
} | 3.68 |
graphhopper_GHLongLongBTree_getMemoryUsage | /**
* @return memory usage in MB
*/
@Override
public int getMemoryUsage() {
return Math.round(root.getCapacity() / Helper.MB);
} | 3.68 |
flink_CoFeedbackTransformation_getWaitTime | /**
* Returns the wait time. This is the amount of time that the feedback operator keeps listening
* for feedback elements. Once the time expires the operation will close and will not receive
* further elements.
*/
public Long getWaitTime() {
return waitTime;
} | 3.68 |
querydsl_PointExpression_z | /**
* The z-coordinate value for this Point, if it has one. Returns NIL otherwise.
*
* @return z-coordinate
*/
public NumberExpression<Double> z() {
if (z == null) {
z = Expressions.numberOperation(Double.class, SpatialOps.Z, mixin);
}
return z;
} | 3.68 |
hbase_ClusterStatusListener_receive | /**
* Acts upon the reception of a new cluster status.
* @param ncs the cluster status
*/
public void receive(ClusterMetrics ncs) {
if (ncs.getDeadServerNames() != null) {
for (ServerName sn : ncs.getDeadServerNames()) {
if (!isDeadServer(sn)) {
LOG.info("There is a new dead server: " + sn);
deadServers.add(sn);
if (deadServerHandler != null) {
deadServerHandler.newDead(sn);
}
}
}
}
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_initializeFileGroups | /**
* Initialize file groups for a partition. For file listing, we just have one file group.
* <p>
* All FileGroups for a given metadata partition has a fixed prefix as per the {@link MetadataPartitionType#getFileIdPrefix()}.
* Each file group is suffixed with 4 digits with increments of 1 starting with 0000.
* <p>
* Let's say we configure 10 file groups for record level index partition, and prefix as "record-index-bucket-"
 * File groups will be named as:
 * record-index-bucket-0000, record-index-bucket-0001, ..., record-index-bucket-0009
*/
private void initializeFileGroups(HoodieTableMetaClient dataMetaClient, MetadataPartitionType metadataPartition, String instantTime,
int fileGroupCount) throws IOException {
// Remove all existing file groups or leftover files in the partition
final Path partitionPath = new Path(metadataWriteConfig.getBasePath(), metadataPartition.getPartitionPath());
FileSystem fs = metadataMetaClient.getFs();
try {
final FileStatus[] existingFiles = fs.listStatus(partitionPath);
if (existingFiles.length > 0) {
LOG.warn("Deleting all existing files found in MDT partition " + metadataPartition.getPartitionPath());
fs.delete(partitionPath, true);
ValidationUtils.checkState(!fs.exists(partitionPath), "Failed to delete MDT partition " + metadataPartition);
}
} catch (FileNotFoundException ignored) {
// If the partition did not exist yet, it will be created below
}
// Archival of data table has a dependency on compaction(base files) in metadata table.
// It is assumed that as of time Tx of base instant (/compaction time) in metadata table,
// all commits in data table is in sync with metadata table. So, we always start with log file for any fileGroup.
// Even though the initial commit is a bulkInsert which creates the first baseFiles directly, we still
// create a log file first. This ensures that if any fileGroups of the MDT index do not receive any records
// during initial commit, then the fileGroup would still be recognized (as a FileSlice with no baseFiles but a
// valid logFile). Since these log files being created have no content, it is safe to add them here before
// the bulkInsert.
final String msg = String.format("Creating %d file groups for partition %s with base fileId %s at instant time %s",
fileGroupCount, metadataPartition.getPartitionPath(), metadataPartition.getFileIdPrefix(), instantTime);
LOG.info(msg);
final List<String> fileGroupFileIds = IntStream.range(0, fileGroupCount)
.mapToObj(i -> HoodieTableMetadataUtil.getFileIDForFileGroup(metadataPartition, i))
.collect(Collectors.toList());
ValidationUtils.checkArgument(fileGroupFileIds.size() == fileGroupCount);
engineContext.setJobStatus(this.getClass().getSimpleName(), msg);
engineContext.foreach(fileGroupFileIds, fileGroupFileId -> {
try {
final Map<HeaderMetadataType, String> blockHeader = Collections.singletonMap(HeaderMetadataType.INSTANT_TIME, instantTime);
final HoodieDeleteBlock block = new HoodieDeleteBlock(Collections.emptyList(), false, blockHeader);
HoodieLogFormat.Writer writer = HoodieLogFormat.newWriterBuilder()
.onParentPath(FSUtils.getPartitionPath(metadataWriteConfig.getBasePath(), metadataPartition.getPartitionPath()))
.withFileId(fileGroupFileId)
.withDeltaCommit(instantTime)
.withLogVersion(HoodieLogFile.LOGFILE_BASE_VERSION)
.withFileSize(0L)
.withSizeThreshold(metadataWriteConfig.getLogFileMaxSize())
.withFs(dataMetaClient.getFs())
.withRolloverLogWriteToken(HoodieLogFormat.DEFAULT_WRITE_TOKEN)
.withLogWriteToken(HoodieLogFormat.DEFAULT_WRITE_TOKEN)
.withFileExtension(HoodieLogFile.DELTA_EXTENSION).build();
writer.appendBlock(block);
writer.close();
} catch (InterruptedException e) {
throw new HoodieException("Failed to created fileGroup " + fileGroupFileId + " for partition " + metadataPartition.getPartitionPath(), e);
}
}, fileGroupFileIds.size());
} | 3.68 |
hadoop_OBSObjectBucketUtils_cloneObjectMetadata | /**
* Creates a copy of the passed {@link ObjectMetadata}. Does so without using
* the {@link ObjectMetadata#clone()} method, to avoid copying unnecessary
* headers.
*
* @param source the {@link ObjectMetadata} to copy
* @return a copy of {@link ObjectMetadata} with only relevant attributes
*/
private static ObjectMetadata cloneObjectMetadata(
final ObjectMetadata source) {
// This approach may be too brittle, especially if
// in future there are new attributes added to ObjectMetadata
// that we do not explicitly call to set here
ObjectMetadata ret = newObjectMetadata(source.getContentLength());
if (source.getContentEncoding() != null) {
ret.setContentEncoding(source.getContentEncoding());
}
return ret;
} | 3.68 |
hbase_OrderedNumeric_encodeLong | /**
* Write instance {@code val} into buffer {@code dst}.
* @param dst the {@link PositionedByteRange} to write to
* @param val the value to write to {@code dst}
* @return the number of bytes written
*/
public int encodeLong(PositionedByteRange dst, long val) {
return OrderedBytes.encodeNumeric(dst, val, order);
} | 3.68 |
hadoop_DiskBalancerDataNode_getDataNodePort | /**
* Returns the Port of this DataNode.
*
* @return Port Number
*/
public int getDataNodePort() {
return dataNodePort;
} | 3.68 |
framework_VTabsheet_hasClippedTabs | /**
* Checks whether there are any tabs clipped out of view (hidden behind the
 * scroller element or overflowing further) that could be scrolled into view (not
* hidden on the server). If no such tabs are clipped, this check returns
* {@code false}. Disabled but visible-on-server tabs count as viewable.
*
* @return {@code true} if any viewable tabs are clipped, {@code false}
* otherwise
*/
private boolean hasClippedTabs() {
// scroller should only be taken into account if some potentially
// visible tabs are already scrolled out of view
return (tb.getOffsetWidth() - getSpacerWidth()) > getOffsetWidth()
- (hasScrolledTabs() ? scroller.getOffsetWidth() : 0);
} | 3.68 |
graphhopper_KVStorage_deserializeObj | /**
 * Deserializes an object of the given class located at the specified pointer.
*/
private Object deserializeObj(AtomicInteger sizeOfObject, long pointer, Class<?> clazz) {
if (hasDynLength(clazz)) {
int valueLength = vals.getByte(pointer) & 0xFF;
pointer++;
byte[] valueBytes = new byte[valueLength];
vals.getBytes(pointer, valueBytes, valueBytes.length);
if (sizeOfObject != null)
sizeOfObject.set(1 + valueLength); // For String and byte[] we store the length and the value
if (clazz.equals(String.class)) return new String(valueBytes, Helper.UTF_CS);
else if (clazz.equals(byte[].class)) return valueBytes;
throw new IllegalArgumentException();
} else {
byte[] valueBytes = new byte[getFixLength(clazz)];
vals.getBytes(pointer, valueBytes, valueBytes.length);
if (clazz.equals(Integer.class)) {
if (sizeOfObject != null) sizeOfObject.set(4);
return bitUtil.toInt(valueBytes, 0);
} else if (clazz.equals(Long.class)) {
if (sizeOfObject != null) sizeOfObject.set(8);
return bitUtil.toLong(valueBytes, 0);
} else if (clazz.equals(Float.class)) {
if (sizeOfObject != null) sizeOfObject.set(4);
return bitUtil.toFloat(valueBytes, 0);
} else if (clazz.equals(Double.class)) {
if (sizeOfObject != null) sizeOfObject.set(8);
return bitUtil.toDouble(valueBytes, 0);
} else {
throw new IllegalArgumentException("unknown class " + clazz);
}
}
} | 3.68 |
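The fixed-length branch is plain byte-order decoding; an equivalent sketch with the JDK's ByteBuffer (GraphHopper's storage uses its own BitUtil, so this is only an illustration of the principle):

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class FixedLengthDecodeExample {
    public static void main(String[] args) {
        // Encode an int and a double into fixed-length byte arrays ...
        byte[] intBytes = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN).putInt(42).array();
        byte[] doubleBytes = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putDouble(3.14).array();

        // ... and decode them again, as the fixed-length branch above does per class.
        int i = ByteBuffer.wrap(intBytes).order(ByteOrder.LITTLE_ENDIAN).getInt();
        double d = ByteBuffer.wrap(doubleBytes).order(ByteOrder.LITTLE_ENDIAN).getDouble();
        System.out.println(i + " " + d); // 42 3.14
    }
}
```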
flink_ThreadInfoSamplesRequest_getDelayBetweenSamples | /**
* Returns the configured delay between the individual samples.
*
* @return the delay between the individual samples.
*/
public Duration getDelayBetweenSamples() {
return delayBetweenSamples;
} | 3.68 |
streampipes_AbstractConfigurablePipelineElementBuilder_requiredStaticProperty | /**
*
 * @param staticProperty The required static property (e.g., user input as shown in the StreamPipes UI)
 * @return this builder
*/
public K requiredStaticProperty(StaticProperty staticProperty) {
this.staticProperties.add(staticProperty);
return me();
} | 3.68 |
hbase_ProcedureCoordinator_getRpcs | /** Returns the rpcs implementation for all current procedures */
ProcedureCoordinatorRpcs getRpcs() {
return rpcs;
} | 3.68 |
streampipes_TextDocument_getText | /**
* Returns the {@link TextDocument}'s content, non-content or both
*
* @param includeContent Whether to include TextBlocks marked as "content".
* @param includeNonContent Whether to include TextBlocks marked as "non-content".
* @return The text.
*/
public String getText(boolean includeContent, boolean includeNonContent) {
StringBuilder sb = new StringBuilder();
LOOP:
for (TextBlock block : getTextBlocks()) {
if (block.isContent()) {
if (!includeContent) {
continue LOOP;
}
} else {
if (!includeNonContent) {
continue LOOP;
}
}
sb.append(block.getText());
sb.append('\n');
}
return sb.toString();
} | 3.68 |
framework_GridDropTarget_setDropAllowedOnRowsWhenSorted | /**
* Sets whether the grid accepts drop on rows as target when the grid has
* been sorted by the user.
* <p>
* Default value is {@code true} for backwards compatibility with 8.1. When
* {@code true} is used or the grid is not sorted, the mode used in
* {@link #setDropMode(DropMode)} is always used.
* <p>
* {@code false} value means that when the grid has been sorted, the drop
* mode is always {@link DropMode#ON_GRID}, regardless of what was set with
* {@link #setDropMode(DropMode)}. Once the grid is not sorted anymore, the
* sort mode is reverted back to what was set with
* {@link #setDropMode(DropMode)}.
*
* @param dropAllowedOnSortedGridRows
* {@code true} for allowing, {@code false} for not allowing
* drops on sorted grid rows
* @since 8.2
*/
public void setDropAllowedOnRowsWhenSorted(
boolean dropAllowedOnSortedGridRows) {
if (this.dropAllowedOnRowsWhenSorted != dropAllowedOnSortedGridRows) {
this.dropAllowedOnRowsWhenSorted = dropAllowedOnSortedGridRows;
if (!dropAllowedOnSortedGridRows) {
sortListenerRegistration = getParent()
.addSortListener(event -> {
updateDropModeForSortedGrid(
!event.getSortOrder().isEmpty());
});
updateDropModeForSortedGrid(
!getParent().getSortOrder().isEmpty());
} else {
// if the grid has been sorted, but now dropping on sorted grid
// is allowed, switch back to the previously allowed drop mode
if (cachedDropMode != null) {
internalSetDropMode(cachedDropMode);
}
sortListenerRegistration.remove();
sortListenerRegistration = null;
cachedDropMode = null;
}
}
} | 3.68 |
rocketmq-connect_WorkerDirectTask_configs | /**
* Get the configurations of current task.
*
* @return the configuration of current task.
*/
@Override
public KeyValue configs() {
return taskConfig;
} | 3.68 |
rocketmq-connect_DorisStreamLoader_loadJson | /**
* JSON import
*
 * @param jsonData the JSON payload to load
 * @param table the target table
 * @throws Exception if the stream load request fails
*/
public void loadJson(String jsonData, String table) throws Exception {
try (CloseableHttpClient client = httpClientBuilder.build()) {
HttpPut put = new HttpPut(getLoadURL(table));
put.removeHeaders(HttpHeaders.CONTENT_LENGTH);
put.removeHeaders(HttpHeaders.TRANSFER_ENCODING);
put.setHeader(HttpHeaders.EXPECT, "100-continue");
put.setHeader(HttpHeaders.AUTHORIZATION, basicAuthHeader(user, passwd));
// You can set stream load related properties in the Header, here we set label and column_separator.
put.setHeader("label", UUID.randomUUID().toString());
put.setHeader("column_separator", ",");
put.setHeader("format", "json");
// Set up the import file. Here you can also use StringEntity to transfer arbitrary data.
StringEntity entity = new StringEntity(jsonData);
put.setEntity(entity);
log.info(put.toString());
try (CloseableHttpResponse response = client.execute(put)) {
String loadResult = "";
if (response.getEntity() != null) {
loadResult = EntityUtils.toString(response.getEntity());
}
final int statusCode = response.getStatusLine().getStatusCode();
if (statusCode != 200) {
throw new IOException(String.format("Stream load failed. status: %s load result: %s", statusCode, loadResult));
}
log.info("Get load result: " + loadResult);
}
}
} | 3.68 |
flink_ModuleFactory_supportedProperties | /** @deprecated Implement the {@link Factory} based stack instead. */
@Deprecated
default List<String> supportedProperties() {
// Default implementation for modules implementing the new {@link Factory} stack instead.
return null;
} | 3.68 |
morf_TableOutputter_outputDataHeadings | /**
* Outputs the data headings row.
*
* @param workSheet to add the row to
* @param table to fetch metadata from
* @param startRow to add the headings at
* @param helpTextRowNumbers - the map of column names to row index for each
* bit of help text
* @throws WriteException if any of the writes to workSheet failed
* @return the row to carry on inserting at
*/
private int outputDataHeadings(WritableSheet workSheet, Table table, final int startRow, final Map<String, Integer> helpTextRowNumbers) throws WriteException {
int currentRow = startRow;
int columnNumber = 0;
final WritableCellFormat columnHeadingFormat = getBoldFormat();
columnHeadingFormat.setBackground(Colour.VERY_LIGHT_YELLOW);
WritableFont font = new WritableFont(WritableFont.ARIAL, 8, WritableFont.BOLD);
font.setColour(Colour.BLUE);
font.setUnderlineStyle(UnderlineStyle.SINGLE);
columnHeadingFormat.setFont(font);
for (Column column : table.columns()) {
if(columnNumber < MAX_EXCEL_COLUMNS && !column.getName().equals("id") && !column.getName().equals("version")) {
// Data heading is a link back to the help text
WritableHyperlink linkToHelp = new WritableHyperlink(
columnNumber, currentRow,
spreadsheetifyName(column.getName()),
workSheet, 0, helpTextRowNumbers.get(column.getName()));
workSheet.addHyperlink(linkToHelp);
WritableCell label = workSheet.getWritableCell(columnNumber, currentRow);
label.setCellFormat(columnHeadingFormat);
// Update the help text such that it is a link to the heading
Cell helpCell = workSheet.getCell(0, helpTextRowNumbers.get(column.getName()));
WritableHyperlink linkFromHelp = new WritableHyperlink(
0, helpTextRowNumbers.get(column.getName()),
helpCell.getContents(),
workSheet, columnNumber, currentRow);
workSheet.addHyperlink(linkFromHelp);
columnNumber++;
}
}
currentRow++;
return currentRow;
} | 3.68 |
hbase_MasterObserver_preMoveServers | /**
* Called before servers are moved to target region server group
* @param ctx the environment to interact with the framework and master
* @param servers set of servers to move
* @param targetGroup destination group
*/
default void preMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<Address> servers, String targetGroup) throws IOException {
} | 3.68 |
framework_LegacyLocatorStrategy_getElementByDOMPath | /**
* Locates an element based on a DOM path and a base element.
*
* @param baseElement
* The base element which the path is relative to
* @param path
* String locator (consisting of domChild[x] parts) that
* identifies the element
* @return The element identified by path, relative to baseElement or null
* if the element could not be found.
*/
private Element getElementByDOMPath(Element baseElement, String path) {
String[] parts = path.split(PARENTCHILD_SEPARATOR);
Element element = baseElement;
for (int i = 0, l = parts.length; i < l; ++i) {
String part = parts[i];
if (part.startsWith("domChild[")) {
String childIndexString = part.substring("domChild[".length(),
part.length() - 1);
if (WidgetUtil.findWidget(
baseElement) instanceof VAbstractOrderedLayout) {
if (element.hasChildNodes()) {
Element e = element.getFirstChildElement().cast();
String cn = e.getClassName();
if (cn != null && (cn.equals("v-expand")
|| cn.contains("v-has-caption"))) {
element = e;
}
}
}
try {
int childIndex = Integer.parseInt(childIndexString);
element = DOM.getChild(element, childIndex);
} catch (Exception e) {
return null;
}
if (element == null) {
return null;
}
} else {
path = parts[i];
for (int j = i + 1; j < l; ++j) {
path += PARENTCHILD_SEPARATOR + parts[j];
}
return getElementByPathStartingAt(path, element);
}
}
return element;
} | 3.68 |
morf_NamedParameterPreparedStatement_setBoolean | /**
* Sets the value of a named boolean parameter.
*
* @param parameter the parameter metadata.
* @param value the parameter value.
* @return this, for method chaining
* @exception SQLException if an error occurs when setting the parameter
*/
public NamedParameterPreparedStatement setBoolean(SqlParameter parameter, final boolean value) throws SQLException {
forEachOccurrenceOfParameter(parameter, new Operation() {
@Override
public void apply(int parameterIndex) throws SQLException {
statement.setBoolean(parameterIndex, value);
}
});
return this;
} | 3.68 |
hmily_XaResourceWrapped_start0 | /**
 * Implemented by subclasses. Start 0.
*
* @param xid the xid
* @param flag the flag
* @throws XAException the xa exception
*/
void start0(final Xid xid, final int flag) throws XAException {
} | 3.68 |
hadoop_VolumeFailureSummary_getFailedStorageLocations | /**
* Returns each storage location that has failed, sorted.
*
* @return each storage location that has failed, sorted
*/
public String[] getFailedStorageLocations() {
return this.failedStorageLocations;
} | 3.68 |
hmily_OriginTrackedPropertiesLoader_isWhiteSpace | /**
 * Returns whether the current character is unescaped whitespace.
 *
 * @return {@code true} if the character is whitespace, {@code false} otherwise
*/
public boolean isWhiteSpace() {
return !this.escaped && (this.character == ' ' || this.character == '\t'
|| this.character == '\f');
} | 3.68 |
flink_TSetClientInfoReq_findByName | /** Find the _Fields constant that matches name, or null if it is not found. */
public static _Fields findByName(java.lang.String name) {
return byName.get(name);
} | 3.68 |
framework_DataCommunicator_getFilter | /**
* Get the object used for filtering in this data communicator.
*
* @return the filter object of this data communicator
* @since 8.0.6
*/
protected Object getFilter() {
return filter;
} | 3.68 |
hadoop_AuditReplayThread_replayLog | /**
* Attempt to replay the provided command. Updates counters accordingly.
*
* @param command The command to replay
* @return True iff the command was successfully replayed (i.e., no exceptions
* were thrown).
*/
private boolean replayLog(final AuditReplayCommand command) {
final String src = command.getSrc();
final String dst = command.getDest();
FileSystem proxyFs = fsCache.get(command.getSimpleUgi());
if (proxyFs == null) {
UserGroupInformation ugi = UserGroupInformation
.createProxyUser(command.getSimpleUgi(), loginUser);
proxyFs = ugi.doAs((PrivilegedAction<FileSystem>) () -> {
try {
FileSystem fs = new DistributedFileSystem();
fs.initialize(namenodeUri, mapperConf);
return fs;
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
});
fsCache.put(command.getSimpleUgi(), proxyFs);
}
final FileSystem fs = proxyFs;
ReplayCommand replayCommand;
try {
replayCommand = ReplayCommand
.valueOf(command.getCommand().split(" ")[0].toUpperCase());
} catch (IllegalArgumentException iae) {
LOG.warn("Unsupported/invalid command: " + command);
replayCountersMap.get(REPLAYCOUNTERS.TOTALUNSUPPORTEDCOMMANDS)
.increment(1);
return false;
}
try {
long startTime = System.currentTimeMillis();
switch (replayCommand) {
case CREATE:
FSDataOutputStream fsDos = fs.create(new Path(src));
if (createBlocks) {
fsDos.writeByte(0);
}
fsDos.close();
break;
case GETFILEINFO:
fs.getFileStatus(new Path(src));
break;
case CONTENTSUMMARY:
fs.getContentSummary(new Path(src));
break;
case MKDIRS:
fs.mkdirs(new Path(src));
break;
case RENAME:
fs.rename(new Path(src), new Path(dst));
break;
case LISTSTATUS:
((DistributedFileSystem) fs).getClient().listPaths(src,
HdfsFileStatus.EMPTY_NAME);
break;
case APPEND:
fs.append(new Path(src));
return true;
case DELETE:
fs.delete(new Path(src), true);
break;
case OPEN:
fs.open(new Path(src)).close();
break;
case SETPERMISSION:
fs.setPermission(new Path(src), FsPermission.getDefault());
break;
case SETOWNER:
fs.setOwner(new Path(src),
UserGroupInformation.getCurrentUser().getShortUserName(),
UserGroupInformation.getCurrentUser().getPrimaryGroupName());
break;
case SETTIMES:
fs.setTimes(new Path(src), System.currentTimeMillis(),
System.currentTimeMillis());
break;
case SETREPLICATION:
fs.setReplication(new Path(src), (short) 1);
break;
case CONCAT:
// dst is like [path1, path2] - strip brackets and split on comma
String bareDist = dst.length() < 2 ? ""
: dst.substring(1, dst.length() - 1).trim();
List<Path> dsts = new ArrayList<>();
for (String s : Splitter.on(",").omitEmptyStrings().trimResults()
.split(bareDist)) {
dsts.add(new Path(s));
}
fs.concat(new Path(src), dsts.toArray(new Path[] {}));
break;
default:
throw new RuntimeException("Unexpected command: " + replayCommand);
}
long latency = System.currentTimeMillis() - startTime;
UserCommandKey userCommandKey = new UserCommandKey(command.getSimpleUgi(),
replayCommand.toString(), replayCommand.getType().toString());
commandLatencyMap.putIfAbsent(userCommandKey, new CountTimeWritable());
CountTimeWritable latencyWritable = commandLatencyMap.get(userCommandKey);
latencyWritable.setCount(latencyWritable.getCount() + 1);
latencyWritable.setTime(latencyWritable.getTime() + latency);
switch (replayCommand.getType()) {
case WRITE:
replayCountersMap.get(REPLAYCOUNTERS.TOTALWRITECOMMANDLATENCY)
.increment(latency);
replayCountersMap.get(REPLAYCOUNTERS.TOTALWRITECOMMANDS).increment(1);
break;
case READ:
replayCountersMap.get(REPLAYCOUNTERS.TOTALREADCOMMANDLATENCY)
.increment(latency);
replayCountersMap.get(REPLAYCOUNTERS.TOTALREADCOMMANDS).increment(1);
break;
default:
throw new RuntimeException("Unexpected command type: "
+ replayCommand.getType());
}
individualCommandsMap
.get(replayCommand + INDIVIDUAL_COMMANDS_LATENCY_SUFFIX)
.increment(latency);
individualCommandsMap
.get(replayCommand + INDIVIDUAL_COMMANDS_COUNT_SUFFIX).increment(1);
return true;
} catch (IOException e) {
LOG.debug("IOException: " + e.getLocalizedMessage());
individualCommandsMap
.get(replayCommand + INDIVIDUAL_COMMANDS_INVALID_SUFFIX).increment(1);
return false;
}
} | 3.68 |
framework_VScrollTable_moveCell | /**
 * Swap cells when a column is dragged.
*
* @param oldIndex
* The old index of the cell
* @param newIndex
* The new index of the cell
*/
public void moveCell(int oldIndex, int newIndex) {
final FooterCell hCell = getFooterCell(oldIndex);
final Element cell = hCell.getElement();
visibleCells.remove(oldIndex);
DOM.removeChild(tr, cell);
DOM.insertChild(tr, cell, newIndex);
visibleCells.add(newIndex, hCell);
} | 3.68 |
morf_DeepCopyTransformations_noTransformation | /**
* Returns a DeepCopyTransformation that always delegates the copy
* to the element.
* @return A no-op transformation (just deep-copy)
*/
public static DeepCopyTransformation noTransformation() {
return new NoTransformDeepCopyTransformer();
} | 3.68 |
flink_TpcdsTestProgram_prepareTableEnv | /**
 * Prepares the TableEnvironment for running TPC-DS queries.
 *
 * @param sourceTablePath path to the TPC-DS source table data
 * @param useTableStats whether to register table statistics with the optimizer
 * @return the prepared TableEnvironment
 */
private static TableEnvironment prepareTableEnv(String sourceTablePath, Boolean useTableStats) {
// init Table Env
EnvironmentSettings environmentSettings = EnvironmentSettings.inBatchMode();
TableEnvironment tEnv = TableEnvironment.create(environmentSettings);
// config Optimizer parameters
// TODO use the default shuffle mode of batch runtime mode once FLINK-23470 is implemented
tEnv.getConfig()
.set(
ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE,
GlobalStreamExchangeMode.POINTWISE_EDGES_PIPELINED.toString());
tEnv.getConfig()
.set(
OptimizerConfigOptions.TABLE_OPTIMIZER_BROADCAST_JOIN_THRESHOLD,
10 * 1024 * 1024L);
tEnv.getConfig().set(OptimizerConfigOptions.TABLE_OPTIMIZER_JOIN_REORDER_ENABLED, true);
// register TPC-DS tables
TPCDS_TABLES.forEach(
table -> {
TpcdsSchema schema = TpcdsSchemaProvider.getTableSchema(table);
CsvTableSource.Builder builder = CsvTableSource.builder();
builder.path(sourceTablePath + FILE_SEPARATOR + table + DATA_SUFFIX);
for (int i = 0; i < schema.getFieldNames().size(); i++) {
builder.field(
schema.getFieldNames().get(i),
TypeConversions.fromDataTypeToLegacyInfo(
schema.getFieldTypes().get(i)));
}
builder.fieldDelimiter(COL_DELIMITER);
builder.emptyColumnAsNull();
builder.lineDelimiter("\n");
CsvTableSource tableSource = builder.build();
ConnectorCatalogTable catalogTable =
ConnectorCatalogTable.source(tableSource, true);
tEnv.getCatalog(tEnv.getCurrentCatalog())
.ifPresent(
catalog -> {
try {
catalog.createTable(
new ObjectPath(
tEnv.getCurrentDatabase(), table),
catalogTable,
false);
} catch (Exception e) {
throw new RuntimeException(e);
}
});
});
// register statistics info
if (useTableStats) {
TpcdsStatsProvider.registerTpcdsStats(tEnv);
}
return tEnv;
} | 3.68 |
hadoop_SelectBinding_toString | /**
 * Stringify the given SelectObjectContentRequest, since its own
 * toString() does not produce a useful representation.
* @param request request to convert to a string
* @return a string to print. Does not contain secrets.
*/
public static String toString(final SelectObjectContentRequest request) {
StringBuilder sb = new StringBuilder();
sb.append("SelectObjectContentRequest{")
.append("bucket name=").append(request.bucket())
.append("; key=").append(request.key())
.append("; expressionType=").append(request.expressionType())
.append("; expression=").append(request.expression());
InputSerialization input = request.inputSerialization();
if (input != null) {
sb.append("; Input")
.append(input.toString());
} else {
sb.append("; Input Serialization: none");
}
OutputSerialization out = request.outputSerialization();
if (out != null) {
sb.append("; Output")
.append(out.toString());
} else {
sb.append("; Output Serialization: none");
}
return sb.append("}").toString();
} | 3.68 |
hudi_MarkerDirState_exists | /**
* @return {@code true} if the marker directory exists in the system.
*/
public boolean exists() {
try {
return fileSystem.exists(new Path(markerDirPath));
} catch (IOException ioe) {
throw new HoodieIOException(ioe.getMessage(), ioe);
}
} | 3.68 |
flink_ChannelWriterOutputView_close | /**
* Closes this OutputView, closing the underlying writer and returning all memory segments.
*
* @return A list containing all memory segments originally supplied to this view.
* @throws IOException Thrown, if the underlying writer could not be properly closed.
*/
public List<MemorySegment> close() throws IOException {
 // send off the last segment
writeSegment(getCurrentSegment(), getCurrentPositionInSegment(), true);
clear();
// close the writer and gather all segments
final LinkedBlockingQueue<MemorySegment> queue = this.writer.getReturnQueue();
this.writer.close();
// re-collect all memory segments
ArrayList<MemorySegment> list = new ArrayList<MemorySegment>(this.numSegments);
for (int i = 0; i < this.numSegments; i++) {
final MemorySegment m = queue.poll();
if (m == null) {
// we get null if the queue is empty. that should not be the case if the reader was
// properly closed.
throw new RuntimeException(
"ChannelWriterOutputView: MemorySegments have been taken from return queue by different actor.");
}
list.add(m);
}
return list;
} | 3.68 |
hudi_HoodieLogFileReader_addShutDownHook | /**
 * Close the input stream, if not already closed, when the JVM exits.
*/
private void addShutDownHook() {
shutdownThread = new Thread(() -> {
try {
close();
} catch (Exception e) {
LOG.warn("unable to close input stream for log file " + logFile, e);
// fail silently for any sort of exception
}
});
Runtime.getRuntime().addShutdownHook(shutdownThread);
} | 3.68 |
morf_TableReference_hashCode | /**
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return new HashCodeBuilder(1559, 887)
.append(schemaName)
.append(name)
.append(alias)
.append(temporary)
.append(dblink)
.toHashCode();
} | 3.68 |
morf_UpgradePathFinder_upgradeStepsToApply | /**
* Returns a list of upgrade steps to be applied.
*/
private List<CandidateStep> upgradeStepsToApply() {
final Map<java.util.UUID, CandidateStep> candidateSteps = candidateStepsByUUID();
return candidateSteps.values().stream()
.filter(step -> step.isApplicable(stepsAlreadyApplied, candidateSteps))
.collect(Collectors.toList());
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_label | /**
* Set label.
*
* @param label label
* @return ElasticJob configuration builder
*/
public Builder label(final String label) {
this.label = label;
return this;
} | 3.68 |
framework_StringLengthValidator_getMinLength | /**
* Gets the minimum permissible length of the string.
*
* @return the minimum length of the string or null if there is no limit
*/
public Integer getMinLength() {
return validator.getMinValue();
} | 3.68 |
dubbo_DubboProtocol_getDubboProtocol | /**
* @deprecated Use {@link DubboProtocol#getDubboProtocol(ScopeModel)} instead
*/
@Deprecated
public static DubboProtocol getDubboProtocol() {
return (DubboProtocol) FrameworkModel.defaultModel()
.getExtensionLoader(Protocol.class)
.getExtension(DubboProtocol.NAME, false);
} | 3.68 |
flink_LogicalTypeMerging_findCommonType | /**
* Returns the most common, more general {@link LogicalType} for a given set of types. If such a
* type exists, all given types can be casted to this more general type.
*
* <p>For example: {@code [INT, BIGINT, DECIMAL(2, 2)]} would lead to {@code DECIMAL(21, 2)}.
*
* <p>This class aims to be compatible with the SQL standard. It is inspired by Apache Calcite's
* {@code SqlTypeFactoryImpl#leastRestrictive} method.
*/
public static Optional<LogicalType> findCommonType(List<LogicalType> types) {
Preconditions.checkArgument(types.size() > 0, "List of types must not be empty.");
// collect statistics first
boolean hasRawType = false;
boolean hasNullType = false;
boolean hasNullableTypes = false;
for (LogicalType type : types) {
final LogicalTypeRoot typeRoot = type.getTypeRoot();
if (typeRoot == RAW) {
hasRawType = true;
} else if (typeRoot == NULL) {
hasNullType = true;
}
if (type.isNullable()) {
hasNullableTypes = true;
}
}
final List<LogicalType> normalizedTypes =
types.stream().map(t -> t.copy(true)).collect(Collectors.toList());
LogicalType foundType = findCommonNullableType(normalizedTypes, hasRawType, hasNullType);
if (foundType == null) {
foundType = findCommonCastableType(normalizedTypes);
}
if (foundType != null) {
final LogicalType typeWithNullability = foundType.copy(hasNullableTypes);
// NULL is reserved for untyped literals only
if (typeWithNullability.is(NULL)) {
return Optional.empty();
}
return Optional.of(typeWithNullability);
}
return Optional.empty();
} | 3.68 |
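Following the example given in the Javadoc, a usage sketch (assuming the logical type classes from org.apache.flink.table.types.logical):

```java
import java.util.Arrays;
import java.util.Optional;

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.DecimalType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.utils.LogicalTypeMerging;

public class CommonTypeExample {
    public static void main(String[] args) {
        Optional<LogicalType> common = LogicalTypeMerging.findCommonType(
                Arrays.asList(new IntType(), new BigIntType(), new DecimalType(2, 2)));
        // Per the Javadoc above, the common type here is DECIMAL(21, 2).
        common.ifPresent(type -> System.out.println(type.asSummaryString()));
    }
}
```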
querydsl_PathBuilder_toString | /**
* Get the String representation of the last path element
*
* @param path path
* @return String representation
*/
private String toString(Path<?> path) {
return path.getMetadata().getElement().toString();
} | 3.68 |
flink_AsynchronousJobOperationKey_getJobId | /**
* Get the job id for the given operation key.
*
* @return job id
*/
public JobID getJobId() {
return jobId;
} | 3.68 |
hbase_MasterObserver_postModifyNamespace | /**
* Called after the modifyNamespace operation has been requested.
* @param ctx the environment to interact with the framework and master
* @param oldNsDescriptor descriptor of namespace before modify operation happened
* @param currentNsDescriptor current NamespaceDescriptor of the namespace
*/
default void postModifyNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor oldNsDescriptor, NamespaceDescriptor currentNsDescriptor)
throws IOException {
} | 3.68 |
pulsar_ResourceGroupService_getRgUpdatesCount | // Visibility for testing.
protected static double getRgUpdatesCount (String rgName) {
return rgUpdates.labels(rgName).get();
} | 3.68 |
flink_SkipListUtils_getValueVersion | /**
* Returns the version of value.
*
* @param memorySegment memory segment for value space.
* @param offset offset of value space in memory segment.
*/
public static int getValueVersion(MemorySegment memorySegment, int offset) {
return memorySegment.getInt(offset + VALUE_VERSION_OFFSET);
} | 3.68 |
hudi_TimelineUtils_getEarliestInstantForMetadataArchival | /**
* Gets the qualified earliest instant from the active timeline of the data table
* for the archival in metadata table.
* <p>
 * The qualified earliest instant is chosen as the earlier of the earliest
 * commit (COMMIT, DELTA_COMMIT, and REPLACE_COMMIT only, considering non-savepoint
 * commits only if archiving beyond savepoints is enabled) and the earliest inflight
 * instant (all actions).
*
* @param dataTableActiveTimeline the active timeline of the data table.
* @param shouldArchiveBeyondSavepoint whether to archive beyond savepoint.
* @return the instant meeting the requirement.
*/
public static Option<HoodieInstant> getEarliestInstantForMetadataArchival(
HoodieActiveTimeline dataTableActiveTimeline, boolean shouldArchiveBeyondSavepoint) {
// This is for commits only, not including CLEAN, ROLLBACK, etc.
// When archive beyond savepoint is enabled, there are chances that there could be holes
// in the timeline due to archival and savepoint interplay. So, the first non-savepoint
// commit in the data timeline is considered as beginning of the active timeline.
Option<HoodieInstant> earliestCommit = shouldArchiveBeyondSavepoint
? dataTableActiveTimeline.getTimelineOfActions(
CollectionUtils.createSet(
COMMIT_ACTION, DELTA_COMMIT_ACTION, REPLACE_COMMIT_ACTION, SAVEPOINT_ACTION))
.getFirstNonSavepointCommit()
: dataTableActiveTimeline.getCommitsTimeline().firstInstant();
// This is for all instants which are in-flight
Option<HoodieInstant> earliestInflight =
dataTableActiveTimeline.filterInflightsAndRequested().firstInstant();
if (earliestCommit.isPresent() && earliestInflight.isPresent()) {
if (earliestCommit.get().compareTo(earliestInflight.get()) < 0) {
return earliestCommit;
}
return earliestInflight;
} else if (earliestCommit.isPresent()) {
return earliestCommit;
} else if (earliestInflight.isPresent()) {
return earliestInflight;
} else {
return Option.empty();
}
} | 3.68 |
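The final if/else cascade above simply picks the earlier of two optional instants, or whichever one is present. A self-contained illustration of that selection pattern with plain java.util.Optional (no Hudi types involved):

import java.util.Optional;

public class EarliestOfTwoExample {
    // Returns the smaller of two optional values, or whichever one is present.
    static <T extends Comparable<T>> Optional<T> earliest(Optional<T> a, Optional<T> b) {
        if (a.isPresent() && b.isPresent()) {
            return a.get().compareTo(b.get()) < 0 ? a : b;
        }
        return a.isPresent() ? a : b;
    }

    public static void main(String[] args) {
        System.out.println(earliest(Optional.of("20230105"), Optional.of("20230101"))); // Optional[20230101]
        System.out.println(earliest(Optional.empty(), Optional.of("20230101")));        // Optional[20230101]
        System.out.println(earliest(Optional.<String>empty(), Optional.empty()));       // Optional.empty
    }
}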
morf_FieldReference_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder result = new StringBuilder();
if (table != null) result.append(table).append(".");
result.append(name);
if (direction != null && direction != Direction.NONE) result.append(" ").append(direction);
if (nullValueHandling.isPresent()) result.append(" NULLS ").append(nullValueHandling.get());
result.append(super.toString());
return result.toString();
} | 3.68 |
flink_DateTimeUtils_getValue | /**
* Returns the TimeUnit associated with an ordinal. The value returned is null if the
* ordinal is not a member of the TimeUnit enumeration.
*/
public static TimeUnit getValue(int ordinal) {
return ordinal < 0 || ordinal >= CACHED_VALUES.length ? null : CACHED_VALUES[ordinal];
} | 3.68 |
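The lookup above relies on a cached values() array so that out-of-range ordinals map to null without allocating a new array on every call. A self-contained sketch of that pattern with a hypothetical enum (not Flink's TimeUnit):

public class CachedEnumLookup {
    enum Fruit { APPLE, BANANA, CHERRY }

    // values() allocates a new array on each call, so cache it once.
    private static final Fruit[] CACHED_VALUES = Fruit.values();

    // Returns null for ordinals outside the enum's range instead of throwing.
    static Fruit fromOrdinal(int ordinal) {
        return ordinal < 0 || ordinal >= CACHED_VALUES.length ? null : CACHED_VALUES[ordinal];
    }

    public static void main(String[] args) {
        System.out.println(fromOrdinal(1)); // BANANA
        System.out.println(fromOrdinal(7)); // null
    }
}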
flink_TaskManagerRuntimeInfo_getTaskManagerBindAddress | /**
 * Gets the bind address of the TaskManager.
*
* @return The bind address of the TaskManager.
*/
default String getTaskManagerBindAddress() {
return getConfiguration().getString(TaskManagerOptions.BIND_HOST);
} | 3.68 |
flink_FlinkConfMountDecorator_getClusterSidePropertiesMap | /** Get properties map for the cluster-side after removal of some keys. */
private Map<String, String> getClusterSidePropertiesMap(Configuration flinkConfig) {
final Configuration clusterSideConfig = flinkConfig.clone();
// Remove some configuration options that should not be taken to cluster side.
clusterSideConfig.removeConfig(KubernetesConfigOptions.KUBE_CONFIG_FILE);
clusterSideConfig.removeConfig(DeploymentOptionsInternal.CONF_DIR);
clusterSideConfig.removeConfig(RestOptions.BIND_ADDRESS);
clusterSideConfig.removeConfig(JobManagerOptions.BIND_HOST);
clusterSideConfig.removeConfig(TaskManagerOptions.BIND_HOST);
clusterSideConfig.removeConfig(TaskManagerOptions.HOST);
return clusterSideConfig.toMap();
} | 3.68 |
hadoop_ResourceSkyline_setJobSubmissionTime | /**
* Set jobSubmissionTime.
*
* @param jobSubmissionTimeConfig jobSubmissionTime.
*/
public final void setJobSubmissionTime(final long jobSubmissionTimeConfig) {
this.jobSubmissionTime = jobSubmissionTimeConfig;
} | 3.68 |
rocketmq-connect_ReporterManagerUtil_sinkTaskReporters | /**
 * Builds the ordered list of error reporters for a sink task.
 *
 * @param connectorTaskId id of the connector task the reporters belong to
 * @param connConfig connector configuration
 * @param workerConfig worker configuration
 * @param errorMetricsGroup metrics group used to record error counts
 * @return the reporters, with the log reporter first and the dead letter queue reporter (if configured) after it
*/
public static List<ErrorReporter> sinkTaskReporters(ConnectorTaskId connectorTaskId,
ConnectKeyValue connConfig,
WorkerConfig workerConfig,
ErrorMetricsGroup errorMetricsGroup) {
// ensure reporter order
ArrayList<ErrorReporter> reporters = new ArrayList<>();
LogReporter logReporter = new LogReporter(connectorTaskId, connConfig, errorMetricsGroup);
reporters.add(logReporter);
// dead letter queue reporter
DeadLetterQueueReporter reporter = DeadLetterQueueReporter.build(connectorTaskId, connConfig, workerConfig, errorMetricsGroup);
if (reporter != null) {
reporters.add(reporter);
}
return reporters;
} | 3.68 |
flink_FlinkImageBuilder_asJobManager | /** Use this image for building a JobManager. */
public FlinkImageBuilder asJobManager() {
checkStartupCommandNotSet();
this.startupCommand = "bin/jobmanager.sh start-foreground && tail -f /dev/null";
this.imageNameSuffix = "jobmanager";
return this;
} | 3.68 |
hudi_ClusteringPlanStrategy_checkPrecondition | /**
 * Checks whether clustering can proceed. If not (i.e., this returns false), the PlanStrategy will generate an empty plan to stop the scheduling.
*/
public boolean checkPrecondition() {
return true;
} | 3.68 |
hadoop_AbstractS3ACommitter_fromStatusIterator | /**
* Create an active commit of the given pending files.
* @param pendingFS source filesystem.
* @param statuses iterator of file status or subclass to use.
* @return the commit
* @throws IOException if the iterator raises one.
*/
public static ActiveCommit fromStatusIterator(
final FileSystem pendingFS,
final RemoteIterator<? extends FileStatus> statuses) throws IOException {
return new ActiveCommit(pendingFS, toList(statuses));
} | 3.68 |
querydsl_SimpleExpression_notIn | /**
* Create a {@code this not in right} expression
*
* @param right rhs of the comparison
* @return this not in right
*/
public BooleanExpression notIn(Expression<? extends T>... right) {
return Expressions.booleanOperation(Ops.NOT_IN, mixin, Expressions.list(right));
} | 3.68 |
hadoop_AllocateResponse_preemptionMessage | /**
* Set the <code>preemptionMessage</code> of the response.
* @see AllocateResponse#setPreemptionMessage(PreemptionMessage)
* @param preemptionMessage <code>preemptionMessage</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder preemptionMessage(
PreemptionMessage preemptionMessage) {
allocateResponse.setPreemptionMessage(preemptionMessage);
return this;
} | 3.68 |
pulsar_ByteBufPair_coalesce | /**
* @return a single buffer with the content of both individual buffers
*/
@VisibleForTesting
public static ByteBuf coalesce(ByteBufPair pair) {
ByteBuf b = Unpooled.buffer(pair.readableBytes());
b.writeBytes(pair.b1, pair.b1.readerIndex(), pair.b1.readableBytes());
b.writeBytes(pair.b2, pair.b2.readerIndex(), pair.b2.readableBytes());
return b;
} | 3.68 |
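A hedged sketch of the same coalescing idea using Netty's ByteBuf API directly, without ByteBufPair: allocate a buffer sized for both parts and copy each source via the writeBytes overload used above, leaving the sources' reader indices untouched.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.charset.StandardCharsets;

public class CoalesceExample {
    public static void main(String[] args) {
        ByteBuf b1 = Unpooled.copiedBuffer("header-", StandardCharsets.UTF_8);
        ByteBuf b2 = Unpooled.copiedBuffer("payload", StandardCharsets.UTF_8);
        // Allocate a buffer large enough for both parts and copy them in
        // without moving the reader indices of the sources.
        ByteBuf combined = Unpooled.buffer(b1.readableBytes() + b2.readableBytes());
        combined.writeBytes(b1, b1.readerIndex(), b1.readableBytes());
        combined.writeBytes(b2, b2.readerIndex(), b2.readableBytes());
        System.out.println(combined.toString(StandardCharsets.UTF_8)); // header-payload
        b1.release();
        b2.release();
        combined.release();
    }
}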
framework_AbstractSingleSelect_addSelectionListener | /**
* Adds a selection listener to this select. The listener is called when the
* selection is changed either by the user or programmatically.
*
* @param listener
* the selection listener, not null
* @return a registration for the listener
*/
public Registration addSelectionListener(
SingleSelectionListener<T> listener) {
return addListener(SingleSelectionEvent.class, listener,
SingleSelectionListener.SELECTION_CHANGE_METHOD);
} | 3.68 |
flink_RocksDBMemoryControllerUtils_calculateRocksDBDefaultArenaBlockSize | /**
* Calculate the default arena block size as RocksDB calculates it in <a
* href="https://github.com/dataArtisans/frocksdb/blob/49bc897d5d768026f1eb816d960c1f2383396ef4/db/column_family.cc#L196-L201">
* here</a>.
*
* @return the default arena block size
* @param writeBufferSize the write buffer size (bytes)
*/
static long calculateRocksDBDefaultArenaBlockSize(long writeBufferSize) {
long arenaBlockSize = writeBufferSize / 8;
// Align up to 4k
final long align = 4 * 1024;
return ((arenaBlockSize + align - 1) / align) * align;
} | 3.68 |
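Worked numbers help here: a 64 MB write buffer yields a raw arena block size of 8 MB, which is already 4 KB-aligned, while 10,000,000 bytes yields 1,250,000 and is rounded up to 1,253,376. A standalone sketch of the same calculation:

public class ArenaBlockSizeExample {
    // Mirrors the calculation above: writeBufferSize / 8, rounded up to a 4 KB boundary.
    static long defaultArenaBlockSize(long writeBufferSize) {
        long arenaBlockSize = writeBufferSize / 8;
        final long align = 4 * 1024;
        return ((arenaBlockSize + align - 1) / align) * align;
    }

    public static void main(String[] args) {
        System.out.println(defaultArenaBlockSize(64L * 1024 * 1024)); // 8388608 (8 MB, already a 4 KB multiple)
        System.out.println(defaultArenaBlockSize(10_000_000L));       // 1253376 (1,250,000 rounded up to the next 4 KB multiple)
    }
}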
hbase_ByteBufferUtils_copyFromBufferToBuffer | /**
* Copy from one buffer to another from given offset.
* <p>
 * Note: This will advance the position marker of {@code out} but not change the position marker
 * for {@code in}.
* @param in source buffer
* @param out destination buffer
* @param sourceOffset offset in the source buffer
* @param length how many bytes to copy
*/
public static void copyFromBufferToBuffer(ByteBuffer in, ByteBuffer out, int sourceOffset,
int length) {
if (in.hasArray() && out.hasArray()) {
System.arraycopy(in.array(), sourceOffset + in.arrayOffset(), out.array(),
out.position() + out.arrayOffset(), length);
skip(out, length);
} else if (UNSAFE_AVAIL) {
UnsafeAccess.copy(in, sourceOffset, out, out.position(), length);
skip(out, length);
} else {
ByteBuffer inDup = in.duplicate();
inDup.position(sourceOffset).limit(sourceOffset + length);
out.put(inDup);
}
} | 3.68 |
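A self-contained java.nio sketch of the heap-array branch above: copy a slice of one buffer into another at the destination's current position, advancing only the destination's position.

import java.nio.ByteBuffer;

public class BufferCopyExample {
    // Copies length bytes from in (starting at sourceOffset) into out at out's
    // current position, advancing out's position but leaving in untouched.
    static void copy(ByteBuffer in, ByteBuffer out, int sourceOffset, int length) {
        if (in.hasArray() && out.hasArray()) {
            System.arraycopy(in.array(), sourceOffset + in.arrayOffset(),
                    out.array(), out.position() + out.arrayOffset(), length);
            out.position(out.position() + length);
        } else {
            ByteBuffer inDup = in.duplicate();
            inDup.position(sourceOffset).limit(sourceOffset + length);
            out.put(inDup);
        }
    }

    public static void main(String[] args) {
        ByteBuffer src = ByteBuffer.wrap("hello world".getBytes());
        ByteBuffer dst = ByteBuffer.allocate(16);
        copy(src, dst, 6, 5); // copy "world"
        dst.flip();
        byte[] result = new byte[dst.remaining()];
        dst.get(result);
        System.out.println(new String(result)); // world
    }
}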
graphhopper_CHStorage_shortcutNodeBased | /**
* Adds a shortcut to the storage. Shortcuts are stored in the same order they are added. The underlying DataAccess
* object grows automatically when adding more shortcuts.
*/
public int shortcutNodeBased(int nodeA, int nodeB, int accessFlags, double weight, int skip1, int skip2) {
if (edgeBased)
throw new IllegalArgumentException("Cannot add node-based shortcuts to edge-based CH");
return shortcut(nodeA, nodeB, accessFlags, weight, skip1, skip2);
} | 3.68 |
hbase_SnapshotManager_checkSnapshotSupport | /**
 * Called at startup to verify whether the snapshot operation is supported, and to avoid starting
 * the master if snapshots are present but the required cleaners are missing. Otherwise we can end
 * up with snapshot data loss.
 * @param conf The {@link Configuration} object to use
 * @param mfs The MasterFileSystem to use
 * @throws IOException in case of file-system operation failure
 * @throws UnsupportedOperationException in case cleaners are missing and there are snapshots in
 *           the system
*/
private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
throws IOException, UnsupportedOperationException {
// Verify if snapshot is disabled by the user
String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);
// Extract cleaners from conf
Set<String> hfileCleaners = new HashSet<>();
String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);
Set<String> logCleaners = new HashSet<>();
cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
if (cleaners != null) Collections.addAll(logCleaners, cleaners);
// check if an older version of snapshot directory was present
Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
FileSystem fs = mfs.getFileSystem();
List<SnapshotDescription> ss = getCompletedSnapshots(new Path(rootDir, oldSnapshotDir), false);
if (ss != null && !ss.isEmpty()) {
LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
LOG.error("Please rename the directory as " + HConstants.SNAPSHOT_DIR_NAME);
}
    // If the user has enabled snapshots, we force the cleaners to be present;
    // otherwise we still need to check whether the cleaners are enabled and verify
    // that there are no snapshots in the .snapshot folder.
if (snapshotEnabled) {
// Inject snapshot cleaners, if snapshot.enable is true
hfileCleaners.add(SnapshotHFileCleaner.class.getName());
hfileCleaners.add(HFileLinkCleaner.class.getName());
// If sync acl to HDFS feature is enabled, then inject the cleaner
if (SnapshotScannerHDFSAclHelper.isAclSyncToHdfsEnabled(conf)) {
hfileCleaners.add(SnapshotScannerHDFSAclCleaner.class.getName());
}
// Set cleaners conf
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
hfileCleaners.toArray(new String[hfileCleaners.size()]));
conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
logCleaners.toArray(new String[logCleaners.size()]));
} else {
// There may be restore tables if snapshot is enabled and then disabled, so add
// HFileLinkCleaner, see HBASE-26670 for more details.
hfileCleaners.add(HFileLinkCleaner.class.getName());
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
hfileCleaners.toArray(new String[hfileCleaners.size()]));
// Verify if SnapshotHFileCleaner are present
snapshotEnabled = hfileCleaners.contains(SnapshotHFileCleaner.class.getName());
// Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
if (snapshotEnabled) {
LOG.warn("Snapshot log and hfile cleaners are present in the configuration, " + "but the '"
+ HBASE_SNAPSHOT_ENABLED + "' property "
+ (userDisabled ? "is set to 'false'." : "is not set."));
}
}
// Mark snapshot feature as enabled if cleaners are present and user has not disabled it.
this.isSnapshotSupported = snapshotEnabled && !userDisabled;
    // If cleaners are not enabled, verify that there are no snapshots in the .snapshot folder;
    // otherwise we end up with snapshot data loss.
if (!snapshotEnabled) {
LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
if (fs.exists(snapshotDir)) {
FileStatus[] snapshots = CommonFSUtils.listStatus(fs, snapshotDir,
new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
if (snapshots != null) {
LOG.error("Snapshots are present, but cleaners are not enabled.");
checkSnapshotSupport();
}
}
}
} | 3.68 |
flink_SharedBufferAccessor_extractPatterns | /**
* Returns all elements from the previous relation starting at the given entry.
*
* @param nodeId id of the starting entry
* @param version Version of the previous relation which shall be extracted
* @return Collection of previous relations starting with the given value
*/
public List<Map<String, List<EventId>>> extractPatterns(
final NodeId nodeId, final DeweyNumber version) {
List<Map<String, List<EventId>>> result = new ArrayList<>();
// stack to remember the current extraction states
Stack<SharedBufferAccessor.ExtractionState> extractionStates = new Stack<>();
// get the starting shared buffer entry for the previous relation
Lockable<SharedBufferNode> entryLock = sharedBuffer.getEntry(nodeId);
if (entryLock != null) {
SharedBufferNode entry = entryLock.getElement();
extractionStates.add(
new SharedBufferAccessor.ExtractionState(
Tuple2.of(nodeId, entry), version, new Stack<>()));
// use a depth first search to reconstruct the previous relations
while (!extractionStates.isEmpty()) {
final SharedBufferAccessor.ExtractionState extractionState = extractionStates.pop();
// current path of the depth first search
final Stack<Tuple2<NodeId, SharedBufferNode>> currentPath =
extractionState.getPath();
final Tuple2<NodeId, SharedBufferNode> currentEntry = extractionState.getEntry();
// termination criterion
if (currentEntry == null) {
final Map<String, List<EventId>> completePath = new LinkedHashMap<>();
while (!currentPath.isEmpty()) {
final NodeId currentPathEntry = currentPath.pop().f0;
String page = currentPathEntry.getPageName();
List<EventId> values =
completePath.computeIfAbsent(page, k -> new ArrayList<>());
values.add(currentPathEntry.getEventId());
}
result.add(completePath);
} else {
// append state to the path
currentPath.push(currentEntry);
boolean firstMatch = true;
for (Lockable<SharedBufferEdge> lockableEdge : currentEntry.f1.getEdges()) {
// we can only proceed if the current version is compatible to the version
// of this previous relation
final SharedBufferEdge edge = lockableEdge.getElement();
final DeweyNumber currentVersion = extractionState.getVersion();
if (currentVersion.isCompatibleWith(edge.getDeweyNumber())) {
final NodeId target = edge.getTarget();
Stack<Tuple2<NodeId, SharedBufferNode>> newPath;
if (firstMatch) {
// for the first match we don't have to copy the current path
newPath = currentPath;
firstMatch = false;
} else {
newPath = new Stack<>();
newPath.addAll(currentPath);
}
extractionStates.push(
new SharedBufferAccessor.ExtractionState(
target != null
? Tuple2.of(
target,
sharedBuffer
.getEntry(target)
.getElement())
: null,
edge.getDeweyNumber(),
newPath));
}
}
}
}
}
return result;
} | 3.68 |
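At its core the method above is an iterative depth-first search that forks the current path whenever a node has more than one compatible outgoing edge. A stripped-down sketch of that traversal over a plain adjacency map (no Dewey-number compatibility checks and none of the CEP types) could look like this:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.Map;

public class PathExtractionExample {

    // One pending traversal step: the node to expand next plus the path taken so far.
    static final class State {
        final String node;
        final List<String> path;
        State(String node, List<String> path) { this.node = node; this.path = path; }
    }

    // Collects every path from start to a node that has no outgoing edges.
    static List<List<String>> extractPaths(Map<String, List<String>> edges, String start) {
        List<List<String>> result = new ArrayList<>();
        Deque<State> stack = new ArrayDeque<>();
        stack.push(new State(start, new ArrayList<>()));
        while (!stack.isEmpty()) {
            State state = stack.pop();
            List<String> path = new ArrayList<>(state.path); // copy before extending
            path.add(state.node);
            List<String> successors = edges.getOrDefault(state.node, List.of());
            if (successors.isEmpty()) {
                result.add(path); // terminal node reached: emit the complete path
            } else {
                for (String next : successors) {
                    stack.push(new State(next, path)); // each branch continues from this path
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, List<String>> edges = Map.of(
                "a", List.of("b", "c"),
                "b", List.of("d"),
                "c", List.of("d"));
        System.out.println(extractPaths(edges, "a")); // [[a, c, d], [a, b, d]]
    }
}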
hbase_WALEntryBatch_getNbOperations | /** Returns total number of operations in this batch */
public int getNbOperations() {
return getNbRowKeys() + getNbHFiles();
} | 3.68 |
flink_NestedRowData_setNullAt | /** See {@link BinaryRowData#setNullAt(int)}. */
@Override
public void setNullAt(int i) {
assertIndexIsValid(i);
BinarySegmentUtils.bitSet(segments, offset, i + 8);
BinarySegmentUtils.setLong(segments, getFieldOffset(i), 0);
} | 3.68 |
hadoop_RollingFileSystemSink_stringifySecurityProperty | /**
* Turn a security property into a nicely formatted set of <i>name=value</i>
* strings, allowing for either the property or the configuration not to be
* set.
*
* @param property the property to stringify
* @return the stringified property
*/
private String stringifySecurityProperty(String property) {
String securityProperty;
if (properties.containsKey(property)) {
String propertyValue = properties.getString(property);
String confValue = conf.get(properties.getString(property));
if (confValue != null) {
securityProperty = property + "=" + propertyValue
+ ", " + properties.getString(property) + "=" + confValue;
} else {
securityProperty = property + "=" + propertyValue
+ ", " + properties.getString(property) + "=<NOT SET>";
}
} else {
securityProperty = property + "=<NOT SET>";
}
return securityProperty;
} | 3.68 |
hadoop_NameNodeUtils_getClientNamenodeAddress | /**
* Return the namenode address that will be used by clients to access this
 * namenode or name service. This needs to be called before the config
 * is overridden.
*
* This method behaves as follows:
*
* 1. fs.defaultFS is undefined:
* - return null.
* 2. fs.defaultFS is defined but has no hostname (logical or physical):
* - return null.
* 3. Single NN (no HA, no federation):
* - return URI authority from fs.defaultFS
* 4. Current NN is in an HA nameservice (with or without federation):
* - return nameservice for current NN.
* 5. Current NN is in non-HA namespace, federated cluster:
* - return value of dfs.namenode.rpc-address.[nsId].[nnId]
* - If the above key is not defined, then return authority from
* fs.defaultFS if the port number is > 0.
 * 6. If the port number in the authority is missing or zero:
* - return null
*/
@VisibleForTesting
@Nullable
static String getClientNamenodeAddress(
Configuration conf, @Nullable String nsId) {
final Collection<String> nameservices =
DFSUtilClient.getNameServiceIds(conf);
final String nnAddr = conf.getTrimmed(FS_DEFAULT_NAME_KEY);
if (nnAddr == null) {
// default fs is not set.
return null;
}
LOG.info("{} is {}", FS_DEFAULT_NAME_KEY, nnAddr);
final URI nnUri = URI.create(nnAddr);
String defaultNnHost = nnUri.getHost();
if (defaultNnHost == null) {
return null;
}
// Current Nameservice is HA.
if (nsId != null && nameservices.contains(nsId)) {
final Collection<String> namenodes = conf.getTrimmedStringCollection(
DFS_HA_NAMENODES_KEY_PREFIX + "." + nsId);
if (namenodes.size() > 1) {
return nsId;
}
}
// Federation without HA. We must handle the case when the current NN
// is not in the default nameservice.
String currentNnAddress = null;
if (nsId != null) {
String hostNameKey = DFS_NAMENODE_RPC_ADDRESS_KEY + "." + nsId;
currentNnAddress = conf.get(hostNameKey);
}
// Fallback to the address in fs.defaultFS.
if (currentNnAddress == null) {
currentNnAddress = nnUri.getAuthority();
}
int port = 0;
if (currentNnAddress.contains(":")) {
port = Integer.parseInt(currentNnAddress.split(":")[1]);
}
if (port > 0) {
return currentNnAddress;
} else {
// the port is missing or 0. Figure out real bind address later.
return null;
}
} | 3.68 |
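The final step above only returns the authority when it carries an explicit, positive port. A self-contained sketch of that check using java.net.URI, which reports a missing port as -1 (the scheme and host names below are purely illustrative):

import java.net.URI;

public class AuthorityPortCheck {
    // Returns the authority only when it has an explicit, positive port; otherwise null.
    static String authorityWithPortOrNull(String address) {
        URI uri = URI.create("hdfs://" + address);
        return uri.getPort() > 0 ? uri.getAuthority() : null;
    }

    public static void main(String[] args) {
        System.out.println(authorityWithPortOrNull("nn1.example.com:8020")); // nn1.example.com:8020
        System.out.println(authorityWithPortOrNull("nn1.example.com"));      // null (no port)
        System.out.println(authorityWithPortOrNull("nn1.example.com:0"));    // null (port is zero)
    }
}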
hadoop_ZStandardDecompressor_setDictionary | // dictionary is not supported
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"Dictionary support is not enabled");
} | 3.68 |
dubbo_MetricsApplicationListener_onPostEventBuild | /**
 * Performs an auto-increment on the monitored key.
 * A custom listener can be used instead of this generic operation.
*
* @param metricsKey Monitor key
* @param collector Corresponding collector
*/
public static AbstractMetricsKeyListener onPostEventBuild(
MetricsKey metricsKey, CombMetricsCollector<?> collector) {
return AbstractMetricsKeyListener.onEvent(metricsKey, event -> collector.increment(metricsKey));
} | 3.68 |
framework_Table_getItemId | /**
 * Returns the item id of the context-clicked row.
 *
 * @return item id of the clicked row; <code>null</code> if the header, footer
 *         or empty area of the Table was clicked
*/
public Object getItemId() {
return itemId;
} | 3.68 |
hmily_HmilyShutdownHook_registerAutoCloseable | /**
* Register auto closeable.
*
* @param autoCloseable the auto closeable
*/
public void registerAutoCloseable(final AutoCloseable autoCloseable) {
autoCloseableHashSet.add(autoCloseable);
} | 3.68 |