name (stringlengths 12–178) | code_snippet (stringlengths 8–36.5k) | score (float64 3.26–3.68) |
---|---|---|
framework_Table_setColumnHeaders | /**
* Sets the headers of the columns.
*
* <p>
 * The headers match the property ids of the visible columns, in the order
 * given by {@link #getVisibleColumns()}. The table must be set to either
* {@link #COLUMN_HEADER_MODE_EXPLICIT} or
* {@link #COLUMN_HEADER_MODE_EXPLICIT_DEFAULTS_ID} mode to show the
* headers. In the defaults mode any nulls in the headers array are replaced
* with id.toString() outputs when rendering.
* </p>
*
* @param columnHeaders
* the Array of column headers that match the
* {@link #getVisibleColumns()} method.
*/
public void setColumnHeaders(String... columnHeaders) {
if (columnHeaders.length != visibleColumns.size()) {
throw new IllegalArgumentException(
"The length of the headers array must match the number of visible columns");
}
this.columnHeaders.clear();
int i = 0;
for (final Object column : visibleColumns) {
if (i >= columnHeaders.length) {
break;
}
this.columnHeaders.put(column, columnHeaders[i++]);
}
markAsDirty();
} | 3.68 |
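A minimal usage sketch for the method above; the container properties and header strings are assumptions for illustration, not taken from the snippet:

Table table = new Table();
table.addContainerProperty("name", String.class, "");
table.addContainerProperty("age", Integer.class, 0);
// The table must be in one of the explicit column header modes referenced in the javadoc above.
// One header per visible column, in getVisibleColumns() order; a length mismatch throws IllegalArgumentException.
table.setColumnHeaders("Full name", "Age");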
druid_DruidPooledConnection_getPhysicalConnectNanoSpan | /**
* @since 1.0.17
*/
public long getPhysicalConnectNanoSpan() {
return this.holder.getCreateNanoSpan();
} | 3.68 |
framework_LayoutManager_addElementResizeListener | /**
* Adds a listener that will be notified whenever the size of a specific
* element changes. Adding a listener to an element also ensures that all
* sizes for that element will be available starting from the next layout
* phase.
*
* @param element
* the element that should be checked for size changes
* @param listener
* an ElementResizeListener that will be informed whenever the
* size of the target element has changed
*/
public void addElementResizeListener(Element element,
ElementResizeListener listener) {
Collection<ElementResizeListener> listeners = elementResizeListeners
.get(element);
if (listeners == null) {
listeners = new HashSet<>();
elementResizeListeners.put(element, listeners);
ensureMeasured(element);
}
listeners.add(listener);
} | 3.68 |
hbase_SnapshotInfo_isMissing | /** Returns true if the file is missing */
public boolean isMissing() {
return this.size < 0;
} | 3.68 |
hmily_MapBinder_bindEntries | /**
* Bind entries.
*
* @param source the source
* @param map the map
*/
void bindEntries(final ConfigPropertySource source, final Map<Object, Object> map) {
source.stream().forEach(name -> {
boolean ancestorOf = root.isAncestorOf(name);
if (ancestorOf) {
BindData<?> valueBindData = getValueBindData(name);
PropertyName entryName = getEntryName(source, name);
Object key = getKeyName(entryName);
map.computeIfAbsent(key, k -> this.elementBinder.bind(entryName, valueBindData, this.env));
}
});
} | 3.68 |
flink_RemoteInputChannel_requestBuffer | /**
* Requests buffer from input channel directly for receiving network data. It should always
* return an available buffer in credit-based mode unless the channel has been released.
*
* @return The available buffer.
*/
@Nullable
public Buffer requestBuffer() {
return bufferManager.requestBuffer();
} | 3.68 |
hudi_HoodieExampleDataGenerator_generateInsertsOnPartition | /**
* Generates new inserts, across a single partition path. It also updates the list of existing keys.
*/
public List<HoodieRecord<T>> generateInsertsOnPartition(String commitTime, Integer n, String partitionPath) {
return generateInsertsStreamOnPartition(commitTime, n, partitionPath).collect(Collectors.toList());
} | 3.68 |
flink_EntropyInjector_addEntropy | /**
* Handles entropy injection across regular and entropy-aware file systems.
*
 * <p>If the given file system is entropy-aware (i.e. implements {@link
 * EntropyInjectingFileSystem}), then this method replaces the entropy marker in the path with
* random characters. The entropy marker is defined by {@link
* EntropyInjectingFileSystem#getEntropyInjectionKey()}.
*
* <p>If the given file system does not implement {@code EntropyInjectingFileSystem}, then this
* method returns the same path.
*/
public static Path addEntropy(FileSystem fs, Path path) throws IOException {
// check and possibly inject entropy into the path
final EntropyInjectingFileSystem efs = getEntropyFs(fs);
return efs == null ? path : resolveEntropy(path, efs, true);
} | 3.68 |
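A usage sketch under assumed configuration (the bucket, the "_entropy_" marker and the checkpoint path are made up; IOException handling is omitted):

Path raw = new Path("s3://bucket/_entropy_/checkpoints/chk-42/_metadata");
FileSystem fs = raw.getFileSystem();
// If fs implements EntropyInjectingFileSystem with key "_entropy_", the marker is replaced with
// random characters; otherwise the original path comes back unchanged.
Path resolved = EntropyInjector.addEntropy(fs, raw);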
hbase_ServerRegionReplicaUtil_isRegionReplicaStoreFileRefreshEnabled | /** Returns True if we are to refresh user-space hfiles in Region Read Replicas. */
public static boolean isRegionReplicaStoreFileRefreshEnabled(Configuration conf) {
return conf.getBoolean(REGION_REPLICA_STORE_FILE_REFRESH,
DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH);
} | 3.68 |
morf_SqlDialect_escapeSql | /**
* Turn a string value into an SQL string literal which has that value.
* This escapes single quotes as double-single quotes.
* @param literalValue the value to escape
* @return escaped value
*/
protected String escapeSql(String literalValue) {
if (literalValue == null) {
return null;
}
return StringUtils.replace(literalValue, "'", "''");
} | 3.68 |
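A small illustration of the contract (runnable only from within a dialect subclass, since the method is protected):

String escaped = escapeSql("O'Brien");   // yields O''Brien
String literal = "'" + escaped + "'";    // the caller still adds the surrounding quotes: 'O''Brien'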
hadoop_FutureDataInputStreamBuilderImpl_bufferSize | /**
* Set the size of the buffer to be used.
*
* @param bufSize buffer size.
* @return FutureDataInputStreamBuilder.
*/
public FutureDataInputStreamBuilder bufferSize(int bufSize) {
bufferSize = bufSize;
return getThisBuilder();
} | 3.68 |
hbase_MetricsConnection_getScanTracker | /** scanTracker metric */
public CallTracker getScanTracker() {
return scanTracker;
} | 3.68 |
hbase_SaslClientAuthenticationProviders_addProviderIfNotExists | /**
 * Adds the given {@code provider} to the set. If a provider with the same authentication code
 * is already registered, a {@link RuntimeException} is thrown.
*/
static void addProviderIfNotExists(SaslClientAuthenticationProvider provider,
HashMap<Byte, SaslClientAuthenticationProvider> providers) {
Byte code = provider.getSaslAuthMethod().getCode();
SaslClientAuthenticationProvider existingProvider = providers.get(code);
if (existingProvider != null) {
throw new RuntimeException("Already registered authentication provider with " + code + " "
+ existingProvider.getClass());
}
providers.put(code, provider);
} | 3.68 |
flink_CrossOperator_projectSecond | /**
* Continues a ProjectCross transformation and adds fields of the second cross input.
*
* <p>If the second cross input is a {@link Tuple} {@link DataSet}, fields can be selected
* by their index. If the second cross input is not a Tuple DataSet, no parameters should be
* passed.
*
* <p>Fields of the first and second input can be added by chaining the method calls of
* {@link
* org.apache.flink.api.java.operators.CrossOperator.CrossProjection#projectFirst(int...)}
* and {@link
* org.apache.flink.api.java.operators.CrossOperator.CrossProjection#projectSecond(int...)}.
*
     * @param secondFieldIndexes If the second input is a Tuple DataSet, the indexes of the
     *     selected fields. For a non-Tuple DataSet, do not provide parameters. The order of
     *     fields in the output tuple is defined by the order of the field indexes.
* @return An extended CrossProjection.
* @see Tuple
* @see DataSet
* @see org.apache.flink.api.java.operators.CrossOperator.CrossProjection
* @see org.apache.flink.api.java.operators.CrossOperator.ProjectCross
*/
protected CrossProjection<I1, I2> projectSecond(int... secondFieldIndexes) {
boolean isSecondTuple;
if (ds2.getType() instanceof TupleTypeInfo && secondFieldIndexes.length > 0) {
isSecondTuple = true;
} else {
isSecondTuple = false;
}
if (!isSecondTuple && secondFieldIndexes.length != 0) {
// field index provided for non-Tuple input
throw new IllegalArgumentException(
"Input is not a Tuple. Call projectSecond() without arguments to include it.");
} else if (secondFieldIndexes.length > (22 - this.fieldIndexes.length)) {
            // too many field indexes provided
throw new IllegalArgumentException(
"You may select only up to twenty-two (22) fields in total.");
}
int offset = this.fieldIndexes.length;
if (isSecondTuple) {
// extend index and flag arrays
this.fieldIndexes =
Arrays.copyOf(
this.fieldIndexes,
this.fieldIndexes.length + secondFieldIndexes.length);
this.isFieldInFirst =
Arrays.copyOf(
this.isFieldInFirst,
this.isFieldInFirst.length + secondFieldIndexes.length);
// copy field indexes
int maxFieldIndex = numFieldsDs2;
for (int i = 0; i < secondFieldIndexes.length; i++) {
// check if indexes in range
Preconditions.checkElementIndex(secondFieldIndexes[i], maxFieldIndex);
this.isFieldInFirst[offset + i] = false;
this.fieldIndexes[offset + i] = secondFieldIndexes[i];
}
} else {
// extend index and flag arrays
this.fieldIndexes = Arrays.copyOf(this.fieldIndexes, this.fieldIndexes.length + 1);
this.isFieldInFirst =
Arrays.copyOf(this.isFieldInFirst, this.isFieldInFirst.length + 1);
// add input object to output tuple
this.isFieldInFirst[offset] = false;
this.fieldIndexes[offset] = -1;
}
return this;
} | 3.68 |
querydsl_MultiSurfaceExpression_centroid | /**
* The mathematical centroid for this MultiSurface. The result is not guaranteed to be on
* this MultiSurface.
*
* @return centroid
*/
public PointExpression<Point> centroid() {
if (centroid == null) {
centroid = GeometryExpressions.pointOperation(SpatialOps.CENTROID, mixin);
}
return centroid;
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations7 | /**
* Test for proper SQL mathematics operation generation from DSL expressions
* that use brackets.
* <p>
 * The subexpression "a+b" is bracketed explicitly, while
 * the subexpression "c-d" should be bracketed implicitly, even without an explicit
 * {@link org.alfasoftware.morf.sql.SqlUtils#bracket(MathsField)} call.
* </p>
*/
@Test
public void shouldGenerateCorrectSqlForMathOperations7() {
AliasedField aPlusB = bracket(field("a").plus(field("b")));
AliasedField cMinusD = field("c").minus(field("d"));
String result = testDialect.getSqlFrom(aPlusB.divideBy(cMinusD));
assertEquals(expectedSqlForMathOperations7(), result);
} | 3.68 |
hbase_RegionInfo_isEncodedRegionName | /**
* Figure if the passed bytes represent an encoded region name or not.
* @param regionName A Region name either encoded or not.
* @return True if <code>regionName</code> represents an encoded name.
*/
@InterfaceAudience.Private // For use by internals only.
public static boolean isEncodedRegionName(byte[] regionName) {
// If not parseable as region name, presume encoded. TODO: add stringency; e.g. if hex.
if (parseRegionNameOrReturnNull(regionName) == null) {
if (regionName.length > MD5_HEX_LENGTH) {
return false;
} else if (regionName.length == MD5_HEX_LENGTH) {
return true;
} else {
String encodedName = Bytes.toString(regionName);
try {
Integer.parseInt(encodedName);
// If this is a valid integer, it could be hbase:meta's encoded region name.
return true;
} catch (NumberFormatException er) {
return false;
}
}
}
return false;
} | 3.68 |
hadoop_OBSBlockOutputStream_complete | /**
* This completes a multipart upload. Sometimes it fails; here retries are
* handled to avoid losing all data on a transient failure.
*
* @param partETags list of partial uploads
* @return result for completing multipart upload
* @throws IOException on any problem
*/
private CompleteMultipartUploadResult complete(
final List<PartEtag> partETags) throws IOException {
String operation = String.format(
"Completing multi-part upload for key '%s',"
+ " id '%s' with %s partitions ",
key, uploadId, partETags.size());
try {
LOG.debug(operation);
return writeOperationHelper.completeMultipartUpload(key,
uploadId, partETags);
} catch (ObsException e) {
throw OBSCommonUtils.translateException(operation, key, e);
}
} | 3.68 |
framework_Payload_setKey | /**
* Sets the key of this payload.
*
* @param key
* key that identifies the payload
*/
public void setKey(String key) {
this.key = key;
} | 3.68 |
framework_VUpload_disableTitle | /**
* For internal use only. May be removed or replaced in the future.
*
* @param disable
* {@code true} if the built-in browser-dependent tooltip should
* be hidden in favor of a Vaadin tooltip, {@code false}
* otherwise
*/
public void disableTitle(boolean disable) {
if (disable) {
// Disable title attribute for upload element.
if (BrowserInfo.get().isChrome()) {
// In Chrome title has to be set to " " to make it invisible
fu.setTitle(" ");
} else if (BrowserInfo.get().isFirefox()) {
// In FF title has to be set to empty string to make it
// invisible
// Method setTitle removes title attribute when it's an empty
// string, so setAttribute() should be used here
fu.getElement().setAttribute("title", "");
}
// For other browsers absent title doesn't show default tooltip for
// input element
} else {
fu.setTitle(null);
}
} | 3.68 |
hbase_ParseFilter_removeQuotesFromByteArray | /**
 * Takes a quoted byte array and converts it into an unquoted byte array. For example: given a byte
 * array representing 'abc', it returns a byte array representing abc.
* <p>
* @param quotedByteArray the quoted byte array
* @return Unquoted byte array
*/
public static byte[] removeQuotesFromByteArray(byte[] quotedByteArray) {
if (
quotedByteArray == null || quotedByteArray.length < 2
|| quotedByteArray[0] != ParseConstants.SINGLE_QUOTE
|| quotedByteArray[quotedByteArray.length - 1] != ParseConstants.SINGLE_QUOTE
) {
throw new IllegalArgumentException("removeQuotesFromByteArray needs a quoted byte array");
} else {
byte[] targetString = new byte[quotedByteArray.length - 2];
Bytes.putBytes(targetString, 0, quotedByteArray, 1, quotedByteArray.length - 2);
return targetString;
}
} | 3.68 |
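A short illustration of the behaviour described above (values are made up):

byte[] quoted = Bytes.toBytes("'abc'");
byte[] unquoted = ParseFilter.removeQuotesFromByteArray(quoted);
// Bytes.toString(unquoted) is "abc"; an unquoted or too-short input throws IllegalArgumentException.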
hbase_SnapshotInfo_getStoreFilesSize | /** Returns the total size of the store files referenced by the snapshot */
public long getStoreFilesSize() {
return hfilesSize.get() + hfilesArchiveSize.get() + hfilesMobSize.get();
} | 3.68 |
hbase_MasterObserver_preDeleteSnapshot | /**
* Called before a snapshot is deleted. Called as part of deleteSnapshot RPC call.
* @param ctx the environment to interact with the framework and master
* @param snapshot the SnapshotDescriptor of the snapshot to delete
*/
default void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot) throws IOException {
} | 3.68 |
flink_FlinkJoinToMultiJoinRule_combineOuterJoins | /**
* Combines the outer join conditions and join types from the left and right join inputs. If the
* join itself is either a left or right outer join, then the join condition corresponding to
* the join is also set in the position corresponding to the null-generating input into the
* join. The join type is also set.
*
* @param joinRel join rel
* @param combinedInputs the combined inputs to the join
* @param left left child of the joinrel
* @param right right child of the joinrel
* @param joinSpecs the list where the join types and conditions will be copied
*/
private void combineOuterJoins(
Join joinRel,
List<RelNode> combinedInputs,
RelNode left,
RelNode right,
List<Pair<JoinRelType, RexNode>> joinSpecs,
List<Boolean> inputNullGenFieldList) {
JoinRelType joinType = joinRel.getJoinType();
JoinInfo joinInfo = joinRel.analyzeCondition();
ImmutableIntList leftKeys = joinInfo.leftKeys;
ImmutableIntList rightKeys = joinInfo.rightKeys;
boolean leftCombined =
canCombine(
left,
leftKeys,
joinType,
joinType.generatesNullsOnLeft(),
true,
inputNullGenFieldList,
0);
boolean rightCombined =
canCombine(
right,
rightKeys,
joinType,
joinType.generatesNullsOnRight(),
false,
inputNullGenFieldList,
left.getRowType().getFieldCount());
switch (joinType) {
case LEFT:
if (leftCombined) {
copyOuterJoinInfo((MultiJoin) left, joinSpecs, 0, null, null);
} else {
joinSpecs.add(Pair.of(JoinRelType.INNER, null));
}
joinSpecs.add(Pair.of(joinType, joinRel.getCondition()));
break;
case RIGHT:
joinSpecs.add(Pair.of(joinType, joinRel.getCondition()));
if (rightCombined) {
copyOuterJoinInfo(
(MultiJoin) right,
joinSpecs,
left.getRowType().getFieldCount(),
right.getRowType().getFieldList(),
joinRel.getRowType().getFieldList());
} else {
joinSpecs.add(Pair.of(JoinRelType.INNER, null));
}
break;
default:
if (leftCombined) {
copyOuterJoinInfo((MultiJoin) left, joinSpecs, 0, null, null);
} else {
joinSpecs.add(Pair.of(JoinRelType.INNER, null));
}
if (rightCombined) {
copyOuterJoinInfo(
(MultiJoin) right,
joinSpecs,
left.getRowType().getFieldCount(),
right.getRowType().getFieldList(),
joinRel.getRowType().getFieldList());
} else {
joinSpecs.add(Pair.of(JoinRelType.INNER, null));
}
}
} | 3.68 |
hbase_Scan_includeStartRow | /** Returns if we should include start row when scan */
public boolean includeStartRow() {
return includeStartRow;
} | 3.68 |
graphhopper_VectorTile_getFeaturesList | /**
* <pre>
* The actual features in this tile.
* </pre>
*
* <code>repeated .vector_tile.Tile.Feature features = 2;</code>
*/
public java.util.List<vector_tile.VectorTile.Tile.Feature> getFeaturesList() {
if (featuresBuilder_ == null) {
return java.util.Collections.unmodifiableList(features_);
} else {
return featuresBuilder_.getMessageList();
}
} | 3.68 |
hudi_HoodieTableMetaClient_getBasePathV2 | /**
* Returns base path of the table
*/
public Path getBasePathV2() {
return basePath.get();
} | 3.68 |
shardingsphere-elasticjob_JobNodeStorage_getJobNodeData | /**
* Get job node data.
*
* @param node node
* @return data of job node
*/
public String getJobNodeData(final String node) {
return regCenter.get(jobNodePath.getFullPath(node));
} | 3.68 |
hadoop_BytesWritable_setCapacity | /**
* Change the capacity of the backing storage. The data is preserved.
*
* @param capacity The new capacity in bytes.
*/
public void setCapacity(final int capacity) {
if (capacity != getCapacity()) {
this.size = Math.min(size, capacity);
this.bytes = Arrays.copyOf(this.bytes, capacity);
}
} | 3.68 |
morf_AbstractSqlDialectTest_testInsertFromSelectWithSourceInDifferentSchema | /**
* Tests that an insert from a select works when the source table is in a different schema.
*/
@Test
public void testInsertFromSelectWithSourceInDifferentSchema() {
SelectStatement sourceStmt = new SelectStatement(new FieldReference("id"),
new FieldReference("version"),
new FieldReference(STRING_FIELD),
new FieldReference(INT_FIELD),
new FieldReference(FLOAT_FIELD))
.from(new TableReference("MYSCHEMA", TEST_TABLE));
InsertStatement stmt = new InsertStatement().into(new TableReference(OTHER_TABLE))
.fields(new FieldReference("id"),
new FieldReference("version"),
new FieldReference(STRING_FIELD),
new FieldReference(INT_FIELD),
new FieldReference(FLOAT_FIELD))
.from(sourceStmt);
String expectedSql = "INSERT INTO " + tableName(OTHER_TABLE) + " (id, version, stringField, intField, floatField) SELECT id, version, stringField, intField, floatField FROM " + differentSchemaTableName(TEST_TABLE);
List<String> sql = testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
assertEquals("Insert with explicit field lists", ImmutableList.of(expectedSql), sql);
} | 3.68 |
hudi_RequestHandler_registerDataFilesAPI | /**
* Register Data-Files API calls.
*/
private void registerDataFilesAPI() {
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_DATA_FILES_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_DATA_FILES", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFiles(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_DATA_FILE_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_DATA_FILE", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFile(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.FILEID_PARAM, String.class).getOrThrow(e -> new HoodieException("FILEID is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_ALL_DATA_FILES, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_ALL_DATA_FILES", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFiles(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_DATA_FILES_BEFORE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_DATA_FILES_BEFORE_ON_INSTANT", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFilesBeforeOrOn(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrThrow(e -> new HoodieException("MAX_INSTANT_PARAM is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_LATEST_BASE_FILES_BEFORE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_LATEST_BASE_FILES_BEFORE_ON_INSTANT", 1);
Map<String, List<BaseFileDTO>> dtos = dataFileHandler.getAllLatestDataFilesBeforeOrOn(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrThrow(e -> new HoodieException("MAX_INSTANT_PARAM is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_DATA_FILE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_DATA_FILE_ON_INSTANT", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFileOn(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.INSTANT_PARAM, String.class).get(),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.FILEID_PARAM, String.class).getOrThrow(e -> new HoodieException("FILEID is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_DATA_FILES, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_DATA_FILES", 1);
List<BaseFileDTO> dtos = dataFileHandler.getAllDataFiles(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_DATA_FILES_RANGE_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_DATA_FILES_RANGE_INSTANT", 1);
List<BaseFileDTO> dtos = dataFileHandler.getLatestDataFilesInRange(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
Arrays.asList(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.INSTANTS_PARAM, String.class).getOrThrow(e -> new HoodieException("INSTANTS_PARAM is invalid")).split(",")));
writeValueAsString(ctx, dtos);
}, true));
} | 3.68 |
hbase_ReplicationSourceManager_getOldLogDir | /**
* Get the directory where wals are archived
* @return the directory where wals are archived
*/
public Path getOldLogDir() {
return this.oldLogDir;
} | 3.68 |
hudi_BufferedRandomAccessFile_init | /**
*
* @param size - capacity of the buffer
*/
private void init(int size) {
this.capacity = Math.max(DEFAULT_BUFFER_SIZE, size);
this.dataBuffer = ByteBuffer.wrap(new byte[this.capacity]);
} | 3.68 |
framework_ContainerHierarchicalWrapper_removeAllItems | /**
 * Removes all items from the underlying container and from the hierarchy.
*
* @return <code>true</code> if the operation succeeded, <code>false</code>
* if not
* @throws UnsupportedOperationException
* if the removeAllItems is not supported.
*/
@Override
public boolean removeAllItems() throws UnsupportedOperationException {
final boolean success = container.removeAllItems();
if (!hierarchical && success) {
roots.clear();
parent.clear();
children.clear();
noChildrenAllowed.clear();
}
return success;
} | 3.68 |
morf_SqlScriptExecutor_executeStatementBatch | /**
* Runs the specified SQL statement (which should contain parameters), repeatedly for
* each record, mapping the contents of the records into the statement parameters in
* their defined order. Use to insert, merge or update a large batch of records
* efficiently.
*
* @param sqlStatement the SQL statement.
* @param parameterMetadata the metadata describing the parameters.
* @param parameterData the values to insert.
* @param connection the JDBC connection to use.
* @param explicitCommit Determine if an explicit commit should be invoked after executing the supplied batch
* @param statementsPerFlush the number of statements to execute between JDBC batch flushes. Higher numbers have higher memory cost
* but reduce the number of I/O round-trips to the database.
*/
public void executeStatementBatch(String sqlStatement, Iterable<SqlParameter> parameterMetadata, Iterable<? extends DataValueLookup> parameterData, Connection connection, boolean explicitCommit, int statementsPerFlush) {
try {
try (NamedParameterPreparedStatement preparedStatement = NamedParameterPreparedStatement.parseSql(sqlStatement, sqlDialect).createFor(connection)) {
executeStatementBatch(preparedStatement, parameterMetadata, parameterData, connection, explicitCommit, statementsPerFlush);
} finally {
if (explicitCommit) {
connection.commit();
}
}
} catch (SQLException e) {
throw reclassifiedRuntimeException(e, "SQL exception executing batch");
}
} | 3.68 |
hbase_FirstKeyValueMatchingQualifiersFilter_parseFrom | /**
* Parses a serialized representation of {@link FirstKeyValueMatchingQualifiersFilter}
* @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance
* @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from
* <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
throw new DeserializationException(
"Stop using FirstKeyValueMatchingQualifiersFilter, which has been permanently removed");
} | 3.68 |
querydsl_PathBuilder_getComparable | /**
* Create a new Comparable typed path
*
* @param <A>
* @param property property name
* @param type property type
* @return property path
*/
@SuppressWarnings("unchecked")
public <A extends Comparable<?>> ComparablePath<A> getComparable(String property, Class<A> type) {
Class<? extends A> vtype = validate(property, type);
return super.createComparable(property, (Class<? super A>) vtype);
} | 3.68 |
flink_HiveParserTypeCheckCtx_getUnparseTranslator | /** @return the unparseTranslator */
public HiveParserUnparseTranslator getUnparseTranslator() {
return unparseTranslator;
} | 3.68 |
hadoop_AbfsDtFetcher_getServiceName | /**
 * Returns the service name for the scheme.
*/
public Text getServiceName() {
return new Text(getScheme());
} | 3.68 |
flink_PackagedProgram_invokeInteractiveModeForExecution | /**
* This method assumes that the context environment is prepared, or the execution will be a
* local execution by default.
*/
public void invokeInteractiveModeForExecution() throws ProgramInvocationException {
FlinkSecurityManager.monitorUserSystemExitForCurrentThread();
try {
callMainMethod(mainClass, args);
} finally {
FlinkSecurityManager.unmonitorUserSystemExitForCurrentThread();
}
} | 3.68 |
dubbo_ScopeClusterInvoker_init | /**
* Initializes the ScopeClusterInvoker instance.
*/
private void init() {
Boolean peer = (Boolean) getUrl().getAttribute(PEER_KEY);
String isInjvm = getUrl().getParameter(LOCAL_PROTOCOL);
// When the point-to-point direct connection is directly connected,
// the initialization is directly ended
if (peer != null && peer) {
peerFlag = true;
return;
}
// Check if the service has been exported through Injvm protocol
if (injvmInvoker == null
&& LOCAL_PROTOCOL.equalsIgnoreCase(getRegistryUrl().getProtocol())) {
injvmInvoker = invoker;
isExported.compareAndSet(false, true);
injvmFlag = true;
return;
}
// Check if the service has been exported through Injvm protocol or the SCOPE_LOCAL parameter is set
if (Boolean.TRUE.toString().equalsIgnoreCase(isInjvm)
|| SCOPE_LOCAL.equalsIgnoreCase(getUrl().getParameter(SCOPE_KEY))) {
injvmFlag = true;
} else if (isInjvm == null) {
injvmFlag = isNotRemoteOrGeneric();
}
protocolSPI = getUrl().getApplicationModel()
.getExtensionLoader(Protocol.class)
.getAdaptiveExtension();
injvmExporterListener =
getUrl().getOrDefaultFrameworkModel().getBeanFactory().getBean(InjvmExporterListener.class);
injvmExporterListener.addExporterChangeListener(this, getUrl().getServiceKey());
} | 3.68 |
hbase_SnapshotOfRegionAssignmentFromMeta_getExistingAssignmentPlan | /**
* Get the favored nodes plan
* @return the existing favored nodes plan
*/
public FavoredNodesPlan getExistingAssignmentPlan() {
return this.existingAssignmentPlan;
} | 3.68 |
hbase_UnsafeAccess_copy | /**
* Copies specified number of bytes from given offset of {@code src} buffer into the {@code dest}
* buffer.
* @param src source buffer
* @param srcOffset offset into source buffer
* @param dest destination buffer
* @param destOffset offset into destination buffer
* @param length length of data to copy
*/
public static void copy(ByteBuffer src, int srcOffset, ByteBuffer dest, int destOffset,
int length) {
long srcAddress, destAddress;
Object srcBase = null, destBase = null;
if (src.isDirect()) {
srcAddress = srcOffset + directBufferAddress(src);
} else {
srcAddress = (long) srcOffset + src.arrayOffset() + BYTE_ARRAY_BASE_OFFSET;
srcBase = src.array();
}
if (dest.isDirect()) {
destAddress = destOffset + directBufferAddress(dest);
} else {
destAddress = destOffset + BYTE_ARRAY_BASE_OFFSET + dest.arrayOffset();
destBase = dest.array();
}
unsafeCopy(srcBase, srcAddress, destBase, destAddress, length);
} | 3.68 |
framework_AbstractProperty_removeListener | /**
* @deprecated As of 7.0, replaced by
* {@link #removeValueChangeListener(Property.ValueChangeListener)}
*/
@Override
@Deprecated
public void removeListener(ValueChangeListener listener) {
removeValueChangeListener(listener);
} | 3.68 |
framework_AbsoluteLayout_setRight | /**
* Sets the 'right' attribute; distance from the right of the component
* to the right edge of the layout.
*
* @param rightValue
* The value of the 'right' attribute
* @param rightUnits
* The unit of the 'right' attribute. See UNIT_SYMBOLS for a
* description of the available units.
*/
public void setRight(Float rightValue, Unit rightUnits) {
this.rightValue = rightValue;
this.rightUnits = rightUnits;
markAsDirty();
} | 3.68 |
flink_ProcessFunction_onTimer | /**
* Called when a timer set using {@link TimerService} fires.
*
* @param timestamp The timestamp of the firing timer.
* @param ctx An {@link OnTimerContext} that allows querying the timestamp of the firing timer,
* querying the {@link TimeDomain} of the firing timer and getting a {@link TimerService}
* for registering timers and querying the time. The context is only valid during the
* invocation of this method, do not store it.
* @param out The collector for returning result values.
* @throws Exception This method may throw exceptions. Throwing an exception will cause the
* operation to fail and may trigger recovery.
*/
public void onTimer(long timestamp, OnTimerContext ctx, Collector<O> out) throws Exception {} | 3.68 |
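A minimal sketch of the usual pattern (the class name and the one-second delay are assumptions; timers require a keyed stream): register a timer in processElement and react to it in onTimer.

public class TimeoutMarker extends ProcessFunction<String, String> {
    @Override
    public void processElement(String value, Context ctx, Collector<String> out) throws Exception {
        out.collect(value);
        // fire roughly one second later
        ctx.timerService().registerProcessingTimeTimer(ctx.timerService().currentProcessingTime() + 1000L);
    }

    @Override
    public void onTimer(long timestamp, OnTimerContext ctx, Collector<String> out) throws Exception {
        out.collect("timer fired at " + timestamp);
    }
}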
framework_ColorPickerGrid_getPosition | /**
* Gets the position.
*
* @return the position
*/
public int[] getPosition() {
return new int[] { x, y };
} | 3.68 |
incubator-hugegraph-toolchain_FileLineFetcher_checkMatchHeader | /**
     * Only matches the header against the first line of the second or a subsequent file
*/
private boolean checkMatchHeader(String line) {
if (!this.source().format().needHeader() ||
this.offset() != FIRST_LINE_OFFSET) {
return false;
}
assert this.source().header() != null;
String[] columns = this.parser.split(line);
return Arrays.equals(this.source().header(), columns);
} | 3.68 |
framework_JavaScriptConnectorHelper_removeResizeListener | // Called from JSNI to remove a listener
private void removeResizeListener(Element element,
JavaScriptObject callbackFunction) {
Map<JavaScriptObject, ElementResizeListener> listenerMap = resizeListeners
.get(element);
if (listenerMap == null) {
return;
}
ElementResizeListener listener = listenerMap.remove(callbackFunction);
if (listener != null) {
LayoutManager.get(connector.getConnection())
.removeElementResizeListener(element, listener);
if (listenerMap.isEmpty()) {
resizeListeners.remove(element);
}
}
} | 3.68 |
flink_StreamExecutionEnvironment_addSource | /**
     * Adds a data source with a custom type information, thus opening a {@link DataStream}. Only in
     * very special cases does the user need to supply the type information explicitly. Otherwise use {@link
* #addSource(org.apache.flink.streaming.api.functions.source.SourceFunction)}
*
* @param function the user defined function
* @param sourceName Name of the data source
* @param <OUT> type of the returned stream
* @param typeInfo the user defined type information for the stream
* @return the data stream constructed
* @deprecated This method relies on the {@link
* org.apache.flink.streaming.api.functions.source.SourceFunction} API, which is due to be
* removed. Use the {@link #fromSource(Source, WatermarkStrategy, String, TypeInformation)}
* method based on the new {@link org.apache.flink.api.connector.source.Source} API instead.
*/
@Deprecated
public <OUT> DataStreamSource<OUT> addSource(
SourceFunction<OUT> function, String sourceName, TypeInformation<OUT> typeInfo) {
return addSource(function, sourceName, typeInfo, Boundedness.CONTINUOUS_UNBOUNDED);
} | 3.68 |
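A migration sketch following the deprecation note; MyNewSource is a placeholder for a connector implementing the new org.apache.flink.api.connector.source.Source interface:

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Deprecated style: env.addSource(new MySourceFunction(), "legacy-source", Types.STRING);
DataStreamSource<String> stream =
        env.fromSource(new MyNewSource(), WatermarkStrategy.noWatermarks(), "new-source", Types.STRING);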
zxing_BitArray_get | /**
* @param i bit to get
* @return true iff bit i is set
*/
public boolean get(int i) {
return (bits[i / 32] & (1 << (i & 0x1F))) != 0;
} | 3.68 |
dubbo_TTable_getBorder | /**
* get border
*
* @return table border
*/
public Border getBorder() {
return border;
} | 3.68 |
hbase_WALFactory_getInstance | // Public only for FSHLog
public static WALFactory getInstance(Configuration configuration) {
WALFactory factory = singleton.get();
if (null == factory) {
WALFactory temp = new WALFactory(configuration);
if (singleton.compareAndSet(null, temp)) {
factory = temp;
} else {
// someone else beat us to initializing
try {
temp.close();
} catch (IOException exception) {
LOG.debug("failed to close temporary singleton. ignoring.", exception);
}
factory = singleton.get();
}
}
return factory;
} | 3.68 |
flink_ListView_setList | /** Replaces the entire view's content with the content of the given {@link List}. */
public void setList(List<T> list) {
this.list = list;
} | 3.68 |
framework_AbstractClientConnector_detach | /**
* {@inheritDoc}
*
* <p>
* The {@link #getSession()} and {@link #getUI()} methods might return
* <code>null</code> after this method is called.
* </p>
*/
@Override
public void detach() {
for (ClientConnector connector : getAllChildrenIterable(this)) {
connector.detach();
}
fireEvent(new DetachEvent(this));
getUI().getConnectorTracker().unregisterConnector(this);
} | 3.68 |
hbase_TableMapReduceUtil_convertScanToString | /**
* Writes the given scan into a Base64 encoded string.
* @param scan The scan to write out.
* @return The scan saved in a Base64 encoded string.
* @throws IOException When writing the scan fails.
*/
public static String convertScanToString(Scan scan) throws IOException {
ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
return Bytes.toString(Base64.getEncoder().encode(proto.toByteArray()));
} | 3.68 |
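An assumed usage sketch: storing the serialized scan in a job configuration so a table input format can read it back ('job' is an assumed MapReduce Job instance, TableInputFormat.SCAN is the conventional target property, and IOException handling is omitted):

Scan scan = new Scan();
scan.addFamily(Bytes.toBytes("cf"));
Configuration conf = job.getConfiguration();
conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(scan));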
hadoop_ReencryptionHandler_throttle | /**
* Throttles the ReencryptionHandler in 3 aspects:
* 1. Prevents generating more Callables than the CPU could possibly
* handle.
* 2. Prevents generating more Callables than the ReencryptionUpdater
* can handle, under its own throttling.
* 3. Prevents contending FSN/FSD read locks. This is done based
* on the DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_RATIO_KEY configuration.
* <p>
* Item 1 and 2 are to control NN heap usage.
*
* @throws InterruptedException
*/
@VisibleForTesting
@Override
protected void throttle() throws InterruptedException {
assert !dir.hasReadLock();
assert !dir.getFSNamesystem().hasReadLock();
final int numCores = Runtime.getRuntime().availableProcessors();
if (taskQueue.size() >= numCores) {
LOG.debug("Re-encryption handler throttling because queue size {} is"
+ "larger than number of cores {}", taskQueue.size(), numCores);
while (taskQueue.size() >= numCores) {
Thread.sleep(100);
}
}
// 2. if tasks are piling up on the updater, don't create new callables
// until the queue size goes down.
final int maxTasksPiled = Runtime.getRuntime().availableProcessors() * 2;
int numTasks = numTasksSubmitted();
if (numTasks >= maxTasksPiled) {
LOG.debug("Re-encryption handler throttling because total tasks pending"
+ " re-encryption updater is {}", numTasks);
while (numTasks >= maxTasksPiled) {
Thread.sleep(500);
numTasks = numTasksSubmitted();
}
}
// 3.
if (throttleLimitHandlerRatio >= 1.0) {
return;
}
final long expect = (long) (throttleTimerAll.now(TimeUnit.MILLISECONDS)
* throttleLimitHandlerRatio);
final long actual = throttleTimerLocked.now(TimeUnit.MILLISECONDS);
if (LOG.isDebugEnabled()) {
LOG.debug("Re-encryption handler throttling expect: {}, actual: {},"
+ " throttleTimerAll:{}", expect, actual,
throttleTimerAll.now(TimeUnit.MILLISECONDS));
}
if (expect - actual < 0) {
// in case throttleLimitHandlerRatio is very small, expect will be 0.
// so sleepMs should not be calculated from expect, to really meet the
// ratio. e.g. if ratio is 0.001, expect = 0 and actual = 1, sleepMs
// should be 1000 - throttleTimerAll.now()
final long sleepMs = (long) (actual / throttleLimitHandlerRatio)
- throttleTimerAll.now(TimeUnit.MILLISECONDS);
LOG.debug("Throttling re-encryption, sleeping for {} ms", sleepMs);
Thread.sleep(sleepMs);
}
throttleTimerAll.reset().start();
throttleTimerLocked.reset();
} | 3.68 |
hbase_AuthManager_authorizeUserNamespace | /**
 * Check if user has the given action privilege in namespace scope.
* @param user user name
* @param namespace namespace
* @param action one of action in [Read, Write, Create, Exec, Admin]
* @return true if user has, false otherwise
*/
public boolean authorizeUserNamespace(User user, String namespace, Permission.Action action) {
if (user == null) {
return false;
}
if (authorizeUserGlobal(user, action)) {
return true;
}
PermissionCache<NamespacePermission> nsPermissions =
namespaceCache.getOrDefault(namespace, NS_NO_PERMISSION);
if (authorizeNamespace(nsPermissions.get(user.getShortName()), namespace, action)) {
return true;
}
for (String group : user.getGroupNames()) {
if (authorizeNamespace(nsPermissions.get(AuthUtil.toGroupEntry(group)), namespace, action)) {
return true;
}
}
return false;
} | 3.68 |
morf_ViewURLAsFile_createTempFile | /**
* Wrapper for {@link java.io.File#createTempFile(String, String, File)} that
* wraps any exceptions in a {@link RuntimeException} and propagates it.
*/
private File createTempFile(String prefix, String suffix, File file) {
try {
return File.createTempFile(prefix, suffix, file);
} catch (IOException e) {
throw new RuntimeException("Unable to create temp file", e);
}
} | 3.68 |
hadoop_DistributedCache_setFileTimestamps | /**
* This is to check the timestamp of the files to be localized.
* Used by internal MapReduce code.
 * @param conf Configuration which stores the timestamps
* @param timestamps comma separated list of timestamps of files.
* The order should be the same as the order in which the files are added.
*/
@Deprecated
public static void setFileTimestamps(Configuration conf, String timestamps) {
conf.set(CACHE_FILES_TIMESTAMPS, timestamps);
} | 3.68 |
framework_ConnectorBundleLoader_ensureDeferredBundleLoaded | /**
* Starts loading the deferred bundle if it hasn't already been started.
*
* @since 8.0.3
*/
public void ensureDeferredBundleLoaded() {
if (!isBundleLoaded(DEFERRED_BUNDLE_NAME)) {
loadBundle(DEFERRED_BUNDLE_NAME, new BundleLoadCallback() {
@Override
public void loaded() {
// Nothing to do
}
@Override
public void failed(Throwable reason) {
getLogger().log(Level.SEVERE,
"Error loading deferred bundle", reason);
}
});
}
} | 3.68 |
hadoop_JobBase_getLongValue | /**
*
* @param name
* the counter name
 * @return the value of the given counter.
*/
protected Long getLongValue(Object name) {
return this.longCounters.get(name);
} | 3.68 |
hadoop_OBSBlockOutputStream_clearHFlushOrSync | /**
* Clear for hflush or hsync.
*/
private synchronized void clearHFlushOrSync() {
appendAble.set(true);
multiPartUpload = null;
} | 3.68 |
framework_NullValidator_isNullAllowed | /**
 * Returns <code>true</code> if nulls are allowed, otherwise
* <code>false</code>.
*/
public final boolean isNullAllowed() {
return onlyNullAllowed;
} | 3.68 |
framework_VAbstractSplitPanel_setSecondWidget | /**
* For internal use only. May be removed or replaced in the future.
*
* @param w
* the widget to set to the second region or {@code null} to
* remove previously set widget
*/
public void setSecondWidget(Widget w) {
if (secondChild == w) {
return;
}
if (secondChild != null) {
secondChild.removeFromParent();
}
if (w != null) {
super.add(w, secondContainer);
}
secondChild = w;
} | 3.68 |
hadoop_ArrayFile_get | /**
* Return the <code>n</code>th value in the file.
 * @param n the index of the value to get.
 * @param value the object into which the value is read.
 * @throws IOException raised on errors performing I/O.
 * @return the value that was read.
*/
public synchronized Writable get(long n, Writable value)
throws IOException {
key.set(n);
return get(key, value);
} | 3.68 |
MagicPlugin_CompatibilityUtilsBase_loadChunk | /**
* This will load chunks asynchronously if possible.
*
 * <p>But note that it will never be truly asynchronous; it is important not to call this in a tight retry loop,
 * because the main server thread needs to free up to actually process the async chunk loads.
*/
@Override
public void loadChunk(World world, int x, int z, boolean generate, Consumer<Chunk> consumer) {
PaperUtils paperUtils = platform.getPaperUtils();
if (paperUtils == null) {
Chunk chunk = world.getChunkAt(x, z);
chunk.load();
if (consumer != null) {
consumer.accept(chunk);
}
return;
}
final LoadingChunk loading = new LoadingChunk(world, x, z);
Integer requestCount = loadingChunks.get(loading);
if (requestCount != null) {
requestCount++;
if (requestCount > MAX_CHUNK_LOAD_TRY) {
platform.getLogger().warning("Exceeded retry count for asynchronous chunk load, loading synchronously");
if (!hasDumpedStack) {
hasDumpedStack = true;
Thread.dumpStack();
}
Chunk chunk = world.getChunkAt(x, z);
chunk.load();
if (consumer != null) {
consumer.accept(chunk);
}
loadingChunks.remove(loading);
return;
}
loadingChunks.put(loading, requestCount);
return;
}
loadingChunks.put(loading, 1);
paperUtils.loadChunk(world, x, z, generate, chunk -> {
loadingChunks.remove(loading);
if (consumer != null) {
consumer.accept(chunk);
}
});
} | 3.68 |
hudi_HoodieAvroUtils_bytesToAvro | /**
* Convert serialized bytes back into avro record.
*/
public static GenericRecord bytesToAvro(byte[] bytes, Schema writerSchema, Schema readerSchema) throws IOException {
BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(bytes, BINARY_DECODER.get());
BINARY_DECODER.set(decoder);
GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(writerSchema, readerSchema);
return reader.read(null, decoder);
} | 3.68 |
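A round-trip sketch, assuming the companion HoodieAvroUtils.avroToBytes call produced the bytes and that 'record', 'writerSchema' and 'readerSchema' are in scope:

byte[] payload = HoodieAvroUtils.avroToBytes(record);                       // written with writerSchema
GenericRecord decoded = HoodieAvroUtils.bytesToAvro(payload, writerSchema, readerSchema);
// readerSchema may add defaulted fields; standard Avro schema resolution applies.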
hbase_ServerNonceManager_startOperation | /**
* Starts the operation if operation with such nonce has not already succeeded. If the operation
* is in progress, waits for it to end and checks whether it has succeeded.
* @param group Nonce group.
* @param nonce Nonce.
* @param stoppable Stoppable that terminates waiting (if any) when the server is stopped.
* @return true if the operation has not already succeeded and can proceed; false otherwise.
*/
public boolean startOperation(long group, long nonce, Stoppable stoppable)
throws InterruptedException {
if (nonce == HConstants.NO_NONCE) return true;
NonceKey nk = new NonceKey(group, nonce);
OperationContext ctx = new OperationContext();
while (true) {
OperationContext oldResult = nonces.putIfAbsent(nk, ctx);
if (oldResult == null) return true;
// Collision with some operation - should be extremely rare.
synchronized (oldResult) {
int oldState = oldResult.getState();
LOG.debug("Conflict detected by nonce: " + nk + ", " + oldResult);
if (oldState != OperationContext.WAIT) {
return oldState == OperationContext.PROCEED; // operation ended
}
oldResult.setHasWait();
oldResult.wait(this.conflictWaitIterationMs); // operation is still active... wait and loop
if (stoppable.isStopped()) {
throw new InterruptedException("Server stopped");
}
}
}
} | 3.68 |
hadoop_NodeIDsInfo_add | /**
* This method will generate a new NodeIDsInfo object based on the two NodeIDsInfo objects.
 * The information to be combined includes the node list (with duplicate nodes removed)
* and partitionInfo object.
*
* @param left left NodeIDsInfo Object.
* @param right right NodeIDsInfo Object.
* @return new NodeIDsInfo Object.
*/
public static NodeIDsInfo add(NodeIDsInfo left, NodeIDsInfo right) {
Set<String> nodes = new HashSet<>();
if (left != null && left.nodeIDsList != null) {
nodes.addAll(left.nodeIDsList);
}
if (right != null && right.nodeIDsList != null) {
nodes.addAll(right.nodeIDsList);
}
PartitionInfo leftPartitionInfo = null;
if (left != null) {
leftPartitionInfo = left.getPartitionInfo();
}
PartitionInfo rightPartitionInfo = null;
if (right != null) {
rightPartitionInfo = right.getPartitionInfo();
}
PartitionInfo info = PartitionInfo.addTo(leftPartitionInfo, rightPartitionInfo);
return new NodeIDsInfo(nodes, info);
} | 3.68 |
framework_HasStyleNames_addStyleNames | /**
* Adds one or more style names to this component by using one or multiple
* parameters.
*
* @since 8.7
* @param styles
* the style name or style names to be added to the component
* @see #addStyleName(String)
* @see #setStyleName(String)
* @see #removeStyleName(String)
*/
default void addStyleNames(String... styles) {
for (String style : styles) {
addStyleName(style);
}
} | 3.68 |
morf_SqlServerDialect_internalTableDeploymentStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#tableDeploymentStatements(org.alfasoftware.morf.metadata.Table)
*/
@Override
public Collection<String> internalTableDeploymentStatements(Table table) {
List<String> statements = new ArrayList<>();
// Create the table deployment statement
StringBuilder createTableStatement = new StringBuilder();
createTableStatement.append("CREATE ");
createTableStatement.append("TABLE ");
createTableStatement.append(schemaNamePrefix());
createTableStatement.append(table.getName());
createTableStatement.append(" (");
boolean first = true;
for (Column column : table.columns()) {
if (!first) {
createTableStatement.append(", ");
}
createTableStatement.append(String.format("[%s] ", column.getName()));
createTableStatement.append(sqlRepresentationOfColumnType(table, column, false));
if (column.isAutoNumbered()) {
int autoNumberStart = column.getAutoNumberStart() == -1 ? 1 : column.getAutoNumberStart();
createTableStatement.append(" IDENTITY(" + autoNumberStart + ", 1)");
}
first = false;
}
List<Column> primaryKeys = primaryKeysForTable(table);
if (!primaryKeys.isEmpty()) {
createTableStatement.append(", ");
createTableStatement.append(buildPrimaryKeyConstraint(table.getName(), namesOfColumns(primaryKeys)));
}
createTableStatement.append(")");
statements.add(createTableStatement.toString());
return statements;
} | 3.68 |
hudi_HoodieKeyLookupHandle_addKey | /**
* Adds the key for look up.
*/
public void addKey(String recordKey) {
// check record key against bloom filter of current file & add to possible keys if needed
if (bloomFilter.mightContain(recordKey)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Record key " + recordKey + " matches bloom filter in " + partitionPathFileIDPair);
}
candidateRecordKeys.add(recordKey);
}
totalKeysChecked++;
} | 3.68 |
hudi_HoodieIngestionService_requestShutdownIfNeeded | /**
* To determine if shutdown should be requested to allow gracefully terminate the ingestion in continuous mode.
* <p>
* Subclasses should implement the logic to make the decision. If the shutdown condition is met, the implementation
* should call {@link #shutdown(boolean)} to indicate the request.
*
* @see PostWriteTerminationStrategy
*/
protected boolean requestShutdownIfNeeded(Option<HoodieData<WriteStatus>> lastWriteStatus) {
return false;
} | 3.68 |
hadoop_MawoConfiguration_readConfigFile | /**
* Find, read, and parse the configuration file.
*
* @return the properties that were found or empty if no file was found
*/
private static Properties readConfigFile() {
Properties properties = new Properties();
// Get property file stream from classpath
LOG.info("Configuration file being loaded: " + CONFIG_FILE
+ ". Found in classpath at "
+ MawoConfiguration.class.getClassLoader().getResource(CONFIG_FILE));
InputStream inputStream = MawoConfiguration.class.getClassLoader()
.getResourceAsStream(CONFIG_FILE);
if (inputStream == null) {
throw new RuntimeException(CONFIG_FILE + " not found in classpath");
}
// load the properties
try {
properties.load(inputStream);
inputStream.close();
} catch (FileNotFoundException fnf) {
LOG.error(
"No configuration file " + CONFIG_FILE + " found in classpath.");
} catch (IOException ie) {
throw new IllegalArgumentException(
"Can't read configuration file " + CONFIG_FILE, ie);
}
return properties;
} | 3.68 |
pulsar_RangeCache_getNumberOfEntries | /**
 * Just for testing. Getting the number of entries is very expensive on the concurrent map.
*/
protected long getNumberOfEntries() {
return entries.size();
} | 3.68 |
flink_MimeTypes_getMimeTypeForFileName | /**
* Gets the MIME type for the file with the given name, by extension. This method tries to
* extract the file extension and then use the {@link #getMimeTypeForExtension(String)} to
* determine the MIME type. If the extension cannot be determined, or the extension is
* unrecognized, this method return {@code null}.
*
* @param fileName The file name.
* @return The MIME type, or {@code null}, if the file's extension is not recognized.
*/
public static String getMimeTypeForFileName(String fileName) {
int extensionPos = fileName.lastIndexOf('.');
if (extensionPos >= 1 && extensionPos < fileName.length() - 1) {
String extension = fileName.substring(extensionPos + 1);
return getMimeTypeForExtension(extension);
} else {
return null;
}
} | 3.68 |
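For illustration (the return values shown are typical examples, not guaranteed by this snippet):

String html = MimeTypes.getMimeTypeForFileName("index.html");   // e.g. "text/html"
String none = MimeTypes.getMimeTypeForFileName("README");       // null: no recognizable extension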
framework_VLoadingIndicator_isVisible | /**
* Returns whether or not the loading indicator is showing.
*
* @return true if the loading indicator is visible, false otherwise
*/
public boolean isVisible() {
if (getElement().getStyle().getDisplay()
.equals(Display.NONE.getCssName())) {
return false;
}
return true;
} | 3.68 |
framework_VaadinSession_getCurrent | /**
* Gets the currently used session. The current session is automatically
* defined when processing requests related to the session (see
* {@link ThreadLocal}) and in {@link VaadinSession#access(Runnable)} and
 * {@link UI#access(Runnable)}. In other cases (e.g. from background
 * threads), the current session is not automatically defined.
* <p>
* The session is stored using a weak reference to avoid leaking memory in
* case it is not explicitly cleared.
*
* @return the current session instance if available, otherwise
* <code>null</code>
*
* @see #setCurrent(VaadinSession)
*
* @since 7.0
*/
public static VaadinSession getCurrent() {
return CurrentInstance.get(VaadinSession.class);
} | 3.68 |
flink_MapView_putAll | /**
* Inserts all mappings from the specified map to this map view.
*
* @param map The map whose entries are inserted into this map view.
* @throws Exception Thrown if the system cannot access the map.
*/
public void putAll(Map<K, V> map) throws Exception {
this.map.putAll(map);
} | 3.68 |
flink_BlobServer_getPort | /**
* Returns the port on which the server is listening.
*
* @return port on which the server is listening
*/
@Override
public int getPort() {
return this.serverSocket.getLocalPort();
} | 3.68 |
zxing_MinimalECIInput_isFNC1 | /**
* Determines if a value is the FNC1 character
*
* @param index the index of the value
*
* @return true if the value at position {@code index} is the FNC1 character
*
* @throws IndexOutOfBoundsException
* if the {@code index} argument is negative or not less than
* {@code length()}
*/
public boolean isFNC1(int index) {
if (index < 0 || index >= length()) {
throw new IndexOutOfBoundsException("" + index);
}
return bytes[index] == 1000;
} | 3.68 |
flink_InputGate_getChannelInfos | /** Returns the channel infos of this gate. */
public List<InputChannelInfo> getChannelInfos() {
return IntStream.range(0, getNumberOfInputChannels())
.mapToObj(index -> getChannel(index).getChannelInfo())
.collect(Collectors.toList());
} | 3.68 |
framework_AbsoluteLayout_toString | /*
* (non-Javadoc)
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return getCSSString();
} | 3.68 |
hudi_AWSDmsAvroPayload_handleDeleteOperation | /**
*
* Handle a possible delete - check for "D" in Op column and return empty row if found.
* @param insertValue The new row that is being "inserted".
*/
private Option<IndexedRecord> handleDeleteOperation(IndexedRecord insertValue) throws IOException {
boolean delete = false;
if (insertValue instanceof GenericRecord) {
GenericRecord record = (GenericRecord) insertValue;
delete = isDMSDeleteRecord(record);
}
return delete ? Option.empty() : Option.of(insertValue);
} | 3.68 |
flink_StreamProjection_projectTuple24 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>
SingleOutputStreamOperator<
Tuple24<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>>
projectTuple24() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<
Tuple24<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>>
tType =
new TupleTypeInfo<
Tuple24<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN,
Tuple24<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
hadoop_FileIoProvider_read | /**
* {@inheritDoc}.
*/
@Override
public int read(@Nonnull byte[] b, int off, int len) throws IOException {
final long begin = profilingEventHook.beforeFileIo(volume, READ, len);
try {
faultInjectorEventHook.beforeFileIo(volume, READ, len);
int numBytesRead = super.read(b, off, len);
profilingEventHook.afterFileIo(volume, READ, begin, numBytesRead);
return numBytesRead;
} catch(Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
flink_NullableSerializer_wrap | /**
* This method wraps the {@code originalSerializer} with the {@code NullableSerializer} if not
* already wrapped.
*
* @param originalSerializer serializer to wrap and add {@code null} support
* @param padNullValueIfFixedLen pad null value to preserve the fixed length of original
* serializer
* @return wrapped serializer which supports {@code null} values
*/
public static <T> TypeSerializer<T> wrap(
@Nonnull TypeSerializer<T> originalSerializer, boolean padNullValueIfFixedLen) {
return originalSerializer instanceof NullableSerializer
? originalSerializer
: new NullableSerializer<>(originalSerializer, padNullValueIfFixedLen);
} | 3.68 |
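A usage sketch assuming Flink's built-in StringSerializer as the wrapped serializer:

TypeSerializer<String> base = StringSerializer.INSTANCE;
TypeSerializer<String> nullable = NullableSerializer.wrap(base, true);
// Wrapping an already wrapped serializer returns it unchanged, so the call is idempotent.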
hadoop_AbfsOutputStream_writeAppendBlobCurrentBufferToService | /**
* Appending the current active data block to service. Clearing the active
* data block and releasing all buffered data.
* @throws IOException if there is any failure while starting an upload for
* the dataBlock or while closing the BlockUploadData.
*/
private void writeAppendBlobCurrentBufferToService() throws IOException {
DataBlocks.DataBlock activeBlock = getActiveBlock();
// No data, return.
if (!hasActiveBlockDataToUpload()) {
return;
}
final int bytesLength = activeBlock.dataSize();
DataBlocks.BlockUploadData uploadData = activeBlock.startUpload();
clearActiveBlock();
outputStreamStatistics.writeCurrentBuffer();
outputStreamStatistics.bytesToUpload(bytesLength);
final long offset = position;
position += bytesLength;
AbfsPerfTracker tracker = client.getAbfsPerfTracker();
try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker,
"writeCurrentBufferToService", "append")) {
AppendRequestParameters reqParams = new AppendRequestParameters(offset, 0,
bytesLength, APPEND_MODE, true, leaseId, isExpectHeaderEnabled);
AbfsRestOperation op = client.append(path, uploadData.toByteArray(), reqParams,
cachedSasToken.get(), new TracingContext(tracingContext));
cachedSasToken.update(op.getSasToken());
outputStreamStatistics.uploadSuccessful(bytesLength);
perfInfo.registerResult(op.getResult());
perfInfo.registerSuccess(true);
return;
} catch (Exception ex) {
outputStreamStatistics.uploadFailed(bytesLength);
failureWhileSubmit(ex);
} finally {
IOUtils.close(uploadData);
}
} | 3.68 |
hadoop_Base64_decodeAsByteObjectArray | /**
* Decodes a given Base64 string into its corresponding byte array.
*
* @param data
* the Base64 string, as a <code>String</code> object, to decode
*
* @return the corresponding decoded byte array
* @throws IllegalArgumentException
* If the string is not a valid base64 encoded string
*/
public static Byte[] decodeAsByteObjectArray(final String data) {
int byteArrayLength = 3 * data.length() / 4;
if (data.endsWith("==")) {
byteArrayLength -= 2;
}
else if (data.endsWith("=")) {
byteArrayLength -= 1;
}
final Byte[] retArray = new Byte[byteArrayLength];
int byteDex = 0;
int charDex = 0;
for (; charDex < data.length(); charDex += 4) {
// get 4 chars, convert to 3 bytes
final int char1 = DECODE_64[(byte) data.charAt(charDex)];
final int char2 = DECODE_64[(byte) data.charAt(charDex + 1)];
final int char3 = DECODE_64[(byte) data.charAt(charDex + 2)];
final int char4 = DECODE_64[(byte) data.charAt(charDex + 3)];
if (char1 < 0 || char2 < 0 || char3 == -1 || char4 == -1) {
// invalid character(-1), or bad padding (-2)
throw new IllegalArgumentException("The data parameter is not a valid base64-encoded string.");
}
int tVal = char1 << 18;
tVal += char2 << 12;
tVal += (char3 & 0xff) << 6;
tVal += char4 & 0xff;
if (char3 == -2) {
// two "==" pad chars, check bits 12-24
tVal &= 0x00FFF000;
retArray[byteDex++] = (byte) (tVal >> 16 & 0xFF);
}
else if (char4 == -2) {
// one pad char "=" , check bits 6-24.
tVal &= 0x00FFFFC0;
retArray[byteDex++] = (byte) (tVal >> 16 & 0xFF);
retArray[byteDex++] = (byte) (tVal >> 8 & 0xFF);
}
else {
// No pads take all 3 bytes, bits 0-24
retArray[byteDex++] = (byte) (tVal >> 16 & 0xFF);
retArray[byteDex++] = (byte) (tVal >> 8 & 0xFF);
retArray[byteDex++] = (byte) (tVal & 0xFF);
}
}
return retArray;
} | 3.68 |
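A quick usage sketch of the decoder above, relying only on standard Base64 facts: "TWFu" decodes to the three bytes of "Man", and the padded forms drop one or two bytes accordingly.

// "TWFu" -> 'M', 'a', 'n'; "TWE=" (one pad) -> 'M', 'a'; "TQ==" (two pads) -> 'M'.
Byte[] man = Base64.decodeAsByteObjectArray("TWFu");
Byte[] ma  = Base64.decodeAsByteObjectArray("TWE=");
Byte[] m   = Base64.decodeAsByteObjectArray("TQ==");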
framework_ScrollbarBundle_isScrollerVisible | /**
* Checks whether the scroll handle is currently visible or not.
*
* @return <code>true</code> if the scroll handle is currently visible.
* <code>false</code> if not.
*/
public boolean isScrollerVisible() {
return isScrollerVisible;
} | 3.68 |
framework_Button_getRelativeY | /**
* Returns the relative mouse position (y coordinate) when the click
* took place. The position is relative to the clicked component.
*
* @return The mouse cursor y position relative to the clicked layout
* component or -1 if no y coordinate available
*/
public int getRelativeY() {
if (null != details) {
return details.getRelativeY();
} else {
return -1;
}
} | 3.68 |
hibernate-validator_NotEmptyValidatorForArraysOfFloat_isValid | /**
* Checks the array is not {@code null} and not empty.
*
* @param array the array to validate
* @param constraintValidatorContext context in which the constraint is evaluated
   * @return {@code true} if the array is neither {@code null} nor empty
*/
@Override
public boolean isValid(float[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return false;
}
return array.length > 0;
} | 3.68 |
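This validator backs @NotEmpty on float[] properties. A minimal usage sketch follows; the bean is invented for illustration, and whether the javax or jakarta namespace applies depends on the Hibernate Validator version in use.

import javax.validation.constraints.NotEmpty;

class Measurement {
  // Validation fails if samples is null or has length 0.
  @NotEmpty
  float[] samples;
}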
querydsl_GenericExporter_setTargetFolder | /**
* Set the target folder for generated sources
*
   * @param targetFolder the target folder for generated sources
*/
public void setTargetFolder(File targetFolder) {
this.targetFolder = targetFolder;
} | 3.68 |
hudi_FileSystemBasedLockProvider_getLockConfig | /**
* Returns a filesystem based lock config with given table path.
*/
public static TypedProperties getLockConfig(String tablePath) {
TypedProperties props = new TypedProperties();
props.put(HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key(), FileSystemBasedLockProvider.class.getName());
props.put(HoodieLockConfig.LOCK_ACQUIRE_WAIT_TIMEOUT_MS.key(), "2000");
props.put(HoodieLockConfig.FILESYSTEM_LOCK_EXPIRE.key(), "1");
props.put(HoodieLockConfig.LOCK_ACQUIRE_CLIENT_NUM_RETRIES.key(), "30");
props.put(HoodieLockConfig.FILESYSTEM_LOCK_PATH.key(), defaultLockPath(tablePath));
return props;
} | 3.68 |
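A usage sketch for the helper above; the table path is made up, the import path is assumed, and wiring the returned properties into a concrete HoodieWriteConfig is deliberately left out.

import org.apache.hudi.common.config.TypedProperties;

// Build filesystem-based lock properties for a table and inspect them before merging
// them into the rest of the writer configuration.
TypedProperties lockProps = FileSystemBasedLockProvider.getLockConfig("s3://bucket/warehouse/my_table");
lockProps.forEach((key, value) -> System.out.println(key + " = " + value));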
hadoop_RequestFactoryImpl_withMultipartPartCountLimit | /**
* Multipart limit.
* @param value new value
* @return the builder
*/
public RequestFactoryBuilder withMultipartPartCountLimit(
final long value) {
multipartPartCountLimit = value;
return this;
} | 3.68 |
hbase_PermissionStorage_removeNamespacePermissions | /**
* Remove specified namespace from the acl table.
*/
static void removeNamespacePermissions(Configuration conf, String namespace, Table t)
throws IOException {
Delete d = new Delete(Bytes.toBytes(toNamespaceEntry(namespace)));
d.addFamily(ACL_LIST_FAMILY);
if (LOG.isDebugEnabled()) {
LOG.debug("Removing permissions of removed namespace " + namespace);
}
try {
t.delete(d);
} finally {
t.close();
}
} | 3.68 |
framework_Table_getItemDescriptionGenerator | /**
* Get the item description generator which generates tooltips for cells and
* rows in the Table.
*/
public ItemDescriptionGenerator getItemDescriptionGenerator() {
return itemDescriptionGenerator;
} | 3.68 |
hbase_StoreFileScanner_seekToPreviousRowStateless | /**
* This variant of the {@link StoreFileScanner#seekToPreviousRow(Cell)} method requires two seeks.
* It should be used if the cost for seeking is lower i.e. when using a fast seeking data block
* encoding like RIV1.
*/
private boolean seekToPreviousRowStateless(Cell originalKey) throws IOException {
Cell key = originalKey;
do {
Cell keyAtBeginningOfRow = PrivateCellUtil.createFirstOnRow(key);
if (!seekBefore(keyAtBeginningOfRow)) {
return false;
}
Cell firstKeyOfPreviousRow = PrivateCellUtil.createFirstOnRow(hfs.getCell());
if (!seekAtOrAfter(firstKeyOfPreviousRow)) {
return false;
}
if (isStillAtSeekTargetAfterSkippingNewerKvs(firstKeyOfPreviousRow)) {
return true;
}
key = firstKeyOfPreviousRow;
} while (true);
} | 3.68 |
flink_PekkoRpcActor_handleRpcInvocation | /**
* Handle rpc invocations by looking up the rpc method on the rpc endpoint and calling this
* method with the provided method arguments. If the method has a return value, it is returned
* to the sender of the call.
*
* @param rpcInvocation Rpc invocation message
*/
private void handleRpcInvocation(RpcInvocation rpcInvocation) {
Method rpcMethod = null;
try {
String methodName = rpcInvocation.getMethodName();
Class<?>[] parameterTypes = rpcInvocation.getParameterTypes();
rpcMethod = lookupRpcMethod(methodName, parameterTypes);
} catch (final NoSuchMethodException e) {
log.error("Could not find rpc method for rpc invocation.", e);
RpcConnectionException rpcException =
new RpcConnectionException("Could not find rpc method for rpc invocation.", e);
getSender().tell(new Status.Failure(rpcException), getSelf());
}
if (rpcMethod != null) {
try {
// this supports declaration of anonymous classes
rpcMethod.setAccessible(true);
final Method capturedRpcMethod = rpcMethod;
if (rpcMethod.getReturnType().equals(Void.TYPE)) {
// No return value to send back
runWithContextClassLoader(
() -> capturedRpcMethod.invoke(rpcEndpoint, rpcInvocation.getArgs()),
flinkClassLoader);
} else {
final Object result;
try {
result =
runWithContextClassLoader(
() ->
capturedRpcMethod.invoke(
rpcEndpoint, rpcInvocation.getArgs()),
flinkClassLoader);
} catch (InvocationTargetException e) {
log.debug(
"Reporting back error thrown in remote procedure {}", rpcMethod, e);
// tell the sender about the failure
getSender().tell(new Status.Failure(e.getTargetException()), getSelf());
return;
}
final String methodName = rpcMethod.getName();
final boolean isLocalRpcInvocation =
rpcMethod.getAnnotation(Local.class) != null;
if (result instanceof CompletableFuture) {
final CompletableFuture<?> responseFuture = (CompletableFuture<?>) result;
sendAsyncResponse(responseFuture, methodName, isLocalRpcInvocation);
} else {
sendSyncResponse(result, methodName, isLocalRpcInvocation);
}
}
} catch (Throwable e) {
log.error("Error while executing remote procedure call {}.", rpcMethod, e);
// tell the sender about the failure
getSender().tell(new Status.Failure(e), getSelf());
}
}
} | 3.68 |