name (string, lengths 12 to 178) | code_snippet (string, lengths 8 to 36.5k) | score (float64, 3.26 to 3.68)
---|---|---|
hbase_MultiRowRangeFilter_parseFrom | /**
* Parse a serialized representation of {@link MultiRowRangeFilter}
* @param pbBytes A pb serialized instance
* @return An instance of {@link MultiRowRangeFilter}
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static MultiRowRangeFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
FilterProtos.MultiRowRangeFilter proto;
try {
proto = FilterProtos.MultiRowRangeFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
int length = proto.getRowRangeListCount();
List<FilterProtos.RowRange> rangeProtos = proto.getRowRangeListList();
List<RowRange> rangeList = new ArrayList<>(length);
for (FilterProtos.RowRange rangeProto : rangeProtos) {
RowRange range =
new RowRange(rangeProto.hasStartRow() ? rangeProto.getStartRow().toByteArray() : null,
rangeProto.getStartRowInclusive(),
rangeProto.hasStopRow() ? rangeProto.getStopRow().toByteArray() : null,
rangeProto.getStopRowInclusive());
rangeList.add(range);
}
return new MultiRowRangeFilter(rangeList);
} | 3.68 |
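
A minimal round-trip sketch of the `parseFrom` method above. It only uses the `RowRange` and `MultiRowRangeFilter` constructors visible in the snippet plus the `toByteArray()` method referenced by `@see`; the `Bytes` helper and `getRowRanges()` accessor are assumed standard HBase client API, and the row keys are made up for illustration.

```java
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiRowRangeFilterRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a filter covering two disjoint row ranges.
    List<RowRange> ranges = Arrays.asList(
        new RowRange(Bytes.toBytes("a"), true, Bytes.toBytes("f"), false),
        new RowRange(Bytes.toBytes("m"), true, Bytes.toBytes("q"), false));
    MultiRowRangeFilter original = new MultiRowRangeFilter(ranges);

    // Serialize to protobuf bytes and parse back, exercising the method above.
    byte[] serialized = original.toByteArray();
    MultiRowRangeFilter restored = MultiRowRangeFilter.parseFrom(serialized);
    System.out.println(restored.getRowRanges().size()); // expected: 2
  }
}
```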
hbase_HFileBlock_getNextBlockOnDiskSize | /**
* @return the on-disk size of the next block (including the header size and any checksums if
* present) read by peeking into the next block's header; use as a hint when doing a read
* of the next block when scanning or running over a file.
*/
int getNextBlockOnDiskSize() {
return nextBlockOnDiskSize;
} | 3.68 |
graphhopper_Measurement_writeSummary | /**
* Writes a selection of measurement results to a single line in
* a file. Each run of the measurement class will append a new line.
*/
private void writeSummary(String summaryLocation, String propLocation) {
logger.info("writing summary to " + summaryLocation);
// choose properties that should be in summary here
String[] properties = {
"graph.nodes",
"graph.edges",
"graph.import_time",
CH.PREPARE + "time",
CH.PREPARE + "node.time",
CH.PREPARE + "edge.time",
CH.PREPARE + "node.shortcuts",
CH.PREPARE + "edge.shortcuts",
Landmark.PREPARE + "time",
"routing.distance_mean",
"routing.mean",
"routing.visited_nodes_mean",
"routingCH.distance_mean",
"routingCH.mean",
"routingCH.visited_nodes_mean",
"routingCH_no_instr.mean",
"routingCH_full.mean",
"routingCH_edge.distance_mean",
"routingCH_edge.mean",
"routingCH_edge.visited_nodes_mean",
"routingCH_edge_no_instr.mean",
"routingCH_edge_full.mean",
"routingLM8.distance_mean",
"routingLM8.mean",
"routingLM8.visited_nodes_mean",
"measurement.seed",
"measurement.gitinfo",
"measurement.timestamp"
};
File f = new File(summaryLocation);
boolean writeHeader = !f.exists();
try (FileWriter writer = new FileWriter(f, true)) {
if (writeHeader)
writer.write(getSummaryHeader(properties));
writer.write(getSummaryLogLine(properties, propLocation));
} catch (IOException e) {
logger.error("Could not write summary to file '{}'", summaryLocation, e);
}
} | 3.68 |
hudi_MarkerUtils_writeMarkerTypeToFile | /**
* Writes the marker type to the file `MARKERS.type`.
*
* @param markerType marker type.
* @param fileSystem file system to use.
* @param markerDir marker directory.
*/
public static void writeMarkerTypeToFile(MarkerType markerType, FileSystem fileSystem, String markerDir) {
Path markerTypeFilePath = new Path(markerDir, MARKER_TYPE_FILENAME);
FSDataOutputStream fsDataOutputStream = null;
BufferedWriter bufferedWriter = null;
try {
fsDataOutputStream = fileSystem.create(markerTypeFilePath, false);
bufferedWriter = new BufferedWriter(new OutputStreamWriter(fsDataOutputStream, StandardCharsets.UTF_8));
bufferedWriter.write(markerType.toString());
} catch (IOException e) {
throw new HoodieException("Failed to create marker type file " + markerTypeFilePath.toString()
+ "; " + e.getMessage(), e);
} finally {
closeQuietly(bufferedWriter);
closeQuietly(fsDataOutputStream);
}
} | 3.68 |
framework_InMemoryDataProvider_filteringBy | /**
* Wraps this data provider to create a new data provider that is filtered
* by comparing an item property value to the filter value provided in the
* query.
* <p>
* The predicate receives the property value as the first parameter and the
* query filter value as the second parameter, and should return
* <code>true</code> if the corresponding item should be included. The query
* filter value is never <code>null</code> – all items are included without
* running either callback if the query doesn't define any filter.
*
* @param valueProvider
* a value provider that gets the property value, not
* <code>null</code>
* @param predicate
* a predicate to use for comparing the property value to the
* query filter, not <code>null</code>
*
* @return a data provider that filters accordingly, not <code>null</code>
*/
public default <V, Q> DataProvider<T, Q> filteringBy(
ValueProvider<T, V> valueProvider,
SerializableBiPredicate<V, Q> predicate) {
Objects.requireNonNull(valueProvider, "Value provider cannot be null");
Objects.requireNonNull(predicate, "Predicate cannot be null");
return filteringBy((item, filterValue) -> predicate
.test(valueProvider.apply(item), filterValue));
} | 3.68 |
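
A hedged usage sketch for `filteringBy` above, assuming Vaadin 8's `ListDataProvider` and the `DataProvider.ofItems` factory (neither is shown in the snippet) and a hypothetical `Person` bean used only for illustration.

```java
import com.vaadin.data.provider.DataProvider;
import com.vaadin.data.provider.ListDataProvider;

// Hypothetical bean used only for this example.
class Person {
    private final String name;
    Person(String name) { this.name = name; }
    String getName() { return name; }
}

public class FilteringByExample {
    public static void main(String[] args) {
        // ListDataProvider is an InMemoryDataProvider, so filteringBy is available.
        ListDataProvider<Person> people =
                DataProvider.ofItems(new Person("Alice"), new Person("Bob"));

        // Filter by comparing the name property to a String query filter.
        DataProvider<Person, String> byName = people.filteringBy(
                Person::getName,
                (name, filterText) -> name.toLowerCase()
                        .contains(filterText.toLowerCase()));
        // byName can now be handed to a component that issues String filters;
        // with no filter set, all items are included.
    }
}
```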
flink_FlinkPipelineTranslationUtil_getJobGraph | /** Transmogrifies the given {@link Pipeline} to a {@link JobGraph}. */
public static JobGraph getJobGraph(
ClassLoader userClassloader,
Pipeline pipeline,
Configuration optimizerConfiguration,
int defaultParallelism) {
FlinkPipelineTranslator pipelineTranslator =
getPipelineTranslator(userClassloader, pipeline);
return pipelineTranslator.translateToJobGraph(
pipeline, optimizerConfiguration, defaultParallelism);
} | 3.68 |
hadoop_PlacementConstraint_minCardinality | /**
* When the placement type is cardinality, the minimum number of containers of the
* depending component that a host should have, in order for containers of this
* component to be allocated on it.
**/
public PlacementConstraint minCardinality(Long minCardinality) {
this.minCardinality = minCardinality;
return this;
} | 3.68 |
hudi_HoodieRepairTool_listFilesFromBasePath | /**
* Lists all Hoodie files from the table base path.
*
* @param context {@link HoodieEngineContext} instance.
* @param basePathStr Table base path.
* @param expectedLevel Expected level in the directory hierarchy to include the file status.
* @param parallelism Parallelism for the file listing.
* @return A list of absolute file paths of all Hoodie files.
* @throws IOException upon errors.
*/
static List<String> listFilesFromBasePath(
HoodieEngineContext context, String basePathStr, int expectedLevel, int parallelism) {
FileSystem fs = FSUtils.getFs(basePathStr, context.getHadoopConf().get());
Path basePath = new Path(basePathStr);
return FSUtils.getFileStatusAtLevel(
context, fs, basePath, expectedLevel, parallelism).stream()
.filter(fileStatus -> {
if (!fileStatus.isFile()) {
return false;
}
return FSUtils.isDataFile(fileStatus.getPath());
})
.map(fileStatus -> fileStatus.getPath().toString())
.collect(Collectors.toList());
} | 3.68 |
hadoop_MoveStep_getSizeString | /**
* Returns human readable move sizes.
*
* @param size - bytes being moved.
* @return String
*/
@Override
public String getSizeString(long size) {
return StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1);
} | 3.68 |
open-banking-gateway_Xs2aConsentInfo_isEmbeddedPreAuthNeeded | /**
* Is the OAuth2 pre-step or authorization required.
*/
public boolean isEmbeddedPreAuthNeeded(Xs2aContext ctx) {
return ctx.isEmbeddedPreAuthNeeded();
} | 3.68 |
hudi_CompactionUtil_setAvroSchema | /**
* Sets up the Avro schema string in the given {@code HoodieWriteConfig}
* by reading it from the hoodie table metadata.
*
* @param writeConfig The HoodieWriteConfig to update
* @param metaClient The HoodieTableMetaClient used to resolve the table schema
*/
public static void setAvroSchema(HoodieWriteConfig writeConfig, HoodieTableMetaClient metaClient) throws Exception {
TableSchemaResolver tableSchemaResolver = new TableSchemaResolver(metaClient);
Schema tableAvroSchema = tableSchemaResolver.getTableAvroSchema(false);
writeConfig.setSchema(tableAvroSchema.toString());
} | 3.68 |
shardingsphere-elasticjob_JobTracingEventBus_post | /**
* Post event.
*
* @param event job event
*/
public void post(final JobEvent event) {
if (isRegistered && !EXECUTOR_SERVICE.isShutdown()) {
eventBus.post(event);
}
} | 3.68 |
flink_OrcShim_defaultShim | /** Default shim for the ORC dependency; v2.3.0 should be used. */
static OrcShim<VectorizedRowBatch> defaultShim() {
return new OrcShimV230();
} | 3.68 |
framework_GridDropTarget_setDropThreshold | /**
* Sets the threshold between drop locations from the top and the bottom of
* a row in pixels.
* <p>
* Dropping an element
* <ul>
* <li>within {@code threshold} pixels from the top of a row results in a
* drop event with {@link com.vaadin.shared.ui.grid.DropLocation#ABOVE
* DropLocation.ABOVE}</li>
* <li>within {@code threshold} pixels from the bottom of a row results in a
* drop event with {@link com.vaadin.shared.ui.grid.DropLocation#BELOW
* DropLocation.BELOW}</li>
* <li>anywhere else within the row results in a drop event with
* {@link com.vaadin.shared.ui.grid.DropLocation#ON_TOP
* DropLocation.ON_TOP}</li>
* </ul>
* The value only has an effect when drop mode is set to
* {@link DropMode#ON_TOP_OR_BETWEEN}.
* <p>
* Default is 5 pixels.
*
* @param threshold
* The threshold from the top and bottom of the row in pixels.
*/
public void setDropThreshold(int threshold) {
getState().dropThreshold = threshold;
} | 3.68 |
flink_ChangelogTruncateHelper_materialized | /**
* Handle changelog materialization, potentially {@link #truncate() truncating} the changelog.
*
* @param upTo exclusive
*/
public void materialized(SequenceNumber upTo) {
materializedUpTo = upTo;
truncate();
} | 3.68 |
flink_RemoteStorageScanner_scanMaxSegmentId | /**
* Scan the max segment id of the segment files for the specific partition and subpartition. The max
* segment id is read from a file whose name is the max segment id.
*
* @param partitionId the partition id.
* @param subpartitionId the subpartition id.
*/
private void scanMaxSegmentId(
TieredStoragePartitionId partitionId, TieredStorageSubpartitionId subpartitionId) {
Path segmentFinishDir =
getSegmentFinishDirPath(
baseRemoteStoragePath, partitionId, subpartitionId.getSubpartitionId());
FileStatus[] fileStatuses = new FileStatus[0];
try {
if (!remoteFileSystem.exists(segmentFinishDir)) {
return;
}
fileStatuses = remoteFileSystem.listStatus(segmentFinishDir);
currentRetryTime = 0;
} catch (Throwable t) {
if (t instanceof java.io.FileNotFoundException) {
return;
}
currentRetryTime++;
tryThrowException(t, "Failed to list the segment finish file.");
}
if (fileStatuses.length != 1) {
return;
}
scannedMaxSegmentIds.put(
Tuple2.of(partitionId, subpartitionId),
Integer.parseInt(fileStatuses[0].getPath().getName()));
} | 3.68 |
hbase_ServerNonceManager_reportOperationFromWal | /**
* Reports the operation from WAL during replay.
* @param group Nonce group.
* @param nonce Nonce.
* @param writeTime Entry write time, used to ignore entries that are too old.
*/
public void reportOperationFromWal(long group, long nonce, long writeTime) {
if (nonce == HConstants.NO_NONCE) return;
// Give the write time some slack in case the clocks are not synchronized.
long now = EnvironmentEdgeManager.currentTime();
if (now > writeTime + (deleteNonceGracePeriod * 1.5)) return;
OperationContext newResult = new OperationContext();
newResult.setState(OperationContext.DONT_PROCEED);
NonceKey nk = new NonceKey(group, nonce);
OperationContext oldResult = nonces.putIfAbsent(nk, newResult);
if (oldResult != null) {
// Some schemes can have collisions (for example, expiring hashes), so just log it.
// We have no idea about the semantics here, so this is the least of many evils.
LOG.warn(
"Nonce collision during WAL recovery: " + nk + ", " + oldResult + " with " + newResult);
}
} | 3.68 |
pulsar_BrokerMonitor_initMessageRow | // Helper method to initialize rows which hold message data.
private static void initMessageRow(final Object[] row, final double messageRateIn, final double messageRateOut,
final double messageThroughputIn, final double messageThroughputOut) {
initRow(row, messageRateIn, messageRateOut, messageRateIn + messageRateOut,
messageThroughputIn / 1024,
messageThroughputOut / 1024, (messageThroughputIn + messageThroughputOut) / 1024);
} | 3.68 |
hudi_LSMTimelineWriter_getOrCreateWriterConfig | /**
* Get or create a writer config for parquet writer.
*/
private HoodieWriteConfig getOrCreateWriterConfig() {
if (this.writeConfig == null) {
this.writeConfig = HoodieWriteConfig.newBuilder()
.withProperties(this.config.getProps())
.withPopulateMetaFields(false).build();
}
return this.writeConfig;
} | 3.68 |
graphhopper_ArrayUtil_iota | /**
* Creates an IntArrayList filled with the integers 0,1,2,3,...,size-1
*/
public static IntArrayList iota(int size) {
return range(0, size);
} | 3.68 |
hadoop_ClusterMetrics_getTaskTrackerCount | /**
* Get the number of active trackers in the cluster.
*
* @return active tracker count.
*/
public int getTaskTrackerCount() {
return numTrackers;
} | 3.68 |
pulsar_WebSocketWebResource_validateSuperUserAccess | /**
* Checks whether the user has Pulsar Super-User access to the system.
*
* @throws RestException
* if not authorized
*/
protected void validateSuperUserAccess() {
if (service().getConfig().isAuthenticationEnabled()) {
String appId = clientAppId();
if (log.isDebugEnabled()) {
log.debug("[{}] Check super user access: Authenticated: {} -- Role: {}", uri.getRequestUri(),
clientAppId(), appId);
}
if (!service().getConfig().getSuperUserRoles().contains(appId)) {
throw new RestException(Status.UNAUTHORIZED, "This operation requires super-user access");
}
}
} | 3.68 |
hadoop_CachingGetSpaceUsed_getUsed | /**
* @return an estimate of space used in the directory path.
*/
@Override public long getUsed() throws IOException {
return Math.max(used.get(), 0);
} | 3.68 |
flink_CompactingHashTable_getMaxPartition | /** @return number of memory segments in the largest partition */
private int getMaxPartition() {
int maxPartition = 0;
for (InMemoryPartition<T> p1 : this.partitions) {
if (p1.getBlockCount() > maxPartition) {
maxPartition = p1.getBlockCount();
}
}
return maxPartition;
} | 3.68 |
hbase_ProcedureEvent_wakeInternal | /**
* Only to be used by ProcedureScheduler implementations. Reason: To wake up multiple events,
* locking sequence is schedLock --> synchronized (event) To wake up an event, both schedLock()
* and synchronized(event) are required. The order is schedLock() --> synchronized(event) because
* when waking up multiple events simultaneously, we keep the scheduler locked until all
* procedures suspended on these events have been added back to the queue (Maybe it's not
* required? Evaluate!) To avoid deadlocks, we want to keep the locking order same even when
* waking up single event. That's why, {@link #wake(AbstractProcedureScheduler)} above uses the
* same code path as used when waking up multiple events. Access should remain package-private.
*/
public synchronized void wakeInternal(AbstractProcedureScheduler procedureScheduler) {
if (ready && !suspendedProcedures.isEmpty()) {
LOG.warn("Found procedures suspended in a ready event! Size=" + suspendedProcedures.size());
}
ready = true;
if (LOG.isTraceEnabled()) {
LOG.trace("Unsuspend " + toString());
}
// wakeProcedure adds to the front of queue, so we start from last in the
// waitQueue' queue, so that the procedure which was added first goes in the front for
// the scheduler queue.
procedureScheduler.addFront(suspendedProcedures.descendingIterator());
suspendedProcedures.clear();
} | 3.68 |
hadoop_MountResponse_writeMountList | /**
* Response for RPC call {@link MountInterface.MNTPROC#DUMP}.
* @param xdr XDR message object
* @param xid transaction id
* @param mounts mount entries
* @return response XDR
*/
public static XDR writeMountList(XDR xdr, int xid, List<MountEntry> mounts) {
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
for (MountEntry mountEntry : mounts) {
xdr.writeBoolean(true); // Value follows yes
xdr.writeString(mountEntry.getHost());
xdr.writeString(mountEntry.getPath());
}
xdr.writeBoolean(false); // Value follows no
return xdr;
} | 3.68 |
pulsar_EventLoopUtil_getClientSocketChannelClass | /**
* Return a SocketChannel class suitable for the given EventLoopGroup implementation.
*
* @param eventLoopGroup
* @return
*/
public static Class<? extends SocketChannel> getClientSocketChannelClass(EventLoopGroup eventLoopGroup) {
if (eventLoopGroup instanceof IOUringEventLoopGroup) {
return IOUringSocketChannel.class;
} else if (eventLoopGroup instanceof EpollEventLoopGroup) {
return EpollSocketChannel.class;
} else {
return NioSocketChannel.class;
}
} | 3.68 |
hbase_AggregateImplementation_getAvg | /**
* Gives a Pair with first object as Sum and second object as row count, computed for a given
* combination of column qualifier and column family in the given row range as defined in the Scan
* object. In its current implementation, it takes one column family and one column qualifier (if
* provided). In case of a null column qualifier, an aggregate sum over the entire column family
* will be returned.
* <p>
* The average is computed in AggregationClient#avg(byte[], ColumnInterpreter, Scan) by processing
* results from all regions, so it's "ok" to pass sum and a Long type.
*/
@Override
public void getAvg(RpcController controller, AggregateRequest request,
RpcCallback<AggregateResponse> done) {
AggregateResponse response = null;
InternalScanner scanner = null;
try {
ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
S sumVal = null;
Long rowCountVal = 0L;
Scan scan = ProtobufUtil.toScan(request.getScan());
scanner = env.getRegion().getScanner(scan);
byte[] colFamily = scan.getFamilies()[0];
NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
byte[] qualifier = null;
if (qualifiers != null && !qualifiers.isEmpty()) {
qualifier = qualifiers.pollFirst();
}
List<Cell> results = new ArrayList<>();
boolean hasMoreRows = false;
do {
results.clear();
hasMoreRows = scanner.next(results);
int listSize = results.size();
for (int i = 0; i < listSize; i++) {
sumVal =
ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, results.get(i))));
}
rowCountVal++;
} while (hasMoreRows);
if (sumVal != null) {
ByteString first = ci.getProtoForPromotedType(sumVal).toByteString();
AggregateResponse.Builder pair = AggregateResponse.newBuilder();
pair.addFirstPart(first);
ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
bb.rewind();
pair.setSecondPart(ByteString.copyFrom(bb));
response = pair.build();
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
done.run(response);
} | 3.68 |
framework_AbstractDateField_getParseErrorMessage | /**
* Return the error message that is shown if the user inputted value can't
* be parsed into a Date object. If
* {@link #handleUnparsableDateString(String)} is overridden and it throws a
* custom exception, the message returned by
* {@link Exception#getLocalizedMessage()} will be used instead of the value
* returned by this method.
*
* @see #setParseErrorMessage(String)
*
* @return the error message that the DateField uses when it can't parse the
* textual input from user to a Date object
*/
public String getParseErrorMessage() {
return defaultParseErrorMessage;
} | 3.68 |
flink_AsyncFunction_timeout | /**
* {@link AsyncFunction#asyncInvoke} timeout occurred. By default, the result future is
* exceptionally completed with a timeout exception.
*
* @param input element coming from an upstream task
* @param resultFuture to be completed with the result data
*/
default void timeout(IN input, ResultFuture<OUT> resultFuture) throws Exception {
resultFuture.completeExceptionally(
new TimeoutException("Async function call has timed out."));
} | 3.68 |
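
A sketch of overriding the default `timeout` behaviour shown above so the operator emits a fallback record instead of failing. It assumes Flink's `AsyncFunction`/`ResultFuture` types from `org.apache.flink.streaming.api.functions.async`; the `String` key/value types and the fallback values are made up for illustration.

```java
import java.util.Collections;

import org.apache.flink.streaming.api.functions.async.AsyncFunction;
import org.apache.flink.streaming.api.functions.async.ResultFuture;

// Hypothetical async lookup that degrades gracefully on timeout.
public class LookupWithFallback implements AsyncFunction<String, String> {

    @Override
    public void asyncInvoke(String key, ResultFuture<String> resultFuture) {
        // A real implementation would issue a non-blocking client call here and
        // complete the future from its callback; this sketch completes immediately.
        resultFuture.complete(Collections.singleton("value-for-" + key));
    }

    @Override
    public void timeout(String key, ResultFuture<String> resultFuture) {
        // Override the default (exceptional completion) and emit a fallback record.
        resultFuture.complete(Collections.singleton("timeout-fallback-for-" + key));
    }
}
```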
hbase_ZKWatcher_getMetaReplicaNodes | /**
* Get the znodes corresponding to the meta replicas from ZK
* @return list of znodes
* @throws KeeperException if a ZooKeeper operation fails
*/
public List<String> getMetaReplicaNodes() throws KeeperException {
List<String> childrenOfBaseNode = ZKUtil.listChildrenNoWatch(this, znodePaths.baseZNode);
return filterMetaReplicaNodes(childrenOfBaseNode);
} | 3.68 |
morf_AbstractSqlDialectTest_testRandom | /**
* Tests the random function
*/
@Test
public void testRandom() {
String result = testDialect.convertStatementToSQL(select(random()).from(tableRef("NEW1")));
assertEquals("Random script should match expected", "SELECT " + expectedRandomFunction() + " FROM " + tableName("NEW1"), result);
} | 3.68 |
flink_SelectByMaxFunction_reduce | /**
* Reduce implementation, returns bigger tuple or value1 if both tuples are equal. Comparison
* highly depends on the order and number of fields chosen as indices. All given fields (at
* construction time) are checked in the same order as defined (at construction time). If both
* tuples are equal at one index, the next index is compared; if no next index is available,
* value1 is returned. The tuple which has a bigger value at one index will be returned.
*/
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public T reduce(T value1, T value2) throws Exception {
for (int index = 0; index < fields.length; index++) {
// Save position of compared key
int position = this.fields[index];
// Get both values - both implement comparable
Comparable comparable1 = value1.getFieldNotNull(position);
Comparable comparable2 = value2.getFieldNotNull(position);
// Compare values
int comp = comparable1.compareTo(comparable2);
// If comp is bigger than 0, comparable1 is bigger.
// Return the bigger value.
if (comp > 0) {
return value1;
} else if (comp < 0) {
return value2;
}
}
return value1;
} | 3.68 |
graphhopper_WaySegmentParser_setWayFilter | /**
* @param wayFilter return true for OSM ways that should be considered and false otherwise
*/
public Builder setWayFilter(Predicate<ReaderWay> wayFilter) {
waySegmentParser.wayFilter = wayFilter;
return this;
} | 3.68 |
dubbo_ReferenceBeanManager_initReferenceBean | /**
* NOTE: This method should only be called after all dubbo config beans and all property resolvers are loaded.
*
* @param referenceBean
* @throws Exception
*/
public synchronized void initReferenceBean(ReferenceBean referenceBean) throws Exception {
if (referenceBean.getReferenceConfig() != null) {
return;
}
// TODO check same unique service name but different reference key (means different attributes).
// reference key
String referenceKey = getReferenceKeyByBeanName(referenceBean.getId());
if (StringUtils.isEmpty(referenceKey)) {
referenceKey = ReferenceBeanSupport.generateReferenceKey(referenceBean, applicationContext);
}
ReferenceConfig referenceConfig = referenceConfigMap.get(referenceKey);
if (referenceConfig == null) {
// create real ReferenceConfig
Map<String, Object> referenceAttributes = ReferenceBeanSupport.getReferenceAttributes(referenceBean);
referenceConfig = ReferenceCreator.create(referenceAttributes, applicationContext)
.defaultInterfaceClass(referenceBean.getObjectType())
.build();
// set id if it is not a generated name
if (referenceBean.getId() != null && !referenceBean.getId().contains("#")) {
referenceConfig.setId(referenceBean.getId());
}
// cache referenceConfig
referenceConfigMap.put(referenceKey, referenceConfig);
// register ReferenceConfig
moduleModel.getConfigManager().addReference(referenceConfig);
moduleModel.getDeployer().setPending();
}
// associate referenceConfig to referenceBean
referenceBean.setKeyAndReferenceConfig(referenceKey, referenceConfig);
} | 3.68 |
hudi_BaseHoodieTableServiceClient_isPreCommitRequired | /**
* Some writers use SparkAllowUpdateStrategy and treat the replacecommit plan as a revocable plan.
* In those cases, their ConflictResolutionStrategy implementation should run conflict resolution
* even for clustering operations.
*
* @return boolean
*/
protected boolean isPreCommitRequired() {
return this.config.getWriteConflictResolutionStrategy().isPreCommitRequired();
} | 3.68 |
flink_ExecutionEnvironment_readFileOfPrimitives | /**
* Creates a {@link DataSet} that represents the primitive type produced by reading the given
* file in a delimited way. This method is similar to {@link #readCsvFile(String)} with a single
* field, but it produces a DataSet not through {@link org.apache.flink.api.java.tuple.Tuple1}.
*
* @param filePath The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param delimiter The delimiter of the given file.
* @param typeClass The primitive type class to be read.
* @return A {@link DataSet} that represents the data read from the given file as primitive
* type.
*/
public <X> DataSource<X> readFileOfPrimitives(
String filePath, String delimiter, Class<X> typeClass) {
Preconditions.checkNotNull(filePath, "The file path may not be null.");
return new DataSource<>(
this,
new PrimitiveInputFormat<>(new Path(filePath), delimiter, typeClass),
TypeExtractor.getForClass(typeClass),
Utils.getCallLocationName());
} | 3.68 |
hadoop_ActiveAuditManagerS3A_switchToActiveSpan | /**
* Switch to a given span. If it is null, use the
* unbounded span.
* @param span to switch to; may be null
* @return the span switched to
*/
private WrappingAuditSpan switchToActiveSpan(WrappingAuditSpan span) {
if (span != null && span.isValidSpan()) {
activeSpanMap.setForCurrentThread(span);
} else {
activeSpanMap.removeForCurrentThread();
}
return activeSpan();
} | 3.68 |
framework_DefaultEditorEventHandler_findNextEditableColumnIndex | /**
* Finds index of the first editable column, starting at the specified
* index.
*
* @param grid
* the current grid, not null.
* @param startingWith
* start with this column. Index into the
* {@link Grid#getVisibleColumns()}.
* @return the index of the nearest visible column; may return the
* <code>startingWith</code> itself. Returns -1 if there is no such
* column.
*/
protected int findNextEditableColumnIndex(Grid<T> grid, int startingWith) {
final List<Grid.Column<?, T>> columns = grid.getVisibleColumns();
for (int i = startingWith; i < columns.size(); i++) {
if (isEditable(grid, columns.get(i))) {
return i;
}
}
return -1;
} | 3.68 |
hbase_HStoreFile_getPreadScanner | /**
* Get a scanner which uses pread.
* <p>
* Must be called after initReader.
*/
public StoreFileScanner getPreadScanner(boolean cacheBlocks, long readPt, long scannerOrder,
boolean canOptimizeForNonNullColumn) {
return getReader().getStoreFileScanner(cacheBlocks, true, false, readPt, scannerOrder,
canOptimizeForNonNullColumn);
} | 3.68 |
hadoop_Sets_newConcurrentHashSet | /**
* Creates a thread-safe set backed by a hash map. The set is backed by a
* {@link ConcurrentHashMap} instance, and thus carries the same concurrency
* guarantees.
*
* <p>Unlike {@code HashSet}, this class does NOT allow {@code null} to be
* used as an element. The set is serializable.
*
* @param <E> Generics Type.
* @return a new, empty thread-safe {@code Set}
*/
public static <E> Set<E> newConcurrentHashSet() {
return Collections.newSetFromMap(new ConcurrentHashMap<E, Boolean>());
} | 3.68 |
framework_Form_getErrorMessage | /**
* The error message of a Form is the error of the first field with a
* non-empty error.
*
* Empty error messages of the contained fields are skipped, because an
* empty error indicator would be confusing to the user, especially if there
* are errors that have something to display. This is also the reason why
* the calculation of the error message is separate from validation, because
* validation also fails on empty errors.
*/
@Override
public ErrorMessage getErrorMessage() {
// Reimplement the checking of validation error by using
// getErrorMessage() recursively instead of validate().
ErrorMessage validationError = null;
if (isValidationVisible()) {
for (final Object id : propertyIds) {
Object f = fields.get(id);
if (f instanceof AbstractComponent) {
AbstractComponent field = (AbstractComponent) f;
validationError = field.getErrorMessage();
if (validationError != null) {
// Show caption as error for fields with empty errors
if ("".equals(validationError.toString())) {
validationError = new UserError(field.getCaption());
}
break;
} else if (f instanceof Field
&& !((Field<?>) f).isValid()) {
// Something is wrong with the field, but no proper
// error is given. Generate one.
validationError = new UserError(field.getCaption());
break;
}
}
}
}
// Return if there are no errors at all
if (getComponentError() == null && validationError == null
&& currentBufferedSourceException == null) {
return null;
}
// Throw combination of the error types
return new CompositeErrorMessage(
new ErrorMessage[] { getComponentError(), validationError,
AbstractErrorMessage.getErrorMessageForException(
currentBufferedSourceException) });
} | 3.68 |
dubbo_FileCacheStoreFactory_getFile | /**
* Get a file object for the given name
*
* @param name the file name
* @return a file object
*/
private static FileCacheStore getFile(String name, boolean enableFileCache) {
if (!enableFileCache) {
return FileCacheStore.Empty.getInstance(name);
}
try {
FileCacheStore.Builder builder = FileCacheStore.newBuilder();
tryFileLock(builder, name);
File file = new File(name);
if (!file.exists()) {
Path pathObjectOfFile = file.toPath();
Files.createFile(pathObjectOfFile);
}
builder.cacheFilePath(name).cacheFile(file);
return builder.build();
} catch (Throwable t) {
logger.warn(
COMMON_CACHE_PATH_INACCESSIBLE,
"inaccessible of cache path",
"",
"Failed to create file store cache. Local file cache will be disabled. Cache file name: " + name,
t);
return FileCacheStore.Empty.getInstance(name);
}
} | 3.68 |
morf_DataSetHomology_convertToUppercase | /**
* Converts each string from the source collection to uppercase in a new collection.
*
* @param source the source collection
* @return a new collection containing only uppercase strings
*/
private Set<String> convertToUppercase(final Collection<String> source) {
return source.stream().map(String::toUpperCase).collect(Collectors.toSet());
} | 3.68 |
zxing_WhiteRectangleDetector_containsBlackPoint | /**
* Determines whether a segment contains a black point
*
* @param a min value of the scanned coordinate
* @param b max value of the scanned coordinate
* @param fixed value of fixed coordinate
* @param horizontal set to true if scan must be horizontal, false if vertical
* @return true if a black point has been found, else false.
*/
private boolean containsBlackPoint(int a, int b, int fixed, boolean horizontal) {
if (horizontal) {
for (int x = a; x <= b; x++) {
if (image.get(x, fixed)) {
return true;
}
}
} else {
for (int y = a; y <= b; y++) {
if (image.get(fixed, y)) {
return true;
}
}
}
return false;
} | 3.68 |
morf_ChangelogBuilder_withOutputTo | /**
* Set the {@link PrintWriter} target for this changelog. Default is the
* console.
*
* @param outputStream The {@link PrintWriter} to output to.
* @return This builder for chaining
*/
public ChangelogBuilder withOutputTo(PrintWriter outputStream) {
this.outputStream = outputStream;
return this;
} | 3.68 |
hibernate-validator_ValueExtractorResolver_getMaximallySpecificValueExtractorForAllContainerElements | /**
* Used to determine if the passed runtime type is a container and if so return a corresponding maximally specific
* value extractor.
* <p>
* Obviously, it only works if there's only one value extractor corresponding to the runtime type as we don't
* specify any type parameter.
* <p>
* There is a special case: when the passed type is assignable to a {@link Map}, the {@link MapValueExtractor} will
* be returned. This is required by the Bean Validation specification.
* <p>
* Used for cascading validation when the {@code @Valid} annotation is placed on the whole container.
*
* @throws ConstraintDeclarationException if more than 2 maximally specific container-element-compliant value extractors are found
*/
public ValueExtractorDescriptor getMaximallySpecificValueExtractorForAllContainerElements(Class<?> runtimeType, Set<ValueExtractorDescriptor> potentialValueExtractorDescriptors) {
// if it's a Map assignable type, it gets a special treatment to conform to the Bean Validation specification
if ( TypeHelper.isAssignable( Map.class, runtimeType ) ) {
return MapValueExtractor.DESCRIPTOR;
}
return getUniqueValueExtractorOrThrowException( runtimeType, getRuntimeCompliantValueExtractors( runtimeType, potentialValueExtractorDescriptors ) );
} | 3.68 |
hbase_AsyncTable_existsAll | /**
* A simple version for batch exists. It will fail if there are any failures and you will get the
* whole result boolean list at once if the operation succeeds.
* @param gets the Gets
* @return A {@link CompletableFuture} that wraps the result boolean list.
*/
default CompletableFuture<List<Boolean>> existsAll(List<Get> gets) {
return allOf(exists(gets));
} | 3.68 |
hbase_KeyValueUtil_ensureKeyValue | /*************** misc **********************************/
/**
* @return <code>cell</code> if it is an object of class {@link KeyValue} else we will return a
* new {@link KeyValue} instance made from <code>cell</code>. Note: Even if the cell is an
* object of any of the subclass of {@link KeyValue}, we will create a new
* {@link KeyValue} object wrapping same buffer. This API is used only with MR based tools
* which expect the type to be exactly KeyValue. That is the reason for doing this way.
* @deprecated without any replacement.
*/
@Deprecated
public static KeyValue ensureKeyValue(final Cell cell) {
if (cell == null) return null;
if (cell instanceof KeyValue) {
if (cell.getClass().getName().equals(KeyValue.class.getName())) {
return (KeyValue) cell;
}
// Cell is an Object of any of the sub classes of KeyValue. Make a new KeyValue wrapping the
// same byte[]
KeyValue kv = (KeyValue) cell;
KeyValue newKv = new KeyValue(kv.bytes, kv.offset, kv.length);
newKv.setSequenceId(kv.getSequenceId());
return newKv;
}
return copyToNewKeyValue(cell);
} | 3.68 |
zxing_Detector_getFirstDifferent | /**
* Gets the coordinate of the first point with a different color in the given direction
*/
private Point getFirstDifferent(Point init, boolean color, int dx, int dy) {
int x = init.getX() + dx;
int y = init.getY() + dy;
while (isValid(x, y) && image.get(x, y) == color) {
x += dx;
y += dy;
}
x -= dx;
y -= dy;
while (isValid(x, y) && image.get(x, y) == color) {
x += dx;
}
x -= dx;
while (isValid(x, y) && image.get(x, y) == color) {
y += dy;
}
y -= dy;
return new Point(x, y);
} | 3.68 |
hadoop_AMRMClientAsyncImpl_addContainerRequest | /**
* Request containers for resources before calling <code>allocate</code>
* @param req Resource request
*/
public void addContainerRequest(T req) {
client.addContainerRequest(req);
} | 3.68 |
morf_NamedParameterPreparedStatement_parse | /**
* Parses a SQL query with named parameters. The parameter-index mappings are extracted
* and stored in a map, and the parsed query with parameter placeholders is returned.
*
* @param query The SQL query to parse, which may contain named parameters.
* @return The parsed SQL query with named parameters replaced by placeholders.
*/
private String parse(String query) {
int length = query.length();
StringBuffer parsedQuery = new StringBuffer(length);
boolean inSingleQuote = false; // Tracks if inside a single-quoted string
boolean inDoubleQuote = false; // Tracks if inside a double-quoted string
boolean inComment = false; // Tracks if inside a SQL comment
int index = 1; // Index for parameter placeholders
for (int i = 0; i < length; i++) {
char c = query.charAt(i);
if (inComment) {
// If inside a SQL comment, skip until the end of the line
if (c == '\n') {
inComment = false; // End of SQL comment
}
} else if (inSingleQuote && c == '\'') {
inSingleQuote = false; // End of single-quoted string
} else if (inDoubleQuote && c == '"') {
inDoubleQuote = false; // End of double-quoted string
} else if (!inSingleQuote && !inDoubleQuote) {
if (c == '\'') {
inSingleQuote = true; // Start of single-quoted string
} else if (c == '"') {
inDoubleQuote = true; // Start of double-quoted string
} else if (c == '-' && i + 1 < length && query.charAt(i + 1) == '-') {
inComment = true; // Start of SQL comment
} else if (c == ':' && i + 1 < length && Character.isJavaIdentifierStart(query.charAt(i + 1))) {
int j = i + 2;
while (j < length && Character.isJavaIdentifierPart(query.charAt(j))) {
j++;
}
String name = query.substring(i + 1, j);
c = '?'; // Replace the parameter with question mark
//CHECKSTYLE:OFF ModifiedControlVariableCheck
i += name.length(); // Skip past the end of the parameter
//CHECKSTYLE:ON:
List<Integer> indexList = indexMap.get(name);
if (indexList == null) {
indexList = Lists.newArrayList();
indexMap.put(name, indexList);
}
indexList.add(index);
index++;
}
}
parsedQuery.append(c);
}
return parsedQuery.toString();
} | 3.68 |
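
The rewrite performed by `parse` is easiest to see on a concrete query. The following standalone sketch re-implements only the core parameter-to-placeholder step (it deliberately ignores the quote and comment handling of the original); the class name and the sample query are made up for illustration.

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Simplified sketch of the named-parameter rewrite shown above.
public class NamedParameterDemo {
    public static void main(String[] args) {
        String query = "SELECT * FROM person WHERE id = :id AND name = :name OR other = :id";
        Map<String, List<Integer>> indexMap = new LinkedHashMap<>();
        StringBuilder parsed = new StringBuilder();
        int index = 1;
        for (int i = 0; i < query.length(); i++) {
            char c = query.charAt(i);
            if (c == ':' && i + 1 < query.length()
                    && Character.isJavaIdentifierStart(query.charAt(i + 1))) {
                int j = i + 2;
                while (j < query.length() && Character.isJavaIdentifierPart(query.charAt(j))) {
                    j++;
                }
                String name = query.substring(i + 1, j);
                indexMap.computeIfAbsent(name, k -> new ArrayList<>()).add(index++);
                parsed.append('?'); // replace the named parameter with a placeholder
                i = j - 1;          // skip past the parameter name
            } else {
                parsed.append(c);
            }
        }
        System.out.println(parsed);   // SELECT * FROM person WHERE id = ? AND name = ? OR other = ?
        System.out.println(indexMap); // {id=[1, 3], name=[2]}
    }
}
```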
morf_SchemaHomology_checkColumn | /**
* @param tableName The table which contains the columns. Can be null, in which case the errors don't contain the table name.
* @param column1 First column to compare.
* @param column2 Second column to compare.
*/
private void checkColumn(String tableName, Column column1, Column column2) {
matches("Column name", column1.getUpperCaseName(), column2.getUpperCaseName());
String prefix = "Column [" + column1.getName() + "] " + (tableName == null ? "" : "on table [" + tableName + "] ");
matches(prefix + "data type", column1.getType(), column2.getType());
matches(prefix + "nullable", column1.isNullable(), column2.isNullable());
matches(prefix + "primary key", column1.isPrimaryKey(), column2.isPrimaryKey());
matches(prefix + "default value", column1.getDefaultValue(), column2.getDefaultValue());
matches(prefix + "autonumber", column1.isAutoNumbered(), column2.isAutoNumbered());
if (column1.isAutoNumbered()) {
matches(prefix + "autonumber start", column1.getAutoNumberStart(), column2.getAutoNumberStart());
}
if (column1.getType().hasWidth()) {
matches(prefix + "width", column1.getWidth(), column2.getWidth());
}
if (column1.getType().hasScale()) {
matches(prefix + "scale", column1.getScale(), column2.getScale());
}
} | 3.68 |
flink_Configuration_setInteger | /**
* Adds the given value to the configuration object. The main key of the config option will be
* used to map the value.
*
* @param key the option specifying the key to be added
* @param value the value of the key/value pair to be added
*/
@PublicEvolving
public void setInteger(ConfigOption<Integer> key, int value) {
setValueInternal(key.key(), value);
} | 3.68 |
hbase_BaseLoadBalancer_randomAssignment | /**
* Used to assign a single region to a random server.
*/
private ServerName randomAssignment(BalancerClusterState cluster, RegionInfo regionInfo,
List<ServerName> servers) {
int numServers = servers.size(); // servers is not null, numServers > 1
ServerName sn = null;
final int maxIterations = numServers * 4;
int iterations = 0;
List<ServerName> usedSNs = new ArrayList<>(servers.size());
Random rand = ThreadLocalRandom.current();
do {
int i = rand.nextInt(numServers);
sn = servers.get(i);
if (!usedSNs.contains(sn)) {
usedSNs.add(sn);
}
} while (cluster.wouldLowerAvailability(regionInfo, sn) && iterations++ < maxIterations);
if (iterations >= maxIterations) {
// We have reached the max. Means the servers that we collected are still lowering the
// availability
for (ServerName unusedServer : servers) {
if (!usedSNs.contains(unusedServer)) {
// check if any other unused server is there for us to use.
// If so use it. Else we have no other option but to go with one of them.
if (!cluster.wouldLowerAvailability(regionInfo, unusedServer)) {
sn = unusedServer;
break;
}
}
}
}
cluster.doAssignRegion(regionInfo, sn);
return sn;
} | 3.68 |
flink_VertexThreadInfoTrackerBuilder_setDelayBetweenSamples | /**
* Sets {@code delayBetweenSamples}.
*
* @param delayBetweenSamples Delay between individual samples per task.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setDelayBetweenSamples(Duration delayBetweenSamples) {
this.delayBetweenSamples = delayBetweenSamples;
return this;
} | 3.68 |
pulsar_OpenIDProviderMetadataCache_getOpenIDProviderMetadataForKubernetesApiServer | /**
* Retrieve the OpenID Provider Metadata for the Kubernetes API server. This method is used instead of
* {@link #getOpenIDProviderMetadataForIssuer(String)} because different validations are done. The Kubernetes
* API server does not technically implement the complete OIDC spec for discovery, but it does implement some of
* it, so this method validates what it can. Specifically, it skips validation that the Discovery Document
* provider's URI matches the issuer. It verifies that the issuer on the discovery document matches the issuer
* claim.
* @return a future completed with the {@link OpenIDProviderMetadata} for the Kubernetes API server
*/
CompletableFuture<OpenIDProviderMetadata> getOpenIDProviderMetadataForKubernetesApiServer(String issClaim) {
return cache.get(Optional.empty()).thenCompose(openIDProviderMetadata -> {
CompletableFuture<OpenIDProviderMetadata> future = new CompletableFuture<>();
try {
verifyIssuer(issClaim, openIDProviderMetadata, true);
future.complete(openIDProviderMetadata);
} catch (AuthenticationException e) {
incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PROVIDER_METADATA);
future.completeExceptionally(e);
}
return future;
});
} | 3.68 |
framework_VFilterSelect_getMainWidth | /**
* Get the width of the select in pixels, including the text area and icon.
*
* @return The width in pixels
*/
private int getMainWidth() {
return getOffsetWidth();
} | 3.68 |
hbase_RowMutations_of | /**
* Create a {@link RowMutations} with the specified mutations.
* @param mutations the mutations to send
* @throws IOException if any row in mutations is different from another
*/
public static RowMutations of(List<? extends Mutation> mutations) throws IOException {
if (CollectionUtils.isEmpty(mutations)) {
throw new IllegalArgumentException("Cannot instantiate a RowMutations by empty list");
}
return new RowMutations(mutations.get(0).getRow(), mutations.size()).add(mutations);
} | 3.68 |
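
A hedged usage sketch for `RowMutations.of`, assuming the standard HBase client `Put`, `Delete` and `Bytes` classes (not shown in the snippet); the row, family and qualifier names are illustrative only.

```java
import java.util.Arrays;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.util.Bytes;

public class RowMutationsExample {
    public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes("row-1");

        // Both mutations must target the same row, otherwise add(...) throws.
        Put put = new Put(row);
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("a"), Bytes.toBytes("1"));
        Delete delete = new Delete(row);
        delete.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("b"));

        RowMutations mutations = RowMutations.of(Arrays.asList(put, delete));
        // Typically applied atomically, e.g. via Table.mutateRow(mutations).
    }
}
```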
hbase_HRegion_disableInterrupts | /**
* If a handler thread is eligible for interrupt, make it ineligible. Should be paired with
* {{@link #enableInterrupts()}.
*/
void disableInterrupts() {
regionLockHolders.computeIfPresent(Thread.currentThread(), (t, b) -> false);
} | 3.68 |
flink_DefaultExecutionGraphFactory_tryRestoreExecutionGraphFromSavepoint | /**
* Tries to restore the given {@link ExecutionGraph} from the provided {@link
* SavepointRestoreSettings}, iff checkpointing is enabled.
*
* @param executionGraphToRestore {@link ExecutionGraph} which is supposed to be restored
* @param savepointRestoreSettings {@link SavepointRestoreSettings} containing information about
* the savepoint to restore from
* @throws Exception if the {@link ExecutionGraph} could not be restored
*/
private void tryRestoreExecutionGraphFromSavepoint(
ExecutionGraph executionGraphToRestore,
SavepointRestoreSettings savepointRestoreSettings)
throws Exception {
if (savepointRestoreSettings.restoreSavepoint()) {
final CheckpointCoordinator checkpointCoordinator =
executionGraphToRestore.getCheckpointCoordinator();
if (checkpointCoordinator != null) {
checkpointCoordinator.restoreSavepoint(
savepointRestoreSettings,
executionGraphToRestore.getAllVertices(),
userCodeClassLoader);
}
}
} | 3.68 |
flink_FutureUtils_completeDelayed | /**
* Asynchronously completes the future after a certain delay.
*
* @param future The future to complete.
* @param success The element to complete the future with.
* @param delay The delay after which the future should be completed.
*/
public static <T> void completeDelayed(CompletableFuture<T> future, T success, Duration delay) {
Delayer.delay(() -> future.complete(success), delay.toMillis(), TimeUnit.MILLISECONDS);
} | 3.68 |
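
A small usage sketch of `completeDelayed`. The import path of Flink's `FutureUtils` is an assumption (the class has moved between packages across Flink versions), and the values are illustrative.

```java
import java.time.Duration;
import java.util.concurrent.CompletableFuture;

// Package is an assumption; in recent Flink versions FutureUtils lives here.
import org.apache.flink.util.concurrent.FutureUtils;

public class CompleteDelayedExample {
    public static void main(String[] args) {
        CompletableFuture<String> future = new CompletableFuture<>();

        // Complete the future with "done" roughly 200 ms from now.
        FutureUtils.completeDelayed(future, "done", Duration.ofMillis(200));

        // join() blocks until the delayed completion fires.
        System.out.println(future.join()); // prints "done"
    }
}
```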
hudi_HoodieLogFileReader_getFSDataInputStream | /**
* Fetch the right {@link FSDataInputStream} to be used by wrapping with required input streams.
* @param fs instance of {@link FileSystem} in use.
* @param logFile the {@link HoodieLogFile} to read.
* @param bufferSize buffer size to be used.
* @return the right {@link FSDataInputStream} as required.
*/
private static FSDataInputStream getFSDataInputStream(FileSystem fs,
HoodieLogFile logFile,
int bufferSize) throws IOException {
FSDataInputStream fsDataInputStream = fs.open(logFile.getPath(), bufferSize);
if (FSUtils.isGCSFileSystem(fs)) {
// in GCS FS, we might need to intercept seek offsets as we might get EOF exception
return new SchemeAwareFSDataInputStream(getFSDataInputStreamForGCS(fsDataInputStream, logFile, bufferSize), true);
}
if (FSUtils.isCHDFileSystem(fs)) {
return new BoundedFsDataInputStream(fs, logFile.getPath(), fsDataInputStream);
}
if (fsDataInputStream.getWrappedStream() instanceof FSInputStream) {
return new TimedFSDataInputStream(logFile.getPath(), new FSDataInputStream(
new BufferedFSInputStream((FSInputStream) fsDataInputStream.getWrappedStream(), bufferSize)));
}
// fsDataInputStream.getWrappedStream() may be a BufferedFSInputStream;
// need to wrap in another BufferedFSInputStream to make bufferSize work?
return fsDataInputStream;
} | 3.68 |
morf_DataValueLookupBuilderImpl_getAndConvertByIndex | /**
* Fetches the value at the specified index, converting it to the target
* type using the associated {@link ValueConverter}.
*
* @param <STORED> The type actually stored in the internal array.
* @param <RETURNED> The type being returned by the API call.
* @param i The index.
* @param mapper The mapper.
* @return The value.
*/
@SuppressWarnings("unchecked")
private final <STORED, RETURNED> RETURNED getAndConvertByIndex(Integer i, ValueMapper<STORED, RETURNED> mapper) {
if (i == null) return null;
STORED value = (STORED) data[i];
return mapper.map(value, (ValueConverter<STORED>) toConverter(value));
} | 3.68 |
flink_CoGroupedStreams_trigger | /** Sets the {@code Trigger} that should be used to trigger window emission. */
@PublicEvolving
public WithWindow<T1, T2, KEY, W> trigger(
Trigger<? super TaggedUnion<T1, T2>, ? super W> newTrigger) {
return new WithWindow<>(
input1,
input2,
keySelector1,
keySelector2,
keyType,
windowAssigner,
newTrigger,
evictor,
allowedLateness);
} | 3.68 |
hbase_ZKWatcher_getMetaReplicaNodesAndWatchChildren | /**
* Same as {@link #getMetaReplicaNodes()} except that this also registers a watcher on base znode
* for subsequent CREATE/DELETE operations on child nodes.
*/
public List<String> getMetaReplicaNodesAndWatchChildren() throws KeeperException {
List<String> childrenOfBaseNode =
ZKUtil.listChildrenAndWatchForNewChildren(this, znodePaths.baseZNode);
return filterMetaReplicaNodes(childrenOfBaseNode);
} | 3.68 |
flink_HiveParserCalcitePlanner_convertNullLiteral | // flink doesn't support type NULL, so we need to convert such literals
private RexNode convertNullLiteral(RexNode rexNode) {
if (rexNode instanceof RexLiteral) {
RexLiteral literal = (RexLiteral) rexNode;
if (literal.isNull() && literal.getTypeName() == SqlTypeName.NULL) {
return cluster.getRexBuilder()
.makeNullLiteral(
cluster.getTypeFactory().createSqlType(SqlTypeName.VARCHAR));
}
}
return rexNode;
} | 3.68 |
hmily_CommonAssembler_assembleHmilyColumnSegment | /**
* Assemble hmily column segment.
*
* @param column column
* @return hmily column segment
*/
public static HmilyColumnSegment assembleHmilyColumnSegment(final ColumnSegment column) {
HmilyIdentifierValue hmilyIdentifierValue = new HmilyIdentifierValue(column.getIdentifier().getValue());
HmilyColumnSegment result = new HmilyColumnSegment(column.getStartIndex(), column.getStopIndex(), hmilyIdentifierValue);
column.getOwner().ifPresent(ownerSegment -> {
HmilyIdentifierValue identifierValue = new HmilyIdentifierValue(ownerSegment.getIdentifier().getValue());
result.setOwner(new HmilyOwnerSegment(ownerSegment.getStartIndex(), ownerSegment.getStopIndex(), identifierValue));
});
return result;
} | 3.68 |
dubbo_ReferenceConfig_getServices | /**
* Get a string presenting the service names that the Dubbo interface subscribed to.
* If there are multiple values, the content will be a comma-delimited String.
*
* @return non-null
* @see RegistryConstants#SUBSCRIBED_SERVICE_NAMES_KEY
* @since 2.7.8
*/
@Deprecated
@Parameter(key = SUBSCRIBED_SERVICE_NAMES_KEY)
public String getServices() {
return services;
} | 3.68 |
hbase_CellFlatMap_find | /**
* Binary search for a given key in between given boundaries of the array. Positive returned
* numbers mean the index. Negative returned numbers mean the key was not found. The absolute value
* of the output is the possible insert index for the searched key. In two's complement, (-1 *
* insertion point)-1 is the bitwise not of the insert point.
* @param needle The key to look for in all of the entries
* @return Same return value as Arrays.binarySearch.
*/
private int find(Cell needle) {
int begin = minCellIdx;
int end = maxCellIdx - 1;
while (begin <= end) {
int mid = begin + ((end - begin) >> 1);
Cell midCell = getCell(mid);
int compareRes = comparator.compare(midCell, needle);
if (compareRes == 0) {
return mid; // 0 means equals. We found the key
}
// Key not found. Check the comparison results; reverse the meaning of
// the comparison in case the order is descending (using XOR)
if ((compareRes < 0) ^ descending) {
// midCell is less than needle so we need to look at farther up
begin = mid + 1;
} else {
// midCell is greater than needle so we need to look down
end = mid - 1;
}
}
return (-1 * begin) - 1;
} | 3.68 |
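
The negative-return encoding described in the Javadoc above is the same contract as `java.util.Arrays.binarySearch`; a tiny self-contained demonstration of that contract (with made-up data):

```java
import java.util.Arrays;

// A hit returns the index; a miss returns (-(insertion point) - 1).
public class BinarySearchContractDemo {
    public static void main(String[] args) {
        int[] sorted = {10, 20, 30, 40};

        System.out.println(Arrays.binarySearch(sorted, 30)); // 2  -> found at index 2
        int miss = Arrays.binarySearch(sorted, 25);
        System.out.println(miss);                            // -3 -> not found
        System.out.println(-miss - 1);                       // 2  -> insertion point
    }
}
```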
framework_ComponentDetail_getTooltipInfo | /**
* Returns a TooltipInfo associated with Component. If element is given,
* returns an additional TooltipInfo.
*
* @param key the key of an additional tooltip, or <code>null</code> for the component's main tooltip
* @return the tooltipInfo
*/
public TooltipInfo getTooltipInfo(Object key) {
if (key == null) {
return tooltipInfo;
} else {
if (additionalTooltips != null) {
return additionalTooltips.get(key);
} else {
return null;
}
}
} | 3.68 |
framework_DragSourceExtensionConnector_fixDragImageTransformForSafari | /**
* Fixes missing drag image on Safari when there is
* {@code transform: translate(x,y)} CSS used on the parent DOM for the
* dragged element. Safari apparently doesn't take those into account, and
* creates the drag image of the element's location without all the
* transforms.
* <p>
* This is required for e.g. Grid where transforms are used to position the
* rows and scroll the body.
*
* @param draggedElement
* the dragged element
* @param clonedStyle
* the style for the cloned element
* @return the amount of X offset that was applied to the dragged element
* due to transform X, needed for calculation the relative position
* of the drag image according to mouse position
*/
private int fixDragImageTransformForSafari(Element draggedElement,
Style clonedStyle) {
int xTransformOffsetForSafari = 0;
int yTransformOffsetForSafari = 0;
Element parent = draggedElement.getParentElement();
/*
* Unfortunately, the following solution does not work when there are
* many nested layers of transforms. It seems that the outer transforms
* do not effect the cloned element the same way. #9408
*/
while (parent != null) {
ComputedStyle computedStyle = new ComputedStyle(parent);
String transform = computedStyle.getProperty("transform");
if (transform == null || transform.isEmpty()) {
transform = computedStyle.getProperty("-webkitTransform");
}
if (transform != null && !transform.isEmpty()
&& !transform.equalsIgnoreCase("none")) {
// matrix format is "matrix(a,b,c,d,x,y)"
xTransformOffsetForSafari -= getMatrixValue(transform, 4);
yTransformOffsetForSafari -= getMatrixValue(transform, 5);
}
parent = parent.getParentElement();
}
if (xTransformOffsetForSafari != 0 || yTransformOffsetForSafari != 0) {
StringBuilder sb = new StringBuilder("translate(")
.append(xTransformOffsetForSafari).append("px,")
.append(yTransformOffsetForSafari).append("px)");
clonedStyle.setProperty("transform", sb.toString());
}
// the x-offset should be taken into account when the drag image is
// adjusted according to the mouse position. The Y-offset doesn't matter
// for some reason (TM), at least for grid DnD, and is probably related
// to #9408
return xTransformOffsetForSafari;
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_update | /**
* Update from {@code HoodieRollbackMetadata}.
*
* @param rollbackMetadata {@code HoodieRollbackMetadata}
* @param instantTime Timestamp at which the rollback was performed
*/
@Override
public void update(HoodieRollbackMetadata rollbackMetadata, String instantTime) {
if (initialized && metadata != null) {
// The commit which is being rolled back on the dataset
final String commitToRollbackInstantTime = rollbackMetadata.getCommitsRollback().get(0);
// Find the deltacommits since the last compaction
Option<Pair<HoodieTimeline, HoodieInstant>> deltaCommitsInfo =
CompactionUtils.getDeltaCommitsSinceLatestCompaction(metadataMetaClient.getActiveTimeline());
// This could be a compaction or deltacommit instant (See CompactionUtils.getDeltaCommitsSinceLatestCompaction)
HoodieInstant compactionInstant = deltaCommitsInfo.get().getValue();
HoodieTimeline deltacommitsSinceCompaction = deltaCommitsInfo.get().getKey();
// The deltacommit that will be rolled back
HoodieInstant deltaCommitInstant = new HoodieInstant(false, HoodieTimeline.DELTA_COMMIT_ACTION, commitToRollbackInstantTime);
validateRollback(commitToRollbackInstantTime, compactionInstant, deltacommitsSinceCompaction);
// lets apply a delta commit with DT's rb instant(with special suffix) containing following records:
// a. any log files as part of RB commit metadata that was added
// b. log files added by the commit in DT being rolled back. By rolled back, we mean, a rollback block will be added and does not mean it will be deleted.
// both above list should only be added to FILES partition.
String rollbackInstantTime = createRollbackTimestamp(instantTime);
processAndCommit(instantTime, () -> HoodieTableMetadataUtil.convertMetadataToRecords(engineContext, dataMetaClient, rollbackMetadata, instantTime));
if (deltacommitsSinceCompaction.containsInstant(deltaCommitInstant)) {
LOG.info("Rolling back MDT deltacommit " + commitToRollbackInstantTime);
if (!getWriteClient().rollback(commitToRollbackInstantTime, rollbackInstantTime)) {
throw new HoodieMetadataException("Failed to rollback deltacommit at " + commitToRollbackInstantTime);
}
} else {
LOG.info(String.format("Ignoring rollback of instant %s at %s. The commit to rollback is not found in MDT",
commitToRollbackInstantTime, instantTime));
}
closeInternal();
}
} | 3.68 |
flink_SSLUtils_createSSLServerSocketFactory | /**
* Creates a factory for SSL Server Sockets from the given configuration. SSL Server Sockets are
* always part of internal communication.
*/
public static ServerSocketFactory createSSLServerSocketFactory(Configuration config)
throws Exception {
SSLContext sslContext = createInternalSSLContext(config, false);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled");
}
String[] protocols = getEnabledProtocols(config);
String[] cipherSuites = getEnabledCipherSuites(config);
SSLServerSocketFactory factory = sslContext.getServerSocketFactory();
return new ConfiguringSSLServerSocketFactory(factory, protocols, cipherSuites);
} | 3.68 |
hbase_ColumnRangeFilter_isMinColumnInclusive | /** Returns if min column range is inclusive. */
public boolean isMinColumnInclusive() {
return minColumnInclusive;
} | 3.68 |
dubbo_RpcServiceContext_getParameterTypes | /**
* get parameter types.
*
* @serial
*/
@Override
public Class<?>[] getParameterTypes() {
return parameterTypes;
} | 3.68 |
hudi_HoodieGauge_setValue | /**
* Set the metric to a new value.
*/
public void setValue(T value) {
this.value = value;
} | 3.68 |
hbase_FileArchiverNotifierImpl_getSnapshotSizeFromResult | /**
* Extracts the size component from a serialized {@link SpaceQuotaSnapshot} protobuf.
* @param r A Result containing one cell with a SpaceQuotaSnapshot protobuf
* @return The size in bytes of the snapshot.
*/
long getSnapshotSizeFromResult(Result r) throws InvalidProtocolBufferException {
// Per javadoc, Result should only be null if an exception was thrown. So, if we're here,
// we should be non-null. If we can't advance to the first cell, same as "no cell".
if (!r.isEmpty() && r.advance()) {
return QuotaTableUtil.parseSnapshotSize(r.current());
}
return 0L;
} | 3.68 |
flink_Tuple2_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
*/
public void setFields(T0 f0, T1 f1) {
this.f0 = f0;
this.f1 = f1;
} | 3.68 |
cron-utils_FieldConstraintsBuilder_addLSupport | /**
* Adds L support.
*
* @return same FieldConstraintsBuilder instance
*/
public FieldConstraintsBuilder addLSupport() {
specialChars.add(SpecialChar.L);
return this;
} | 3.68 |
hadoop_AzureBlobFileSystem_statIncrement | /**
 * Increments an Abfs statistic.
 *
 * @param statistic the AbfsStatistic to increment.
*/
private void statIncrement(AbfsStatistic statistic) {
incrementStatistic(statistic);
} | 3.68 |
hadoop_EntityRowKey_getRowKeyAsString | /**
* Constructs a row key for the entity table as follows:
* <p>
* {@code userName!clusterId!flowName!flowRunId!AppId!
* entityType!entityIdPrefix!entityId}.
* </p>
* @return String representation of row key.
*/
public String getRowKeyAsString() {
return entityRowKeyConverter.encodeAsString(this);
} | 3.68 |
hadoop_ConnectionContext_close | /**
* Close a connection. Only idle connections can be closed since
* the RPC proxy would be shut down immediately.
*
* @param force whether the connection should be closed anyway.
*/
public synchronized void close(boolean force) {
if (!force && this.numThreads > 0) {
      // This is an erroneous case, but we have to close the connection
      // anyway, since there would be a connection leak if we didn't;
      // the connection has already been moved out of the pool.
LOG.error("Active connection with {} handlers will be closed, ConnectionContext is {}",
this.numThreads, this);
}
this.closed = true;
Object proxy = this.client.getProxy();
// Nobody should be using this anymore, so it should close right away
RPC.stopProxy(proxy);
} | 3.68 |
hbase_RowModel_addCell | /**
* Adds a cell to the list of cells for this row
* @param cell the cell
*/
public void addCell(CellModel cell) {
cells.add(cell);
} | 3.68 |
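A hedged usage sketch for building a REST row payload; the RowModel and CellModel constructor signatures shown here are assumptions about the HBase REST model classes:

import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.rest.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

public class RowModelExample {
    public static void main(String[] args) {
        RowModel row = new RowModel(Bytes.toBytes("row-1"));
        // Attach one cell (column family:qualifier plus value) to the row.
        row.addCell(new CellModel(Bytes.toBytes("cf:qualifier"), Bytes.toBytes("value-1")));
        System.out.println("Cells in row: " + row.getCells().size());
    }
}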
pulsar_HierarchicalLedgerUtils_ledgerListToSet | /**
 * Get all ledger ids under the given zookeeper path.
 *
 * @param ledgerNodes
 *          list of ledger nodes under the given path,
 *          e.g. {L1652, L1653, L1650}
 * @param ledgerRootPath
 *          the root zookeeper path under which ledgers are stored, e.g. /ledgers
 * @param path
 *          the zookeeper path of the ledger ids; it should start with {@code ledgerRootPath},
 *          e.g. (with ledgerRootPath = /ledgers) /ledgers/00/0053
 */
NavigableSet<Long> ledgerListToSet(List<String> ledgerNodes, String ledgerRootPath, String path) {
NavigableSet<Long> zkActiveLedgers = new TreeSet<>();
if (!path.startsWith(ledgerRootPath)) {
log.warn("Ledger path [{}] is not a valid path name, it should start wth {}", path, ledgerRootPath);
return zkActiveLedgers;
}
long ledgerIdPrefix = 0;
char ch;
for (int i = ledgerRootPath.length() + 1; i < path.length(); i++) {
ch = path.charAt(i);
if (ch < '0' || ch > '9') {
continue;
}
ledgerIdPrefix = ledgerIdPrefix * 10 + (ch - '0');
}
for (String ledgerNode : ledgerNodes) {
if (AbstractZkLedgerManager.isSpecialZnode(ledgerNode)) {
continue;
}
long ledgerId = ledgerIdPrefix;
for (int i = 0; i < ledgerNode.length(); i++) {
ch = ledgerNode.charAt(i);
if (ch < '0' || ch > '9') {
continue;
}
ledgerId = ledgerId * 10 + (ch - '0');
}
zkActiveLedgers.add(ledgerId);
}
return zkActiveLedgers;
} | 3.68 |
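A standalone worked example of the digit concatenation above (it does not use the BookKeeper/Pulsar classes, it only mirrors the parsing logic): with ledgerRootPath "/ledgers" and path "/ledgers/00/0053" the sub-path digits form the prefix 53, and a node named "L1652" contributes the digits 1652, so the reconstructed ledger id is 531652.

public class LedgerIdDemo {
    public static void main(String[] args) {
        String ledgerRootPath = "/ledgers";
        String path = "/ledgers/00/0053";
        String ledgerNode = "L1652";

        // Digits of the sub-path below the root form the prefix (here: 53).
        long ledgerId = 0;
        for (int i = ledgerRootPath.length() + 1; i < path.length(); i++) {
            char ch = path.charAt(i);
            if (ch >= '0' && ch <= '9') {
                ledgerId = ledgerId * 10 + (ch - '0');
            }
        }
        // Digits of the node name are appended (here: 1652).
        for (char ch : ledgerNode.toCharArray()) {
            if (ch >= '0' && ch <= '9') {
                ledgerId = ledgerId * 10 + (ch - '0');
            }
        }
        System.out.println(ledgerId); // prints 531652
    }
}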
dubbo_UrlUtils_serializationOrDefault | /**
 * Get the configured serialization, falling back to the default serialization if none is set.
*
* @param url url
* @return {@link String}
*/
public static String serializationOrDefault(URL url) {
//noinspection OptionalGetWithoutIsPresent
Optional<String> serializations = allSerializations(url).stream().findFirst();
return serializations.orElseGet(DefaultSerializationSelector::getDefaultRemotingSerialization);
} | 3.68 |
flink_ZooKeeperStateHandleStore_releaseAll | /**
* Releases all lock nodes of this ZooKeeperStateHandleStore.
*
* @throws Exception if the delete operation of a lock file fails
*/
@Override
public void releaseAll() throws Exception {
Collection<String> children = getAllHandles();
Exception exception = null;
for (String child : children) {
try {
release(child);
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
}
if (exception != null) {
throw new Exception("Could not properly release all state nodes.", exception);
}
} | 3.68 |
flink_ClassLoadingUtils_runWithContextClassLoader | /**
* Runs the given supplier in a {@link TemporaryClassLoaderContext} based on the given
* classloader.
*
* @param supplier supplier to run
* @param contextClassLoader class loader that should be set as the context class loader
*/
public static <T, E extends Throwable> T runWithContextClassLoader(
SupplierWithException<T, E> supplier, ClassLoader contextClassLoader) throws E {
try (TemporaryClassLoaderContext ignored =
TemporaryClassLoaderContext.of(contextClassLoader)) {
return supplier.get();
}
} | 3.68 |
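A hedged usage sketch: load a class by name while a chosen class loader is installed as the thread's context class loader, with the previous one restored afterwards. The import of ClassLoadingUtils is omitted here because its package differs between Flink versions and modules.

public class ContextClassLoaderExample {
    public static void main(String[] args) throws Exception {
        ClassLoader pluginClassLoader = ContextClassLoaderExample.class.getClassLoader(); // placeholder loader

        // The supplier runs with pluginClassLoader as the context class loader;
        // the previous context class loader is restored when the supplier returns or throws.
        Class<?> clazz = ClassLoadingUtils.runWithContextClassLoader(
                () -> Class.forName("java.util.ArrayList", true,
                        Thread.currentThread().getContextClassLoader()),
                pluginClassLoader);
        System.out.println("Loaded " + clazz.getName());
    }
}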
pulsar_BinaryProtoLookupService_getPartitionedTopicMetadata | /**
 * Calls the broker's binary proto lookup API to get the metadata of a partitioned topic.
 */
public CompletableFuture<PartitionedTopicMetadata> getPartitionedTopicMetadata(TopicName topicName) {
final MutableObject<CompletableFuture> newFutureCreated = new MutableObject<>();
try {
return partitionedMetadataInProgress.computeIfAbsent(topicName, tpName -> {
CompletableFuture<PartitionedTopicMetadata> newFuture =
getPartitionedTopicMetadata(serviceNameResolver.resolveHost(), topicName);
newFutureCreated.setValue(newFuture);
return newFuture;
});
} finally {
if (newFutureCreated.getValue() != null) {
newFutureCreated.getValue().whenComplete((v, ex) -> {
partitionedMetadataInProgress.remove(topicName, newFutureCreated.getValue());
});
}
}
} | 3.68 |
dubbo_ApplicationModel_getApplication | /**
* @deprecated Replace to {@link ApplicationModel#getApplicationName()}
*/
@Deprecated
public static String getApplication() {
return getName();
} | 3.68 |
framework_DefaultSQLGenerator_generateDeleteQuery | /*
* (non-Javadoc)
*
* @see com.vaadin.addon.sqlcontainer.query.generator.SQLGenerator#
* generateDeleteQuery(java.lang.String,
* com.vaadin.addon.sqlcontainer.RowItem)
*/
@Override
public StatementHelper generateDeleteQuery(String tableName,
List<String> primaryKeyColumns, String versionColumn,
RowItem item) {
if (tableName == null || tableName.trim().equals("")) {
throw new IllegalArgumentException("Table name must be given.");
}
if (item == null) {
throw new IllegalArgumentException(
"Item to be deleted must be given.");
}
if (primaryKeyColumns == null || primaryKeyColumns.isEmpty()) {
throw new IllegalArgumentException(
"Valid keyColumnNames must be provided.");
}
StatementHelper sh = getStatementHelper();
StringBuilder query = new StringBuilder();
query.append("DELETE FROM ").append(tableName).append(" WHERE ");
int count = 1;
for (String keyColName : primaryKeyColumns) {
if ((this instanceof MSSQLGenerator
|| this instanceof OracleGenerator)
&& keyColName.equalsIgnoreCase("rownum")) {
count++;
continue;
}
if (count > 1) {
query.append(" AND ");
}
if (item.getItemProperty(keyColName).getValue() != null) {
query.append(QueryBuilder.quote(keyColName) + " = ?");
sh.addParameterValue(
item.getItemProperty(keyColName).getValue(),
item.getItemProperty(keyColName).getType());
}
count++;
}
if (versionColumn != null) {
if (!item.getItemPropertyIds().contains(versionColumn)) {
throw new IllegalArgumentException(String.format(
"Table '%s' does not contain version column '%s'.",
tableName, versionColumn));
}
query.append(String.format(" AND %s = ?",
QueryBuilder.quote(versionColumn)));
sh.addParameterValue(item.getItemProperty(versionColumn).getValue(),
item.getItemProperty(versionColumn).getType());
}
sh.setQueryString(query.toString());
return sh;
} | 3.68 |
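A worked illustration of the output shape (hedged; it assumes QueryBuilder's default double-quote identifier quoting): for tableName "PEOPLE", primaryKeyColumns ["ID"], versionColumn "VERSION" and a row whose ID property is non-null, the StatementHelper ends up with the query string DELETE FROM PEOPLE WHERE "ID" = ? AND "VERSION" = ?, with the row's ID and VERSION values registered as its two parameters.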
framework_ServerRpcQueue_setConnection | /**
* Sets the application connection this instance is connected to. Called
* internally by the framework.
*
* @param connection
* the application connection this instance is connected to
*/
public void setConnection(ApplicationConnection connection) {
this.connection = connection;
} | 3.68 |
hadoop_VolumeFailureInfo_getFailureDate | /**
* Returns date/time of failure
*
* @return date/time of failure in milliseconds since epoch
*/
public long getFailureDate() {
return this.failureDate;
} | 3.68 |
hadoop_OBSObjectBucketUtils_createEmptyObject | // Used to create an empty file that represents an empty directory
private static void createEmptyObject(final OBSFileSystem owner,
final String objectName)
throws ObsException, IOException {
for (int retryTime = 1;
retryTime < OBSCommonUtils.MAX_RETRY_TIME; retryTime++) {
try {
innerCreateEmptyObject(owner, objectName);
return;
} catch (ObsException e) {
LOG.warn("Failed to create empty object [{}], retry time [{}], "
+ "exception [{}]", objectName, retryTime, e);
try {
Thread.sleep(OBSCommonUtils.DELAY_TIME);
} catch (InterruptedException ie) {
throw e;
}
}
}
innerCreateEmptyObject(owner, objectName);
} | 3.68 |
hudi_InternalSchemaCache_getInternalSchemaAndAvroSchemaForClusteringAndCompaction | /**
* Get internalSchema and avroSchema for compaction/cluster operation.
*
* @param metaClient current hoodie metaClient
* @param compactionAndClusteringInstant first instant before current compaction/cluster instant
* @return (internalSchemaStrOpt, avroSchemaStrOpt) a pair of InternalSchema/avroSchema
*/
public static Pair<Option<String>, Option<String>> getInternalSchemaAndAvroSchemaForClusteringAndCompaction(HoodieTableMetaClient metaClient, String compactionAndClusteringInstant) {
// try to load internalSchema to support Schema Evolution
HoodieTimeline timelineBeforeCurrentCompaction = metaClient.getCommitsAndCompactionTimeline().findInstantsBefore(compactionAndClusteringInstant).filterCompletedInstants();
Option<HoodieInstant> lastInstantBeforeCurrentCompaction = timelineBeforeCurrentCompaction.lastInstant();
if (lastInstantBeforeCurrentCompaction.isPresent()) {
// try to find internalSchema
byte[] data = timelineBeforeCurrentCompaction.getInstantDetails(lastInstantBeforeCurrentCompaction.get()).get();
HoodieCommitMetadata metadata;
try {
metadata = HoodieCommitMetadata.fromBytes(data, HoodieCommitMetadata.class);
} catch (Exception e) {
throw new HoodieException(String.format("cannot read metadata from commit: %s", lastInstantBeforeCurrentCompaction.get()), e);
}
String internalSchemaStr = metadata.getMetadata(SerDeHelper.LATEST_SCHEMA);
if (internalSchemaStr != null) {
String existingSchemaStr = metadata.getMetadata(HoodieCommitMetadata.SCHEMA_KEY);
return Pair.of(Option.of(internalSchemaStr), Option.of(existingSchemaStr));
}
}
return Pair.of(Option.empty(), Option.empty());
} | 3.68 |
rocketmq-connect_ColumnDefinition_classNameForType | /**
* Returns the fully-qualified name of the Java class whose instances are manufactured if the
* method {@link java.sql.ResultSet#getObject(int)} is called to retrieve a value from the column.
* {@link java.sql.ResultSet#getObject(int)} may return a subclass of the class returned by this
* method.
*
* @return the fully-qualified name of the class in the Java programming language that would be
* used by the method <code>ResultSet.getObject</code> to retrieve the value in the specified
* column. This is the class name used for custom mapping.
*/
public String classNameForType() {
return classNameForType;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectGroupForUpdate | /**
* Tests that we can't combine GROUP BY with FOR UPDATE
*/
@Test(expected = IllegalArgumentException.class)
public void testSelectGroupForUpdate() {
SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE)).groupBy(field("x")).forUpdate();
testDialect.convertStatementToSQL(stmt);
} | 3.68 |
hbase_MemorySizeUtil_getGlobalMemStoreHeapLowerMark | /**
* Retrieve configured size for global memstore lower water mark as fraction of global memstore
* size.
*/
public static float getGlobalMemStoreHeapLowerMark(final Configuration conf,
boolean honorOldConfig) {
String lowMarkPercentStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_KEY);
if (lowMarkPercentStr != null) {
float lowMarkPercent = Float.parseFloat(lowMarkPercentStr);
if (lowMarkPercent > 1.0f) {
LOG.error("Bad configuration value for " + MEMSTORE_SIZE_LOWER_LIMIT_KEY + ": "
+ lowMarkPercent + ". Using 1.0f instead.");
lowMarkPercent = 1.0f;
}
return lowMarkPercent;
}
if (!honorOldConfig) return DEFAULT_MEMSTORE_SIZE_LOWER_LIMIT;
String lowerWaterMarkOldValStr = conf.get(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY);
if (lowerWaterMarkOldValStr != null) {
LOG.warn(MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " is deprecated. Instead use "
+ MEMSTORE_SIZE_LOWER_LIMIT_KEY);
float lowerWaterMarkOldVal = Float.parseFloat(lowerWaterMarkOldValStr);
float upperMarkPercent = getGlobalMemStoreHeapPercent(conf, false);
if (lowerWaterMarkOldVal > upperMarkPercent) {
lowerWaterMarkOldVal = upperMarkPercent;
LOG.error("Value of " + MEMSTORE_SIZE_LOWER_LIMIT_OLD_KEY + " (" + lowerWaterMarkOldVal
+ ") is greater than global memstore limit (" + upperMarkPercent + ") set by "
+ MEMSTORE_SIZE_KEY + "/" + MEMSTORE_SIZE_OLD_KEY + ". Setting memstore lower limit "
+ "to " + upperMarkPercent);
}
return lowerWaterMarkOldVal / upperMarkPercent;
}
return DEFAULT_MEMSTORE_SIZE_LOWER_LIMIT;
} | 3.68 |
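Two worked examples of the code paths above: if the new-style lower-limit key is set to "0.95", that fraction is returned directly (values above 1.0 are clamped to 1.0f); if only the old-style key is set, say to 0.35 of the heap while the global memstore upper limit is 0.4, the value is converted into a fraction of the upper limit and 0.35 / 0.4 = 0.875 is returned.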
flink_Costs_addCosts | /**
 * Adds the given costs to these costs. If for one of the cost components (network,
 * disk, CPU) the costs are unknown, the resulting costs will be unknown.
*
* @param other The costs to add.
*/
public void addCosts(Costs other) {
// ---------- quantifiable costs ----------
if (this.networkCost == UNKNOWN || other.networkCost == UNKNOWN) {
this.networkCost = UNKNOWN;
} else {
this.networkCost += other.networkCost;
}
if (this.diskCost == UNKNOWN || other.diskCost == UNKNOWN) {
this.diskCost = UNKNOWN;
} else {
this.diskCost += other.diskCost;
}
if (this.cpuCost == UNKNOWN || other.cpuCost == UNKNOWN) {
this.cpuCost = UNKNOWN;
} else {
this.cpuCost += other.cpuCost;
}
// ---------- heuristic costs ----------
this.heuristicNetworkCost += other.heuristicNetworkCost;
this.heuristicDiskCost += other.heuristicDiskCost;
this.heuristicCpuCost += other.heuristicCpuCost;
} | 3.68 |
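A brief worked example of the UNKNOWN propagation above: adding a Costs whose networkCost is 100 to one whose networkCost is UNKNOWN leaves the summed networkCost UNKNOWN, and the same rule applies independently to the disk and CPU components, while the heuristic costs are always summed numerically.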
hmily_RepositoryPathUtils_getFullFileName | /**
* Gets full file name.
*
* @param filePath the file path
* @param id the id
* @return the full file name
*/
public static String getFullFileName(final String filePath, final String id) {
return String.format("%s/%s", filePath, id);
} | 3.68 |
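A worked example with a hypothetical id: getFullFileName("/hmily/repository", "tx-10086") returns "/hmily/repository/tx-10086"; the method simply joins the base path and the id with a single '/'.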