name | code_snippet | score |
---|---|---|
flink_MailboxProcessor_close | /**
* Lifecycle method to close the mailbox for action submission/retrieval. This will cancel all
* instances of {@link java.util.concurrent.RunnableFuture} that are still contained in the
* mailbox.
*/
@Override
public void close() {
List<Mail> droppedMails = mailbox.close();
if (!droppedMails.isEmpty()) {
LOG.debug("Closing the mailbox dropped mails {}.", droppedMails);
Optional<RuntimeException> maybeErr = Optional.empty();
for (Mail droppedMail : droppedMails) {
try {
droppedMail.tryCancel(false);
} catch (RuntimeException x) {
maybeErr =
Optional.of(ExceptionUtils.firstOrSuppressed(x, maybeErr.orElse(null)));
}
}
maybeErr.ifPresent(
e -> {
throw e;
});
}
} | 3.68 |
morf_AbstractSqlDialectTest_testMathsPlus | /**
* Test that adding numbers returns as expected.
*/
@Test
public void testMathsPlus() {
String result = testDialect.getSqlFrom(new MathsField(new FieldLiteral(1), MathsOperator.PLUS, new FieldLiteral(1)));
assertEquals(expectedMathsPlus(), result);
} | 3.68 |
flink_RuntimeRestAPIDocGenerator_main | /**
* Generates the Runtime REST API documentation.
*
* @param args args[0] contains the directory into which the generated files are placed
* @throws IOException if any file operation failed
*/
public static void main(String[] args) throws IOException, ConfigurationException {
String outputDirectory = args[0];
for (final RuntimeRestAPIVersion apiVersion : RuntimeRestAPIVersion.values()) {
if (apiVersion == RuntimeRestAPIVersion.V0) {
// this version exists only for testing purposes
continue;
}
createHtmlFile(
new DocumentingDispatcherRestEndpoint(),
apiVersion,
Paths.get(
outputDirectory,
"rest_" + apiVersion.getURLVersionPrefix() + "_dispatcher.html"));
}
} | 3.68 |
flink_DataSet_partitionByHash | /**
* Partitions a DataSet using the specified KeySelector.
*
 * <p><b>Important:</b> This operation shuffles the whole DataSet over the network and can take a
 * significant amount of time.
*
* @param keyExtractor The KeyExtractor with which the DataSet is hash-partitioned.
* @return The partitioned DataSet.
* @see KeySelector
*/
public <K extends Comparable<K>> PartitionOperator<T> partitionByHash(
KeySelector<T, K> keyExtractor) {
final TypeInformation<K> keyType =
TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
return new PartitionOperator<>(
this,
PartitionMethod.HASH,
new Keys.SelectorFunctionKeys<>(clean(keyExtractor), this.getType(), keyType),
Utils.getCallLocationName());
} | 3.68 |
framework_LegacyCommunicationManager_repaintAll | /**
* Requests that the given UI should be fully re-rendered on the client
* side.
*
 * @since 7.1
 * @deprecated As of 7.1. Should be refactored once locales are fixed (#11378)
*/
@Deprecated
public void repaintAll(UI ui) {
getClientCache(ui).clear();
ui.getConnectorTracker().markAllConnectorsDirty();
ui.getConnectorTracker().markAllClientSidesUninitialized();
} | 3.68 |
dubbo_ConcurrentHashSet_isEmpty | /**
* Returns <tt>true</tt> if this set contains no elements.
*
* @return <tt>true</tt> if this set contains no elements
*/
@Override
public boolean isEmpty() {
return map.isEmpty();
} | 3.68 |
flink_JobID_generate | /**
* Creates a new (statistically) random JobID.
*
* @return A new random JobID.
*/
public static JobID generate() {
return new JobID();
} | 3.68 |
hbase_Sleeper_skipSleepCycle | /**
* If currently asleep, stops sleeping; if not asleep, will skip the next sleep cycle.
*/
public void skipSleepCycle() {
synchronized (sleepLock) {
triggerWake = true;
sleepLock.notifyAll();
}
} | 3.68 |
flink_NetworkBufferPool_redistributeBuffers | // Must be called from synchronized block
private void redistributeBuffers() {
assert Thread.holdsLock(factoryLock);
if (resizableBufferPools.isEmpty()) {
return;
}
// All buffers, which are not among the required ones
final int numAvailableMemorySegment = totalNumberOfMemorySegments - numTotalRequiredBuffers;
if (numAvailableMemorySegment == 0) {
// in this case, we need to redistribute buffers so that every pool gets its minimum
for (LocalBufferPool bufferPool : resizableBufferPools) {
bufferPool.setNumBuffers(bufferPool.getNumberOfRequiredMemorySegments());
}
return;
}
/*
* With buffer pools being potentially limited, let's distribute the available memory
* segments based on the capacity of each buffer pool, i.e. the maximum number of segments
* an unlimited buffer pool can take is numAvailableMemorySegment, for limited buffer pools
* it may be less. Based on this and the sum of all these values (totalCapacity), we build
* a ratio that we use to distribute the buffers.
*/
long totalCapacity = 0; // long to avoid int overflow
for (LocalBufferPool bufferPool : resizableBufferPools) {
int excessMax =
bufferPool.getMaxNumberOfMemorySegments()
- bufferPool.getNumberOfRequiredMemorySegments();
totalCapacity += Math.min(numAvailableMemorySegment, excessMax);
}
// no capacity to receive additional buffers?
if (totalCapacity == 0) {
return; // necessary to avoid div by zero when nothing to re-distribute
}
// since one of the arguments of 'min(a,b)' is a positive int, this is actually
// guaranteed to be within the 'int' domain
// (we use a checked downCast to handle possible bugs more gracefully).
final int memorySegmentsToDistribute =
MathUtils.checkedDownCast(Math.min(numAvailableMemorySegment, totalCapacity));
long totalPartsUsed = 0; // of totalCapacity
int numDistributedMemorySegment = 0;
for (LocalBufferPool bufferPool : resizableBufferPools) {
int excessMax =
bufferPool.getMaxNumberOfMemorySegments()
- bufferPool.getNumberOfRequiredMemorySegments();
// shortcut
if (excessMax == 0) {
continue;
}
totalPartsUsed += Math.min(numAvailableMemorySegment, excessMax);
// avoid remaining buffers by looking at the total capacity that should have been
// re-distributed up until here
// the downcast will always succeed, because both arguments of the subtraction are in
// the 'int' domain
final int mySize =
MathUtils.checkedDownCast(
memorySegmentsToDistribute * totalPartsUsed / totalCapacity
- numDistributedMemorySegment);
numDistributedMemorySegment += mySize;
bufferPool.setNumBuffers(bufferPool.getNumberOfRequiredMemorySegments() + mySize);
}
assert (totalPartsUsed == totalCapacity);
assert (numDistributedMemorySegment == memorySegmentsToDistribute);
} | 3.68 |
flink_TemporalRowTimeJoinOperator_latestRightRowToJoin | /**
* Binary search {@code rightRowsSorted} to find the latest right row to join with {@code
* leftTime}. Latest means a right row with largest time that is still smaller or equal to
* {@code leftTime}. For example with: rightState = [1(+I), 4(+U), 7(+U), 9(-D), 12(I)],
*
* <p>If left time is 6, the valid period should be [4, 7), data 4(+U) should be joined.
*
* <p>If left time is 10, the valid period should be [9, 12), but data 9(-D) is a DELETE message
 * which means the corresponding version has no data in the period [9, 12), so data 9(-D) should not be
* correlated.
*
 * @return found element or {@code Optional.empty} if such a row was not found (either {@code
 *     rightRowsSorted} is empty or all {@code rightRowsSorted} entries are newer).
*/
private Optional<RowData> latestRightRowToJoin(List<RowData> rightRowsSorted, long leftTime) {
return latestRightRowToJoin(rightRowsSorted, 0, rightRowsSorted.size() - 1, leftTime);
} | 3.68 |
hadoop_HttpFSServerWebApp_get | /**
* Returns HttpFSServer server singleton, configuration and services are
* accessible through it.
*
* @return the HttpFSServer server singleton.
*/
public static HttpFSServerWebApp get() {
return SERVER;
} | 3.68 |
hbase_CellChunkImmutableSegment_initializeCellSet | ///////////////////// PRIVATE METHODS /////////////////////
/*------------------------------------------------------------------------*/
// Create CellSet based on CellChunkMap from compacting iterator
private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator,
MemStoreCompactionStrategy.Action action) {
int numOfCellsAfterCompaction = 0;
int currentChunkIdx = 0;
int offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER;
int numUniqueKeys = 0;
Cell prev = null;
Chunk[] chunks = allocIndexChunks(numOfCells);
while (iterator.hasNext()) { // the iterator hides the elimination logic for compaction
boolean alreadyCopied = false;
Cell c = iterator.next();
numOfCellsAfterCompaction++;
assert (c instanceof ExtendedCell);
if (((ExtendedCell) c).getChunkId() == ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
// CellChunkMap assumes all cells are allocated on MSLAB.
// Therefore, cells which are not allocated on MSLAB initially,
// are copied into MSLAB here.
c = copyCellIntoMSLAB(c, null); // no memstore sizing object to update
alreadyCopied = true;
}
if (offsetInCurentChunk + ClassSize.CELL_CHUNK_MAP_ENTRY > chunks[currentChunkIdx].size) {
currentChunkIdx++; // continue to the next index chunk
offsetInCurentChunk = ChunkCreator.SIZEOF_CHUNK_HEADER;
}
if (action == MemStoreCompactionStrategy.Action.COMPACT && !alreadyCopied) {
// For compaction copy cell to the new segment (MSLAB copy), here we set forceCloneOfBigCell
// to true, because the chunk which the cell is allocated may be freed after the compaction
// is completed, see HBASE-27464.
c = maybeCloneWithAllocator(c, true);
}
offsetInCurentChunk = // add the Cell reference to the index chunk
createCellReference((ByteBufferKeyValue) c, chunks[currentChunkIdx].getData(),
offsetInCurentChunk);
// the sizes still need to be updated in the new segment
// second parameter true, because in compaction/merge the addition of the cell to new segment
// is always successful
updateMetaInfo(c, true, null); // updates the size per cell
if (action == MemStoreCompactionStrategy.Action.MERGE_COUNT_UNIQUE_KEYS) {
// counting number of unique keys
if (prev != null) {
if (!CellUtil.matchingRowColumnBytes(prev, c)) {
numUniqueKeys++;
}
} else {
numUniqueKeys++;
}
}
prev = c;
}
if (action == MemStoreCompactionStrategy.Action.COMPACT) {
numUniqueKeys = numOfCells;
} else if (action != MemStoreCompactionStrategy.Action.MERGE_COUNT_UNIQUE_KEYS) {
numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES;
}
// build the immutable CellSet
CellChunkMap ccm =
new CellChunkMap(getComparator(), chunks, 0, numOfCellsAfterCompaction, false);
this.setCellSet(null, new CellSet(ccm, numUniqueKeys)); // update the CellSet of this Segment
} | 3.68 |
pulsar_BKCluster_stopBKCluster | /**
 * Stops the cluster. Also stops all the auto recovery processes for the bookie
 * cluster, if isAutoRecoveryEnabled is true.
*
* @throws Exception
*/
protected void stopBKCluster() throws Exception {
bookieComponents.forEach(LifecycleComponentStack::close);
bookieComponents.clear();
} | 3.68 |
hbase_TableDescriptorBuilder_getTableName | /**
* Get the name of the table
*/
@Override
public TableName getTableName() {
return name;
} | 3.68 |
hudi_BaseHoodieWriteClient_mayBeCleanAndArchive | /**
 * Triggers cleaning and archival for the table of interest. This method is called outside of locks, so internal callers should ensure they acquire the lock wherever applicable.
* @param table instance of {@link HoodieTable} of interest.
*/
protected void mayBeCleanAndArchive(HoodieTable table) {
autoCleanOnCommit();
// reload table so that the timeline reflects the clean commit
autoArchiveOnCommit(createTable(config, hadoopConf));
} | 3.68 |
framework_PropertyFormatter_setReadOnly | /**
* Sets the Property's read-only mode to the specified status.
*
* @param newStatus
* the new read-only status of the Property.
*/
@Override
public void setReadOnly(boolean newStatus) {
if (dataSource != null) {
dataSource.setReadOnly(newStatus);
}
} | 3.68 |
hmily_ConfigLoader_passive | /**
* Passive subscription processes related events. When the current event is processed,
* the push method is called to push it to subscribers in the system.
*
* @param context the context
* @param handler the handler
* @param config Configuration information of things processed by load method
* @see #push(Supplier, EventData) #push(Supplier, EventData)
*/
default void passive(final Supplier<Context> context,
final PassiveHandler<Config> handler,
Config config) {
} | 3.68 |
querydsl_DateTimeExpression_year | /**
* Create a year expression
*
* @return year
*/
public NumberExpression<Integer> year() {
if (year == null) {
year = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.YEAR, mixin);
}
return year;
} | 3.68 |
flink_EvictingWindowSavepointReader_process | /**
* Reads window state generated without any preaggregation such as {@code WindowedStream#apply}
* and {@code WindowedStream#process}.
*
* @param uid The uid of the operator.
* @param readerFunction The window reader function.
* @param keyType The key type of the window.
* @param stateType The type of records stored in state.
* @param outputType The output type of the reader function.
* @param <K> The type of the key.
* @param <T> The type of the records stored in state.
* @param <OUT> The output type of the reader function.
* @return A {@code DataStream} of objects read from keyed state.
* @throws IOException If the savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataStream<OUT> process(
String uid,
WindowReaderFunction<T, OUT, K, W> readerFunction,
TypeInformation<K> keyType,
TypeInformation<T> stateType,
TypeInformation<OUT> outputType)
throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator =
WindowReaderOperator.evictingWindow(
new ProcessEvictingWindowReader<>(readerFunction),
keyType,
windowSerializer,
stateType,
env.getConfig());
return readWindowOperator(uid, outputType, operator);
} | 3.68 |
flink_ZooKeeperUtils_createLeaderRetrievalService | /**
* Creates a {@link DefaultLeaderRetrievalService} instance with {@link
* ZooKeeperLeaderRetrievalDriver}.
*
* @param client The {@link CuratorFramework} ZooKeeper client to use
* @param path The path for the leader retrieval
* @param configuration configuration for further config options
* @return {@link DefaultLeaderRetrievalService} instance.
*/
public static DefaultLeaderRetrievalService createLeaderRetrievalService(
final CuratorFramework client, final String path, final Configuration configuration) {
return new DefaultLeaderRetrievalService(
createLeaderRetrievalDriverFactory(client, path, configuration));
} | 3.68 |
cron-utils_FieldConstraintsBuilder_createConstraintsInstance | /**
* Creates FieldConstraints instance based on previously built parameters.
*
* @return new FieldConstraints instance
*/
public FieldConstraints createConstraintsInstance() {
return new FieldConstraints(stringMapping, intMapping, specialChars, startRange, endRange, strictRange);
} | 3.68 |
framework_VaadinSession_getLastRequestDuration | /**
* @return The time spent servicing the last request in this session, in
* milliseconds.
*/
public long getLastRequestDuration() {
assert hasLock();
return lastRequestDuration;
} | 3.68 |
hadoop_ReferenceCountMap_getUniqueElementsSize | /**
* Get the number of unique elements
*/
public int getUniqueElementsSize() {
return referenceMap.size();
} | 3.68 |
flink_AbstractBytesMultiMap_writePointer | /** Write value into the output view, and return offset of the value. */
private long writePointer(SimpleCollectingOutputView outputView, int value) throws IOException {
int oldPosition = (int) outputView.getCurrentOffset();
int skip = checkSkipWriteForPointer(outputView);
outputView.getCurrentSegment().putInt(outputView.getCurrentPositionInSegment(), value);
// advance position in segment
outputView.skipBytesToWrite(ELEMENT_POINT_LENGTH);
return oldPosition + skip;
} | 3.68 |
hbase_Replication_stopReplicationService | /**
* Stops replication service.
*/
@Override
public void stopReplicationService() {
this.replicationManager.join();
} | 3.68 |
flink_SqlGatewayRestAPIVersion_getURLVersionPrefix | /**
* Returns the URL version prefix (e.g. "v1") for this version.
*
* @return URL version prefix
*/
@Override
public String getURLVersionPrefix() {
return name().toLowerCase();
} | 3.68 |
framework_VComboBox_resetLastNewItemString | /**
 * Resets the saved item string that was added last time.
*/
public void resetLastNewItemString() {
// Clean the temp string eagerly in order to re-add the same value again
// after data provider got reset.
// Fixes issue https://github.com/vaadin/framework/issues/11317
lastNewItemString = null;
} | 3.68 |
hbase_AccessController_preBulkLoadHFile | /**
* Verifies user has CREATE or ADMIN privileges on the Column Families involved in the
* bulkLoadHFile request. Specific Column Write privileges are presently ignored.
*/
@Override
public void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths) throws IOException {
User user = getActiveUser(ctx);
for (Pair<byte[], String> el : familyPaths) {
accessChecker.requirePermission(user, "preBulkLoadHFile",
ctx.getEnvironment().getRegion().getTableDescriptor().getTableName(), el.getFirst(), null,
null, Action.ADMIN, Action.CREATE);
}
} | 3.68 |
zxing_ResultHandler_areContentsSecure | /**
* Some barcode contents are considered secure, and should not be saved to history, copied to
* the clipboard, or otherwise persisted.
*
* @return If true, do not create any permanent record of these contents.
*/
public boolean areContentsSecure() {
return false;
} | 3.68 |
hadoop_EntityCacheItem_getAppLogs | /**
* @return The application log associated to this cache item, may be null.
*/
public synchronized EntityGroupFSTimelineStore.AppLogs getAppLogs() {
return this.appLogs;
} | 3.68 |
morf_TableBean_columns | /**
* @see org.alfasoftware.morf.metadata.Table#columns()
*/
@Override
public List<Column> columns() {
return columns;
} | 3.68 |
hbase_BrokenStoreFileCleaner_isCompactedFile | // Compacted files can still have readers and are cleaned by a separate chore, so they have to
// be skipped here
private boolean isCompactedFile(FileStatus file, HStore store) {
return store.getStoreEngine().getStoreFileManager().getCompactedfiles().stream()
.anyMatch(sf -> sf.getPath().equals(file.getPath()));
} | 3.68 |
framework_GridDragSource_generateData | /**
* Drag data generator. Appends drag data to row data json if generator
* function(s) are set by the user of this extension.
*
* @param item
* Row item for data generation.
* @param jsonObject
* Row data in json format.
*/
@Override
public void generateData(Object item, JsonObject jsonObject) {
JsonObject generatedValues = Json.createObject();
generatorFunctions.forEach((type, generator) -> generatedValues
.put(type, generator.apply((T) item)));
jsonObject.put(GridDragSourceState.JSONKEY_DRAG_DATA, generatedValues);
} | 3.68 |
flink_MapValue_toString | /*
* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return this.map.toString();
} | 3.68 |
hadoop_MetricsCache_update | /**
* Update the cache and return the current cache record
* @param mr the update record
* @return the updated cache record
*/
public Record update(MetricsRecord mr) {
return update(mr, false);
} | 3.68 |
hbase_MetricsRegionServerWrapperImpl_initMobFileCache | /**
* Initializes the mob file cache.
*/
private void initMobFileCache() {
this.mobFileCache = this.regionServer.getMobFileCache().orElse(null);
} | 3.68 |
hadoop_AuxServiceRecord_toIndentedString | /**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
} | 3.68 |
flink_LocalFileSystem_pathToFile | /**
* Converts the given Path to a File for this file system. If the path is empty, we will return
* <tt>new File(".")</tt> instead of <tt>new File("")</tt>, since the latter returns
* <tt>false</tt> for <tt>isDirectory</tt> judgement (See issue
* https://issues.apache.org/jira/browse/FLINK-18612).
*/
public File pathToFile(Path path) {
String localPath = path.getPath();
checkState(localPath != null, "Cannot convert a null path to File");
if (localPath.length() == 0) {
return new File(".");
}
return new File(localPath);
} | 3.68 |
hudi_ColumnStatsIndices_transposeColumnStatsIndex | /**
* Transposes and converts the raw table format of the Column Stats Index representation,
* where each row/record corresponds to individual (column, file) pair, into the table format
* where each row corresponds to single file with statistic for individual columns collated
* w/in such row:
* <p>
* Metadata Table Column Stats Index format:
*
* <pre>
* +---------------------------+------------+------------+------------+-------------+
* | fileName | columnName | minValue | maxValue | num_nulls |
* +---------------------------+------------+------------+------------+-------------+
* | one_base_file.parquet | A | 1 | 10 | 0 |
* | another_base_file.parquet | A | -10 | 0 | 5 |
* +---------------------------+------------+------------+------------+-------------+
* </pre>
* <p>
* Returned table format
*
* <pre>
* +---------------------------+------------+------------+-------------+
* | file | A_minValue | A_maxValue | A_nullCount |
* +---------------------------+------------+------------+-------------+
* | one_base_file.parquet | 1 | 10 | 0 |
* | another_base_file.parquet | -10 | 0 | 5 |
* +---------------------------+------------+------------+-------------+
* </pre>
* <p>
* NOTE: Column Stats Index might potentially contain statistics for many columns (if not all), while
* query at hand might only be referencing a handful of those. As such, we collect all the
* column references from the filtering expressions, and only transpose records corresponding to the
* columns referenced in those
*
* @param colStats RowData list bearing raw Column Stats Index table
* @param queryColumns target columns to be included into the final table
* @param tableSchema schema of the source data table
* @return reshaped table according to the format outlined above
*/
public static Pair<List<RowData>, String[]> transposeColumnStatsIndex(List<RowData> colStats, String[] queryColumns, RowType tableSchema) {
Map<String, LogicalType> tableFieldTypeMap = tableSchema.getFields().stream()
.collect(Collectors.toMap(RowType.RowField::getName, RowType.RowField::getType));
// NOTE: We have to collect list of indexed columns to make sure we properly align the rows
// w/in the transposed dataset: since some files might not have all the columns indexed
// either due to the Column Stats Index config changes, schema evolution, etc. we have
// to make sure that all the rows w/in transposed data-frame are properly padded (with null
// values) for such file-column combinations
Set<String> indexedColumns = colStats.stream().map(row -> row.getString(ORD_COL_NAME)
.toString()).collect(Collectors.toSet());
// NOTE: We're sorting the columns to make sure final index schema matches layout
// of the transposed table
TreeSet<String> sortedTargetColumns = Arrays.stream(queryColumns).sorted()
.filter(indexedColumns::contains)
.collect(Collectors.toCollection(TreeSet::new));
final Map<LogicalType, AvroToRowDataConverters.AvroToRowDataConverter> converters = new ConcurrentHashMap<>();
Map<StringData, List<RowData>> fileNameToRows = colStats.stream().parallel()
.filter(row -> sortedTargetColumns.contains(row.getString(ORD_COL_NAME).toString()))
.map(row -> {
if (row.isNullAt(ORD_MIN_VAL) && row.isNullAt(ORD_MAX_VAL)) {
// Corresponding row could be null in either of the 2 cases
// - Column contains only null values (in that case both min/max have to be nulls)
// - This is a stubbed Column Stats record (used as a tombstone)
return row;
} else {
String colName = row.getString(ORD_COL_NAME).toString();
LogicalType colType = tableFieldTypeMap.get(colName);
return unpackMinMaxVal(row, colType, converters);
}
}).collect(Collectors.groupingBy(rowData -> rowData.getString(ORD_FILE_NAME)));
return Pair.of(foldRowsByFiles(sortedTargetColumns, fileNameToRows), sortedTargetColumns.toArray(new String[0]));
} | 3.68 |
hudi_SanitizationUtils_parseSanitizedAvroSchemaNoThrow | /**
* Sanitizes illegal field names in the schema using recursive calls to transformMap and transformList
*/
private static Option<Schema> parseSanitizedAvroSchemaNoThrow(String schemaStr, String invalidCharMask) {
try {
OM.enable(JsonParser.Feature.ALLOW_COMMENTS);
Map<String, Object> objMap = OM.readValue(schemaStr, Map.class);
Map<String, Object> modifiedMap = transformMap(objMap, invalidCharMask);
return Option.of(new Schema.Parser().parse(OM.writeValueAsString(modifiedMap)));
} catch (Exception ex) {
return Option.empty();
}
} | 3.68 |
hbase_TimestampsFilter_getMin | /**
* Gets the minimum timestamp requested by filter.
* @return minimum timestamp requested by filter.
*/
public long getMin() {
return minTimestamp;
} | 3.68 |
framework_Tree_setDragMode | /**
* Sets the drag mode that controls how Tree behaves as a
* {@link DragSource}.
*
* @param dragMode
* the drag mode to set
*/
public void setDragMode(TreeDragMode dragMode) {
this.dragMode = dragMode;
markAsDirty();
} | 3.68 |
flink_Path_getParent | /**
* Returns the parent of a path, i.e., everything that precedes the last separator or <code>null
* </code> if at root.
*
* @return the parent of a path or <code>null</code> if at root.
*/
public Path getParent() {
final String path = uri.getPath();
final int lastSlash = path.lastIndexOf('/');
final int start = hasWindowsDrive(path, true) ? 3 : 0;
if ((path.length() == start)
|| // empty path
(lastSlash == start && path.length() == start + 1)) { // at root
return null;
}
String parent;
if (lastSlash == -1) {
parent = CUR_DIR;
} else {
final int end = hasWindowsDrive(path, true) ? 3 : 0;
parent = path.substring(0, lastSlash == end ? end + 1 : lastSlash);
}
return new Path(uri.getScheme(), uri.getAuthority(), parent);
} | 3.68 |
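A brief usage sketch of the root/parent logic above (my own illustration, not from the Flink source; it assumes `org.apache.flink.core.fs.Path` is on the classpath and the comments show the expected parent paths):

```java
import org.apache.flink.core.fs.Path;

public class PathParentDemo {
    public static void main(String[] args) {
        System.out.println(new Path("/a/b/c").getParent()); // /a/b  (everything before the last slash)
        System.out.println(new Path("/a").getParent());     // /     (lastSlash == start, length > start + 1)
        System.out.println(new Path("/").getParent());      // null  (already at root)
    }
}
```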
flink_LimitedConnectionsFileSystem_getNumberOfOpenInputStreams | /** Gets the number of currently open input streams. */
public int getNumberOfOpenInputStreams() {
return numReservedInputStreams;
} | 3.68 |
framework_SharedStateWriter_write | /**
* Writes a JSON object containing the pending state changes of the dirty
* connectors of the given UI.
*
* @param ui
* The UI whose state changes should be written.
* @param writer
* The writer to use.
* @return a set of connector ids with state changes
* @throws IOException
* If the serialization fails.
*/
public Set<String> write(UI ui, Writer writer) throws IOException {
Collection<ClientConnector> dirtyVisibleConnectors = ui
.getConnectorTracker().getDirtyVisibleConnectors();
Set<String> writtenConnectors = new HashSet<>();
JsonObject sharedStates = Json.createObject();
for (ClientConnector connector : dirtyVisibleConnectors) {
// encode and send shared state
String connectorId = connector.getConnectorId();
try {
JsonObject stateJson = connector.encodeState();
if (stateJson != null && stateJson.keys().length != 0) {
sharedStates.put(connectorId, stateJson);
writtenConnectors.add(connectorId);
}
} catch (JsonException e) {
throw new PaintException(
"Failed to serialize shared state for connector "
+ connector.getClass().getName() + " ("
+ connectorId + "): " + e.getMessage(),
e);
}
}
writer.write(JsonUtil.stringify(sharedStates));
return writtenConnectors;
} | 3.68 |
framework_VaadinSession_getForSession | /**
* Loads the VaadinSession for the given service and WrappedSession from the
* HTTP session.
*
* @param service
* The service the VaadinSession is associated with
* @param underlyingSession
* The wrapped HTTP session for the user
* @return A VaadinSession instance for the service, session combination or
* null if none was found.
* @deprecated as of 7.6, call
* {@link VaadinService#loadSession(WrappedSession)} instead
*/
@Deprecated
public static VaadinSession getForSession(VaadinService service,
WrappedSession underlyingSession) {
return service.loadSession(underlyingSession);
} | 3.68 |
morf_GraphBasedUpgrade_getRoot | /**
* @return no-op upgrade node which is the root of the graph
*/
public GraphBasedUpgradeNode getRoot() {
return root;
} | 3.68 |
framework_WebBrowser_isChromeOS | /**
* Tests if the browser is run on ChromeOS (e.g. a Chromebook).
*
 * @return true if run on ChromeOS, false if the user is not using ChromeOS
* or if no information on the browser is present
* @since 8.1.1
*/
public boolean isChromeOS() {
return browserDetails.isChromeOS();
} | 3.68 |
flink_PendingCheckpoint_getCompletionFuture | /**
* Returns the completion future.
*
* @return A future to the completed checkpoint
*/
public CompletableFuture<CompletedCheckpoint> getCompletionFuture() {
return onCompletionPromise;
} | 3.68 |
hadoop_BlockDispatcher_moveBlock | /**
* Moves the given block replica to the given target node and wait for the
* response.
*
* @param blkMovingInfo
* block to storage info
* @param saslClient
* SASL for DataTransferProtocol on behalf of a client
* @param eb
* extended block info
* @param sock
* target node's socket
* @param km
* for creation of an encryption key
* @param accessToken
* connection block access token
* @return status of the block movement
*/
public BlockMovementStatus moveBlock(BlockMovingInfo blkMovingInfo,
SaslDataTransferClient saslClient, ExtendedBlock eb, Socket sock,
DataEncryptionKeyFactory km, Token<BlockTokenIdentifier> accessToken) throws IOException {
LOG.info("Start moving block:{} from src:{} to destin:{} to satisfy "
+ "storageType, sourceStoragetype:{} and destinStoragetype:{}",
blkMovingInfo.getBlock(), blkMovingInfo.getSource(),
blkMovingInfo.getTarget(), blkMovingInfo.getSourceStorageType(),
blkMovingInfo.getTargetStorageType());
DataOutputStream out = null;
DataInputStream in = null;
try {
NetUtils.connect(sock,
NetUtils.createSocketAddr(
blkMovingInfo.getTarget().getXferAddr(connectToDnViaHostname)),
socketTimeout);
// Set read timeout so that it doesn't hang forever against
// unresponsive nodes. Datanode normally sends IN_PROGRESS response
// twice within the client read timeout period (every 30 seconds by
// default). Here, we make it give up after "socketTimeout * 5" period
// of no response.
sock.setSoTimeout(socketTimeout * 5);
sock.setKeepAlive(true);
OutputStream unbufOut = sock.getOutputStream();
InputStream unbufIn = sock.getInputStream();
LOG.debug("Connecting to datanode {}", blkMovingInfo.getTarget());
IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
unbufIn, km, accessToken, blkMovingInfo.getTarget());
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(
new BufferedOutputStream(unbufOut, ioFileBufferSize));
in = new DataInputStream(
new BufferedInputStream(unbufIn, ioFileBufferSize));
sendRequest(out, eb, accessToken, blkMovingInfo.getSource(),
blkMovingInfo.getTargetStorageType());
receiveResponse(in);
LOG.info(
"Successfully moved block:{} from src:{} to destin:{} for"
+ " satisfying storageType:{}",
blkMovingInfo.getBlock(), blkMovingInfo.getSource(),
blkMovingInfo.getTarget(), blkMovingInfo.getTargetStorageType());
return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
} catch (BlockPinningException e) {
// Pinned block won't be able to move to a different node. So, it's not
// required to do retries, just marked as SUCCESS.
LOG.debug("Pinned block can't be moved, so skipping block:{}",
blkMovingInfo.getBlock(), e);
return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
} finally {
IOUtils.closeStream(out);
IOUtils.closeStream(in);
IOUtils.closeSocket(sock);
}
} | 3.68 |
rocketmq-connect_JsonConverter_asConnectSchema | /**
 * Converts the given JSON schema object to a Connect schema, if not null.
*
* @param jsonSchema
* @return
*/
public Schema asConnectSchema(JSONObject jsonSchema) {
// schema null
if (jsonSchema == null) {
return null;
}
Schema cached = toConnectSchemaCache.get(jsonSchema);
if (cached != null) {
return cached;
}
String schemaType = String.valueOf(jsonSchema.get(JsonSchema.SCHEMA_TYPE_FIELD_NAME));
if (StringUtils.isEmpty(schemaType)) {
throw new ConnectException("Schema must contain 'type' field");
}
final SchemaBuilder builder;
switch (schemaType) {
case JsonSchema.BOOLEAN_TYPE_NAME:
builder = SchemaBuilder.bool();
break;
case JsonSchema.INT8_TYPE_NAME:
builder = SchemaBuilder.int8();
break;
case JsonSchema.INT16_TYPE_NAME:
builder = SchemaBuilder.int16();
break;
case JsonSchema.INT32_TYPE_NAME:
builder = SchemaBuilder.int32();
break;
case JsonSchema.INT64_TYPE_NAME:
builder = SchemaBuilder.int64();
break;
case JsonSchema.FLOAT_TYPE_NAME:
builder = SchemaBuilder.float32();
break;
case JsonSchema.DOUBLE_TYPE_NAME:
builder = SchemaBuilder.float64();
break;
case JsonSchema.BYTES_TYPE_NAME:
builder = SchemaBuilder.bytes();
break;
case JsonSchema.STRING_TYPE_NAME:
builder = SchemaBuilder.string();
break;
case JsonSchema.ARRAY_TYPE_NAME:
JSONObject elemSchema = (JSONObject) jsonSchema.get(JsonSchema.ARRAY_ITEMS_FIELD_NAME);
if (Objects.isNull(elemSchema)) {
throw new ConnectException("Array schema did not specify the element type");
}
builder = SchemaBuilder.array(asConnectSchema(elemSchema));
break;
case JsonSchema.MAP_TYPE_NAME:
JSONObject keySchema = (JSONObject) jsonSchema.get(JsonSchema.MAP_KEY_FIELD_NAME);
if (keySchema == null) {
throw new ConnectException("Map schema did not specify the key type");
}
JSONObject valueSchema = (JSONObject) jsonSchema.get(JsonSchema.MAP_VALUE_FIELD_NAME);
if (valueSchema == null) {
throw new ConnectException("Map schema did not specify the value type");
}
builder = SchemaBuilder.map(asConnectSchema(keySchema), asConnectSchema(valueSchema));
break;
case JsonSchema.STRUCT_TYPE_NAME:
builder = SchemaBuilder.struct();
List<JSONObject> fields = (List<JSONObject>) jsonSchema.get(JsonSchema.STRUCT_FIELDS_FIELD_NAME);
if (Objects.isNull(fields)) {
throw new ConnectException("Struct schema's \"fields\" argument is not an array.");
}
for (JSONObject field : fields) {
String jsonFieldName = field.getString(JsonSchema.STRUCT_FIELD_NAME_FIELD_NAME);
if (jsonFieldName == null) {
throw new ConnectException("Struct schema's field name not specified properly");
}
builder.field(jsonFieldName, asConnectSchema(field));
}
break;
default:
throw new ConnectException("Unknown schema type: " + schemaType);
}
// optional
Boolean isOptional = jsonSchema.getBoolean(JsonSchema.SCHEMA_OPTIONAL_FIELD_NAME);
if (isOptional != null && isOptional) {
builder.optional();
}
// schema name
String schemaName = jsonSchema.getString(JsonSchema.SCHEMA_NAME_FIELD_NAME);
builder.name(schemaName);
// schema version
Object version = jsonSchema.get(JsonSchema.SCHEMA_VERSION_FIELD_NAME);
if (version != null && version instanceof Integer) {
builder.version(Integer.parseInt(version.toString()));
}
// schema doc
String doc = jsonSchema.getString(JsonSchema.SCHEMA_DOC_FIELD_NAME);
if (StringUtils.isNotEmpty(doc)) {
builder.doc(doc);
}
// schema parameter
JSONObject schemaParams = (JSONObject) jsonSchema.get(JsonSchema.SCHEMA_PARAMETERS_FIELD_NAME);
if (schemaParams != null) {
Map<String, Object> paramsIt = schemaParams.getInnerMap();
paramsIt.forEach((k, v) -> {
builder.parameter(k, String.valueOf(v));
});
}
Object schemaDefaultNode = jsonSchema.get(JsonSchema.SCHEMA_DEFAULT_FIELD_NAME);
if (schemaDefaultNode != null) {
builder.defaultValue(convertToConnect(builder.build(), schemaDefaultNode));
}
Schema result = builder.build();
toConnectSchemaCache.put(jsonSchema, result);
return result;
} | 3.68 |
hbase_OrderedBytes_encodeBlobVar | /**
* Encode a blob value using a modified varint encoding scheme.
* @return the number of bytes written.
* @see #encodeBlobVar(PositionedByteRange, byte[], int, int, Order)
*/
public static int encodeBlobVar(PositionedByteRange dst, byte[] val, Order ord) {
return encodeBlobVar(dst, val, 0, null != val ? val.length : 0, ord);
} | 3.68 |
framework_UIDL_getAttributeNames | /**
* Gets the names of the attributes available.
*
* @return the names of available attributes
*/
public Set<String> getAttributeNames() {
Set<String> keySet = attr().getKeySet();
keySet.remove("v");
return keySet;
} | 3.68 |
dubbo_TTable_replaceTab | /**
 * Replaces tabs with four spaces.
*
* @param string the original string
* @return the replaced string
*/
private static String replaceTab(String string) {
return replace(string, "\t", " ");
} | 3.68 |
flink_GuavaFlinkConnectorRateLimiter_open | /**
* Creates a rate limiter with the runtime context provided.
*
* @param runtimeContext
*/
@Override
public void open(RuntimeContext runtimeContext) {
this.runtimeContext = runtimeContext;
localRateBytesPerSecond =
globalRateBytesPerSecond / runtimeContext.getNumberOfParallelSubtasks();
this.rateLimiter = RateLimiter.create(localRateBytesPerSecond);
} | 3.68 |
flink_StreamExecutionEnvironment_isChainingEnabled | /**
* Returns whether operator chaining is enabled.
*
* @return {@code true} if chaining is enabled, false otherwise.
*/
@PublicEvolving
public boolean isChainingEnabled() {
return isChainingEnabled;
} | 3.68 |
hbase_MemorySizeUtil_getOnheapGlobalMemStoreSize | /**
* Returns the onheap global memstore limit based on the config
* 'hbase.regionserver.global.memstore.size'.
 * @return the onheap global memstore limit
*/
public static long getOnheapGlobalMemStoreSize(Configuration conf) {
long max = -1L;
final MemoryUsage usage = safeGetHeapMemoryUsage();
if (usage != null) {
max = usage.getMax();
}
float globalMemStorePercent = getGlobalMemStoreHeapPercent(conf, true);
return ((long) (max * globalMemStorePercent));
} | 3.68 |
framework_ConnectorTracker_cleanStreamVariable | /**
* Removes any StreamVariable of the given name from the indicated
* connector.
*
* @param connectorId
* @param variableName
*/
public void cleanStreamVariable(String connectorId, String variableName) {
if (pidToNameToStreamVariable == null) {
return;
}
Map<String, StreamVariable> nameToStreamVar = pidToNameToStreamVariable
.get(connectorId);
if (nameToStreamVar != null) {
StreamVariable streamVar = nameToStreamVar.remove(variableName);
streamVariableToSeckey.remove(streamVar);
if (nameToStreamVar.isEmpty()) {
pidToNameToStreamVariable.remove(connectorId);
}
}
} | 3.68 |
pulsar_PulsarAdminImpl_resourceQuotas | /**
* @return the resource quota management object
*/
public ResourceQuotas resourceQuotas() {
return resourceQuotas;
} | 3.68 |
hbase_Result_loadValue | /**
* Loads the latest version of the specified column into the provided <code>ByteBuffer</code>.
* <p>
* Does not clear or flip the buffer.
* @param family family name
* @param foffset family offset
* @param flength family length
* @param qualifier column qualifier
* @param qoffset qualifier offset
* @param qlength qualifier length
* @param dst the buffer where to write the value
* @return <code>true</code> if a value was found, <code>false</code> otherwise
* @throws BufferOverflowException there is insufficient space remaining in the buffer
*/
public boolean loadValue(byte[] family, int foffset, int flength, byte[] qualifier, int qoffset,
int qlength, ByteBuffer dst) throws BufferOverflowException {
Cell kv = getColumnLatestCell(family, foffset, flength, qualifier, qoffset, qlength);
if (kv == null) {
return false;
}
dst.put(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
return true;
} | 3.68 |
morf_OracleDialect_makeColumnNotNull | /**
 * Returns the SQL statement to make the column not null. The generated block
 * catches the exception with SQL error code ORA-01442 (column to be modified
 * to NOT NULL is already NOT NULL).
*
* <p>Example of the generated SQL statement:</p>
* <pre>
* DECLARE
* e EXCEPTION;
* pragma exception_init(e,-1442);
* BEGIN
* EXECUTE immediate 'alter table sandbox.genericglposting modify (version not null)';
* EXCEPTION
* WHEN e THEN
* NULL;
* END;
* </pre>
*
* @param tableName Table name to be altered
* @param columnName Column name to make it not null
* @return The SQL statement to make the column not null
*/
private String makeColumnNotNull(String tableName, String columnName) {
StringBuilder statement = new StringBuilder();
statement.append("DECLARE \n").append(" e EXCEPTION; \n").append(" pragma exception_init(e,-1442); \n").append("BEGIN \n")
.append(" EXECUTE immediate 'ALTER TABLE ").append(schemaNamePrefix()).append(tableName).append(" MODIFY (")
.append(columnName).append(" NOT NULL)'; \n").append("EXCEPTION \n").append("WHEN e THEN \n").append(" NULL; \n")
.append("END;");
if (log.isDebugEnabled()) {
log.debug(statement.toString());
}
return statement.toString();
} | 3.68 |
flink_ResolvedSchema_toSourceRowDataType | /**
* Converts all columns of this schema into a (possibly nested) row data type.
*
* <p>This method returns the <b>source-to-query schema</b>.
*
* <p>Note: The returned row data type contains physical, computed, and metadata columns. Be
* careful when using this method in a table source or table sink. In many cases, {@link
* #toPhysicalRowDataType()} might be more appropriate.
*
* @see DataTypes#ROW(DataTypes.Field...)
* @see #toPhysicalRowDataType()
* @see #toSinkRowDataType()
*/
public DataType toSourceRowDataType() {
return toRowDataType(c -> true);
} | 3.68 |
framework_AbstractComponent_isRequiredIndicatorVisible | /**
* Checks whether the required indicator is visible or not. <strong>NOTE:
 * Does not apply to all components!</strong>
* <p>
* This method will throw a {@link IllegalStateException} if the component
* state (returned by {@link #getState()}) does not inherit
* {@link AbstractFieldState}.
*
* @return <code>true</code> if visible, <code>false</code> if not
* @see #setRequiredIndicatorVisible(boolean)
* @since 8.0
*/
protected boolean isRequiredIndicatorVisible() {
if (getState(false) instanceof AbstractFieldState) {
return ((AbstractFieldState) getState(false)).required;
}
throw new IllegalStateException(
"This component does not support the required indicator, since state is of type "
+ getStateType().getSimpleName()
+ " and does not inherit "
+ AbstractFieldState.class.getSimpleName());
} | 3.68 |
hbase_FlushTableSubprocedure_acquireBarrier | /**
* Flush the online regions on this rs for the target table.
*/
@Override
public void acquireBarrier() throws ForeignException {
flushRegions();
} | 3.68 |
morf_SqlUtils_selectFirst | /**
* Constructs a Select First Statement.
*
* <p>Usage is discouraged; this method will be deprecated at some point. Use
* {@link SelectFirstStatement#selectFirst(AliasedFieldBuilder)} for preference.</p>
*
* @param field the field that should be selected
* @return {@link SelectStatement}
*/
public static SelectFirstStatement selectFirst(AliasedFieldBuilder field) {
return new SelectFirstStatement(field);
} | 3.68 |
hadoop_ResourceRequest_capability | /**
* Set the <code>capability</code> of the request.
* @see ResourceRequest#setCapability(Resource)
* @param capability <code>capability</code> of the request
* @return {@link ResourceRequestBuilder}
*/
@Public
@Stable
public ResourceRequestBuilder capability(Resource capability) {
resourceRequest.setCapability(capability);
return this;
} | 3.68 |
hadoop_AppStoreController_register | /**
* Register an application.
*
* @apiGroup AppStoreController
* @apiName register
* @api {post} /app_store/register Register an application in appstore.
* @apiParam {Object} app Application definition.
* @apiParamExample {json} Request-Example:
* {
* "name": "Jenkins",
* "organization": "Jenkins-ci.org",
* "description": "The leading open source automation server",
* "icon": "/css/img/jenkins.png",
* "lifetime": "3600",
* "components": [
* {
* "name": "jenkins",
* "number_of_containers": 1,
* "artifact": {
* "id": "eyang-1.openstacklocal:5000/jenkins:latest",
* "type": "DOCKER"
* },
* "launch_command": "",
* "resource": {
* "cpus": 1,
* "memory": "2048"
* },
* "configuration": {
* "env": {
* },
* "files": [
* ]
* }
* }
* ],
* "quicklinks": {
* "Jenkins UI": "http://jenkins.${SERVICE_NAME}.${USER}.${DOMAIN}:8080/"
* }
* }
* @apiSuccess {String} Response Application register result.
* @apiError BadRequest Error in process application registration.
* @param app - Yarnfile in JSON form
* @return Web response
*/
@POST
@Path("register")
@Produces(MediaType.APPLICATION_JSON)
public Response register(Application app) {
try {
if (app.getName()==null) {
throw new IOException("Application name can not be empty.");
}
if (app.getOrganization()==null) {
throw new IOException("Application organization can not be empty.");
}
if (app.getDescription()==null) {
throw new IOException("Application description can not be empty.");
}
AppCatalogSolrClient sc = new AppCatalogSolrClient();
sc.register(app);
} catch (IOException e) {
return Response.status(Status.BAD_REQUEST).entity(e.getMessage()).build();
}
return Response.status(Status.ACCEPTED).build();
} | 3.68 |
dubbo_TTable_set | /**
* set border style
*
* @param border border style
* @return this
*/
public Border set(int border) {
this.borders = border;
return this;
} | 3.68 |
hadoop_OBSPosixBucketUtils_fsNonRecursivelyDelete | // List all sub objects first, then delete them in batches.
private static void fsNonRecursivelyDelete(final OBSFileSystem owner,
final Path parent)
throws IOException, ObsException {
// List sub objects sorted by path depth.
FileStatus[] arFileStatus = OBSCommonUtils.innerListStatus(owner,
parent, true);
// Remove sub objects one depth at a time to avoid having parents and
// children in the same batch.
fsRemoveKeys(owner, arFileStatus);
// Delete the parent folder, which should have become empty.
OBSCommonUtils.deleteObject(owner,
OBSCommonUtils.pathToKey(owner, parent));
} | 3.68 |
hbase_MultiVersionConcurrencyControl_advanceTo | /**
* Step the MVCC forward on to a new read/write basis.
*/
public void advanceTo(long newStartPoint) {
while (true) {
long seqId = this.getWritePoint();
if (seqId >= newStartPoint) {
break;
}
if (this.tryAdvanceTo(newStartPoint, seqId)) {
break;
}
}
} | 3.68 |
framework_ApplicationConfiguration_loadFromDOM | /**
* Reads the configuration values defined by the bootstrap javascript.
*/
private void loadFromDOM() {
JsoConfiguration jsoConfiguration = getJsoConfiguration(id);
serviceUrl = jsoConfiguration
.getConfigString(ApplicationConstants.SERVICE_URL);
if (serviceUrl == null || serviceUrl.isEmpty()) {
/*
* Use the current url without query parameters and fragment as the
* default value.
*/
serviceUrl = Window.Location.getHref().replaceFirst("[?#].*", "");
} else {
/*
* Resolve potentially relative URLs to ensure they point to the
* desired locations even if the base URL of the page changes later
* (e.g. with pushState)
*/
serviceUrl = WidgetUtil.getAbsoluteUrl(serviceUrl);
}
// Ensure there's an ending slash (to make appending e.g. UIDL work)
if (!useServiceUrlPathParam() && !serviceUrl.endsWith("/")) {
serviceUrl += '/';
}
contextRootUrl = jsoConfiguration
.getConfigString(ApplicationConstants.CONTEXT_ROOT_URL);
vaadinDirUrl = WidgetUtil.getAbsoluteUrl(jsoConfiguration
.getConfigString(ApplicationConstants.VAADIN_DIR_URL));
frontendUrl = WidgetUtil.getAbsoluteUrl(jsoConfiguration
.getConfigString(ApplicationConstants.FRONTEND_URL));
uiId = jsoConfiguration.getConfigInteger(UIConstants.UI_ID_PARAMETER)
.intValue();
// null -> false
standalone = jsoConfiguration
.getConfigBoolean("standalone") == Boolean.TRUE;
heartbeatInterval = jsoConfiguration
.getConfigInteger("heartbeatInterval");
communicationError = jsoConfiguration.getConfigError("comErrMsg");
authorizationError = jsoConfiguration.getConfigError("authErrMsg");
sessionExpiredError = jsoConfiguration.getConfigError("sessExpMsg");
rootElement = jsoConfiguration.getConfigElement("rootElement");
} | 3.68 |
flink_ThreadInfoRequestCoordinator_requestThreadInfo | /**
* Requests thread infos from given subtasks. The response would be ignored if it does not
* return within timeout.
*/
private void requestThreadInfo(
Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>>
executionWithGateways,
ThreadInfoSamplesRequest requestParams,
Time timeout) {
// Trigger samples collection from all subtasks
for (Map.Entry<
ImmutableSet<ExecutionAttemptID>,
CompletableFuture<TaskExecutorThreadInfoGateway>>
executionWithGateway : executionWithGateways.entrySet()) {
CompletableFuture<TaskExecutorThreadInfoGateway> executorGatewayFuture =
executionWithGateway.getValue();
CompletableFuture<TaskThreadInfoResponse> threadInfo =
executorGatewayFuture.thenCompose(
executorGateway ->
executorGateway.requestThreadInfoSamples(
executionWithGateway.getKey(), requestParams, timeout));
threadInfo.whenCompleteAsync(
(TaskThreadInfoResponse threadInfoSamplesResponse, Throwable throwable) -> {
if (threadInfoSamplesResponse != null) {
handleSuccessfulResponse(
requestParams.getRequestId(),
executionWithGateway.getKey(),
threadInfoSamplesResponse.getSamples());
} else {
handleFailedResponse(requestParams.getRequestId(), throwable);
}
},
executor);
}
} | 3.68 |
framework_FieldGroup_buildAndBindMemberFields | /**
* Binds member fields found in the given object and optionally builds
* member fields that have not been initialized.
* <p>
* This method processes all (Java) member fields whose type extends
* {@link Field} and that can be mapped to a property id. Property ids are
* searched in the following order: @{@link PropertyId} annotations, exact
* field name matches and the case-insensitive matching that ignores
* underscores. Fields that are not initialized (null) are built using the
 * field factory if buildFields is true. All non-null fields for which a
* property id can be determined are bound to the property id.
* </p>
*
* @param objectWithMemberFields
* The object that contains (Java) member fields to build and
* bind
* @throws BindException
* If there is a problem binding or building a field
*/
protected void buildAndBindMemberFields(Object objectWithMemberFields,
boolean buildFields) throws BindException {
Class<?> objectClass = objectWithMemberFields.getClass();
for (java.lang.reflect.Field memberField : getFieldsInDeclareOrder(
objectClass)) {
if (!Field.class.isAssignableFrom(memberField.getType())) {
// Process next field
continue;
}
PropertyId propertyIdAnnotation = memberField
.getAnnotation(PropertyId.class);
Class<? extends Field> fieldType = (Class<? extends Field>) memberField
.getType();
Object propertyId = null;
if (propertyIdAnnotation != null) {
// @PropertyId(propertyId) always overrides property id
propertyId = propertyIdAnnotation.value();
} else {
try {
propertyId = findPropertyId(memberField);
} catch (SearchException e) {
// Property id was not found, skip this field
continue;
}
if (propertyId == null) {
// Property id was not found, skip this field
continue;
}
}
// Ensure that the property id exists
Class<?> propertyType;
try {
propertyType = getPropertyType(propertyId);
} catch (BindException e) {
// Property id was not found, skip this field
continue;
}
Field<?> field;
try {
// Get the field from the object
field = (Field<?>) ReflectTools.getJavaFieldValue(
objectWithMemberFields, memberField, Field.class);
} catch (Exception e) {
// If we cannot determine the value, just skip the field and try
// the next one
continue;
}
if (field == null && buildFields) {
Caption captionAnnotation = memberField
.getAnnotation(Caption.class);
String caption;
if (captionAnnotation != null) {
caption = captionAnnotation.value();
} else {
caption = DefaultFieldFactory
.createCaptionByPropertyId(propertyId);
}
// Create the component (Field)
field = build(caption, propertyType, fieldType);
// Store it in the field
try {
ReflectTools.setJavaFieldValue(objectWithMemberFields,
memberField, field);
} catch (IllegalArgumentException e) {
throw new BindException("Could not assign value to field '"
+ memberField.getName() + "'", e);
} catch (IllegalAccessException e) {
throw new BindException("Could not assign value to field '"
+ memberField.getName() + "'", e);
} catch (InvocationTargetException e) {
throw new BindException("Could not assign value to field '"
+ memberField.getName() + "'", e);
}
}
if (field != null) {
// Bind it to the property id
bind(field, propertyId);
}
}
} | 3.68 |
hadoop_NodePlan_getPort | /**
* Gets the DataNode RPC Port.
*
* @return port
*/
public int getPort() {
return port;
} | 3.68 |
hbase_Bytes_unsignedBinarySearch | /**
* Search sorted array "a" for byte "key". I can't remember if I wrote this or copied it from
* somewhere. (mcorgan)
* @param a Array to search. Entries must be sorted and unique.
* @param fromIndex First index inclusive of "a" to include in the search.
* @param toIndex Last index exclusive of "a" to include in the search.
* @param key The byte to search for.
* @return The index of key if found. If not found, return -(index + 1), where negative indicates
* "not found" and the "index + 1" handles the "-0" case.
*/
public static int unsignedBinarySearch(byte[] a, int fromIndex, int toIndex, byte key) {
int unsignedKey = key & 0xff;
int low = fromIndex;
int high = toIndex - 1;
while (low <= high) {
int mid = low + ((high - low) >> 1);
int midVal = a[mid] & 0xff;
if (midVal < unsignedKey) {
low = mid + 1;
} else if (midVal > unsignedKey) {
high = mid - 1;
} else {
return mid; // key found
}
}
return -(low + 1); // key not found.
} | 3.68 |
hbase_MobUtils_getMobFileName | /**
* Gets the mob file name from the mob ref cell. A mob ref cell has a mob reference tag. The value
* of a mob ref cell consists of two parts, real mob value length and mob file name. The real mob
* value length takes 4 bytes. The remaining part is the mob file name.
* @param cell The mob ref cell.
* @return The mob file name.
*/
public static String getMobFileName(Cell cell) {
return Bytes.toString(cell.getValueArray(), cell.getValueOffset() + Bytes.SIZEOF_INT,
cell.getValueLength() - Bytes.SIZEOF_INT);
} | 3.68 |
flink_Module_getTableSourceFactory | /**
* Returns a {@link DynamicTableSourceFactory} for creating source tables.
*
* <p>A factory is determined with the following precedence rule:
*
* <ul>
* <li>1. Factory provided by the corresponding catalog of a persisted table.
* <li>2. Factory provided by a module.
* <li>3. Factory discovered using Java SPI.
* </ul>
*
* <p>This will be called on loaded modules in the order in which they have been loaded. The
* first factory returned will be used.
*
* <p>This method can be useful to disable Java SPI completely or influence how temporary table
* sources should be created without a corresponding catalog.
*/
default Optional<DynamicTableSourceFactory> getTableSourceFactory() {
return Optional.empty();
} | 3.68 |
rocketmq-connect_Worker_getWorkingTasks | /**
 * Be aware that we are not creating a defensive copy of these tasks,
 * so developers should only use these references for read-only purposes.
 * These variables should be treated as immutable.
*
* @return
*/
public Set<Runnable> getWorkingTasks() {
return runningTasks;
} | 3.68 |
hbase_RegionNormalizerWorkQueue_putAllFirst | /**
* Inserts the specified elements at the head of the queue.
* @param c the elements to add
*/
public void putAllFirst(Collection<? extends E> c) {
if (c == null) {
throw new NullPointerException();
}
lock.writeLock().lock();
try {
final LinkedHashSet<E> copy = new LinkedHashSet<>(c.size() + delegate.size());
copy.addAll(c);
copy.addAll(delegate);
delegate = copy;
if (!delegate.isEmpty()) {
notEmpty.signal();
}
} finally {
lock.writeLock().unlock();
}
} | 3.68 |
flink_ModuleFactory_requiredContext | /** @deprecated Implement the {@link Factory} based stack instead. */
@Deprecated
default Map<String, String> requiredContext() {
// Default implementation for modules implementing the new {@link Factory} stack instead.
return null;
} | 3.68 |
flink_FlinkRelMdCollation_values | /**
* Helper method to determine a {@link org.apache.calcite.rel.core.Values}'s collation.
*
* <p>We actually under-report the collations. A Values with 0 or 1 rows - an edge case, but
* legitimate and very common - is ordered by every permutation of every subset of the columns.
*
* <p>So, our algorithm aims to:
*
* <ul>
* <li>produce at most N collations (where N is the number of columns);
* <li>make each collation as long as possible;
* <li>do not repeat combinations already emitted - if we've emitted {@code (a, b)} do not
* later emit {@code (b, a)};
* <li>probe the actual values and make sure that each collation is consistent with the data
* </ul>
*
* <p>So, for an empty Values with 4 columns, we would emit {@code (a, b, c, d), (b, c, d), (c,
* d), (d)}.
*/
public static List<RelCollation> values(
RelMetadataQuery mq,
RelDataType rowType,
com.google.common.collect.ImmutableList<
com.google.common.collect.ImmutableList<RexLiteral>>
tuples) {
Util.discard(mq); // for future use
final List<RelCollation> list = new ArrayList<>();
final int n = rowType.getFieldCount();
final List<Pair<RelFieldCollation, com.google.common.collect.Ordering<List<RexLiteral>>>>
pairs = new ArrayList<>();
outer:
for (int i = 0; i < n; i++) {
pairs.clear();
for (int j = i; j < n; j++) {
final RelFieldCollation fieldCollation = new RelFieldCollation(j);
com.google.common.collect.Ordering<List<RexLiteral>> comparator =
comparator(fieldCollation);
com.google.common.collect.Ordering<List<RexLiteral>> ordering;
if (pairs.isEmpty()) {
ordering = comparator;
} else {
ordering = Util.last(pairs).right.compound(comparator);
}
pairs.add(Pair.of(fieldCollation, ordering));
if (!ordering.isOrdered(tuples)) {
if (j == i) {
continue outer;
}
pairs.remove(pairs.size() - 1);
}
}
if (!pairs.isEmpty()) {
list.add(RelCollations.of(Pair.left(pairs)));
}
}
return list;
} | 3.68 |
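A stripped-down illustration of the same idea without Calcite: starting at each column, greedily extend a compound comparator for as long as the rows stay ordered by it. Row types, RelCollation construction, null ordering, and the duplicate-combination rule are all left out, so this is a sketch of the algorithm, not a drop-in replacement.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

final class CollationSketch {

    // For each starting column, greedily builds the longest column list by which the
    // given rows are already sorted; columns that break the ordering are skipped.
    static List<List<Integer>> sortedColumnPrefixes(List<List<Integer>> rows, int columnCount) {
        List<List<Integer>> collations = new ArrayList<>();
        for (int i = 0; i < columnCount; i++) {
            List<Integer> columns = new ArrayList<>();
            Comparator<List<Integer>> cmp = null;
            for (int j = i; j < columnCount; j++) {
                final int col = j;
                Comparator<List<Integer>> next = Comparator.comparing(row -> row.get(col));
                Comparator<List<Integer>> candidate = (cmp == null) ? next : cmp.thenComparing(next);
                if (isOrdered(rows, candidate)) {
                    cmp = candidate;
                    columns.add(col);
                } else if (j == i) {
                    break; // not ordered even by the single starting column
                }
            }
            if (!columns.isEmpty()) {
                collations.add(columns);
            }
        }
        return collations;
    }

    private static boolean isOrdered(List<List<Integer>> rows, Comparator<List<Integer>> cmp) {
        for (int k = 1; k < rows.size(); k++) {
            if (cmp.compare(rows.get(k - 1), rows.get(k)) > 0) {
                return false;
            }
        }
        return true;
    }
}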
hudi_BaseHoodieTableServiceClient_scheduleTableService | /**
* Schedule table services such as clustering, compaction & cleaning.
*
* @param extraMetadata Metadata to pass onto the scheduled service instant
* @param tableServiceType Type of table service to schedule
 * @return the instant time of the scheduled table service if scheduling succeeded, otherwise empty
*/
public Option<String> scheduleTableService(String instantTime, Option<Map<String, String>> extraMetadata,
TableServiceType tableServiceType) {
// A lock is required to guard against race conditions between an ongoing writer and scheduling a table service.
final Option<HoodieInstant> inflightInstant = Option.of(new HoodieInstant(HoodieInstant.State.REQUESTED,
tableServiceType.getAction(), instantTime));
try {
this.txnManager.beginTransaction(inflightInstant, Option.empty());
LOG.info("Scheduling table service " + tableServiceType);
return scheduleTableServiceInternal(instantTime, extraMetadata, tableServiceType);
} finally {
this.txnManager.endTransaction(inflightInstant);
}
} | 3.68 |
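The shape of the locking above, acquire before the critical section and release in a finally block so scheduling failures cannot leak the lock, is worth isolating; a generic sketch with a plain ReentrantLock rather than the Hudi transaction manager:

import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;

final class GuardedSchedulingSketch {

    private final ReentrantLock lock = new ReentrantLock();

    // Runs the scheduling action under the lock; the finally block guarantees release
    // even if the action throws.
    <T> T schedule(Supplier<T> action) {
        lock.lock();
        try {
            return action.get();
        } finally {
            lock.unlock();
        }
    }
}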
hbase_ReplicationSourceManager_getSource | /**
* Get the normal source for a given peer
 * @return the normal source for the given peer if it exists, otherwise null.
*/
public ReplicationSourceInterface getSource(String peerId) {
return this.sources.get(peerId);
} | 3.68 |
flink_RawValueData_fromBytes | /** Creates an instance of {@link RawValueData} from the given byte array. */
static <T> RawValueData<T> fromBytes(byte[] bytes) {
return BinaryRawValueData.fromBytes(bytes);
} | 3.68 |
Activiti_ThrowMessage_builder | /**
 * Creates a builder to build a {@link ThrowMessage}.
* @return created builder
*/
public static INameStage builder() {
return new ThrowMessagBuilder();
} | 3.68 |
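The builder() above returns a stage interface (INameStage) instead of the builder class itself, which is the staged-builder idiom: each stage exposes only the next legal call. A minimal standalone sketch of that idiom, with invented stage and message types unrelated to Activiti:

final class StagedBuilderSketch {

    interface NameStage { BuildStage name(String name); }
    interface BuildStage { Message build(); }

    // Each stage narrows what can be called next, so name(...) must precede build().
    static NameStage builder() {
        return name -> () -> new Message(name);
    }

    static final class Message {
        final String name;
        Message(String name) { this.name = name; }
    }

    public static void main(String[] args) {
        Message m = builder().name("orderPlaced").build();
        System.out.println(m.name);
    }
}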
hbase_GlobalQuotaSettingsImpl_toQuotas | /**
* Constructs a new {@link Quotas} message from {@code this}.
*/
protected Quotas toQuotas() {
QuotaProtos.Quotas.Builder builder = QuotaProtos.Quotas.newBuilder();
if (getThrottleProto() != null) {
builder.setThrottle(getThrottleProto());
}
if (getBypassGlobals() != null) {
builder.setBypassGlobals(getBypassGlobals());
}
if (getSpaceProto() != null) {
builder.setSpace(getSpaceProto());
}
return builder.build();
} | 3.68 |
flink_ContextResolvedTable_copy | /** Copy the {@link ContextResolvedTable}, replacing the underlying {@link ResolvedSchema}. */
public ContextResolvedTable copy(ResolvedSchema newSchema) {
return new ContextResolvedTable(
objectIdentifier,
catalog,
new ResolvedCatalogTable((CatalogTable) resolvedTable.getOrigin(), newSchema),
false);
} | 3.68 |
hbase_HRegionFileSystem_deleteFamily | /**
* Remove the region family from disk, archiving the store files.
* @param familyName Column Family Name
 * @throws IOException if an error occurs during the archiving
*/
public void deleteFamily(final String familyName) throws IOException {
// archive family store files
HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));
// delete the family folder
Path familyDir = getStoreDir(familyName);
if (fs.exists(familyDir) && !deleteDir(familyDir))
throw new IOException("Could not delete family " + familyName + " from FileSystem for region "
+ regionInfoForFs.getRegionNameAsString() + "(" + regionInfoForFs.getEncodedName() + ")");
} | 3.68 |
streampipes_EpProperties_timestampProperty | /**
 * Creates a new primitive property of type timestamp (with data type long and domain property schema.org/DateTime).
*
* @param runtimeName The field identifier of the event property at runtime.
* @return {@link org.apache.streampipes.model.schema.EventPropertyPrimitive}
*/
public static EventPropertyPrimitive timestampProperty(String runtimeName) {
// TODO we need a real timestamp property!
EventPropertyPrimitive ep = ep(Labels.from("", "Timestamp", "The current timestamp value"),
XSD.LONG.toString(), runtimeName, "http://schema.org/DateTime");
ep.setPropertyScope(PropertyScope.HEADER_PROPERTY.name());
return ep;
} | 3.68 |
framework_CalendarConnector_registerListeners | /**
 * Registers listeners on the calendar so the server can be notified of the
* events.
*/
protected void registerListeners() {
VCalendar calendar = getWidget();
calendar.setListener(new DateClickListener() {
@Override
public void dateClick(String date) {
if (!calendar.isDisabled()
&& hasEventListener(CalendarEventId.DATECLICK)) {
rpc.dateClick(date);
}
}
});
calendar.setListener(new ForwardListener() {
@Override
public void forward() {
if (hasEventListener(CalendarEventId.FORWARD)) {
rpc.forward();
}
}
});
calendar.setListener(new BackwardListener() {
@Override
public void backward() {
if (hasEventListener(CalendarEventId.BACKWARD)) {
rpc.backward();
}
}
});
calendar.setListener(new RangeSelectListener() {
@Override
public void rangeSelected(String value) {
if (hasEventListener(CalendarEventId.RANGESELECT)) {
rpc.rangeSelect(value);
}
}
});
calendar.setListener(new WeekClickListener() {
@Override
public void weekClick(String event) {
if (!calendar.isDisabled()
&& hasEventListener(CalendarEventId.WEEKCLICK)) {
rpc.weekClick(event);
}
}
});
calendar.setListener(new EventMovedListener() {
@Override
public void eventMoved(CalendarEvent event) {
if (hasEventListener(CalendarEventId.EVENTMOVE)) {
StringBuilder sb = new StringBuilder();
sb.append(DateUtil.formatClientSideDate(event.getStart()));
sb.append('-');
sb.append(DateUtil
.formatClientSideTime(event.getStartTime()));
rpc.eventMove(event.getIndex(), sb.toString());
}
}
});
calendar.setListener(new EventResizeListener() {
@Override
public void eventResized(CalendarEvent event) {
if (hasEventListener(CalendarEventId.EVENTRESIZE)) {
StringBuilder buffer = new StringBuilder();
buffer.append(
DateUtil.formatClientSideDate(event.getStart()));
buffer.append('-');
buffer.append(DateUtil
.formatClientSideTime(event.getStartTime()));
String newStartDate = buffer.toString();
buffer = new StringBuilder();
buffer.append(
DateUtil.formatClientSideDate(event.getEnd()));
buffer.append('-');
buffer.append(
DateUtil.formatClientSideTime(event.getEndTime()));
String newEndDate = buffer.toString();
rpc.eventResize(event.getIndex(), newStartDate, newEndDate);
}
}
});
calendar.setListener(new VCalendar.ScrollListener() {
@Override
public void scroll(int scrollPosition) {
// This call is @Delayed (== non-immediate)
rpc.scroll(scrollPosition);
}
});
calendar.setListener(new EventClickListener() {
@Override
public void eventClick(CalendarEvent event) {
if (hasEventListener(CalendarEventId.EVENTCLICK)) {
rpc.eventClick(event.getIndex());
}
}
});
calendar.setListener(new MouseEventListener() {
@Override
public void contextMenu(ContextMenuEvent event,
final Widget widget) {
final NativeEvent ne = event.getNativeEvent();
int left = ne.getClientX();
int top = ne.getClientY();
top += Window.getScrollTop();
left += Window.getScrollLeft();
getClient().getContextMenu().showAt(new ActionOwner() {
@Override
public String getPaintableId() {
return CalendarConnector.this.getPaintableId();
}
@Override
public ApplicationConnection getClient() {
return CalendarConnector.this.getClient();
}
@Override
@SuppressWarnings("deprecation")
public Action[] getActions() {
if (widget instanceof SimpleDayCell) {
/*
* Month view
*/
SimpleDayCell cell = (SimpleDayCell) widget;
Date start = new Date(cell.getDate().getYear(),
cell.getDate().getMonth(),
cell.getDate().getDate(), 0, 0, 0);
Date end = new Date(cell.getDate().getYear(),
cell.getDate().getMonth(),
cell.getDate().getDate(), 23, 59, 59);
return CalendarConnector.this
.getActionsBetween(start, end);
} else if (widget instanceof MonthEventLabel) {
MonthEventLabel mel = (MonthEventLabel) widget;
CalendarEvent event = mel.getCalendarEvent();
Action[] actions = CalendarConnector.this
.getActionsBetween(event.getStartTime(),
event.getEndTime());
for (Action action : actions) {
((VCalendarAction) action).setEvent(event);
}
return actions;
} else if (widget instanceof DateCell) {
/*
* Week and Day view
*/
DateCell cell = (DateCell) widget;
int slotIndex = DOM.getChildIndex(cell.getElement(),
(Element) ne.getEventTarget().cast());
DateCellSlot slot = cell.getSlot(slotIndex);
return CalendarConnector.this.getActionsBetween(
slot.getFrom(), slot.getTo());
} else if (widget instanceof DateCellDayEvent) {
/*
* Context menu on event
*/
DateCellDayEvent dayEvent = (DateCellDayEvent) widget;
CalendarEvent event = dayEvent.getCalendarEvent();
Action[] actions = CalendarConnector.this
.getActionsBetween(event.getStartTime(),
event.getEndTime());
for (Action action : actions) {
((VCalendarAction) action).setEvent(event);
}
return actions;
}
return null;
}
}, left, top);
}
});
} | 3.68 |
hbase_Mutation_setDurability | /**
* Set the durability for this mutation
*/
public Mutation setDurability(Durability d) {
this.durability = d;
return this;
} | 3.68 |
hbase_HRegion_compactStore | /**
 * This is a helper function that compacts the given store.
* <p>
* It is used by utilities and testing
*/
void compactStore(byte[] family, ThroughputController throughputController) throws IOException {
HStore s = getStore(family);
Optional<CompactionContext> compaction = s.requestCompaction();
if (compaction.isPresent()) {
compact(compaction.get(), s, throughputController, null);
}
} | 3.68 |
hbase_Compactor_getProgress | /**
* Return the aggregate progress for all currently active compactions.
*/
public CompactionProgress getProgress() {
synchronized (progressSet) {
long totalCompactingKVs = 0;
long currentCompactedKVs = 0;
long totalCompactedSize = 0;
for (CompactionProgress progress : progressSet) {
totalCompactingKVs += progress.totalCompactingKVs;
currentCompactedKVs += progress.currentCompactedKVs;
totalCompactedSize += progress.totalCompactedSize;
}
CompactionProgress result = new CompactionProgress(totalCompactingKVs);
result.currentCompactedKVs = currentCompactedKVs;
result.totalCompactedSize = totalCompactedSize;
return result;
}
} | 3.68 |
hadoop_OBSDataBlocks_verifyState | /**
* Verify that the block is in the declared state.
*
* @param expected expected state.
* @throws IllegalStateException if the DataBlock is in the wrong state
*/
protected final void verifyState(final DestState expected)
throws IllegalStateException {
if (expected != null && state != expected) {
throw new IllegalStateException(
"Expected stream state " + expected
+ " -but actual state is " + state + " in " + this);
}
} | 3.68 |
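The guard above is a reusable fail-fast pattern: compare the expected state with the current one and throw a descriptive IllegalStateException. A standalone sketch with an invented enum, not the actual OBS block states:

final class StateGuardSketch {

    enum State { WRITING, UPLOADING, CLOSED }

    private State state = State.WRITING;

    // Fails fast if the object is not in the state the caller expects.
    void verifyState(State expected) {
        if (expected != null && state != expected) {
            throw new IllegalStateException(
                "Expected state " + expected + " but was " + state + " in " + this);
        }
    }

    void startUpload() {
        verifyState(State.WRITING); // only legal from WRITING
        state = State.UPLOADING;
    }
}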
flink_CollectionUtil_entry | /** Returns an immutable {@link Map.Entry}. */
public static <K, V> Map.Entry<K, V> entry(K k, V v) {
return new AbstractMap.SimpleImmutableEntry<>(k, v);
} | 3.68 |
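For context, the JDK type used above can be exercised directly; a tiny usage sketch:

import java.util.AbstractMap;
import java.util.Map;

final class EntrySketch {
    public static void main(String[] args) {
        Map.Entry<String, Integer> e = new AbstractMap.SimpleImmutableEntry<>("retries", 3);
        // setValue is unsupported on the immutable entry and throws at runtime.
        System.out.println(e.getKey() + " -> " + e.getValue());
    }
}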
flink_IOManager_getSpillingDirectoriesPaths | /**
* Gets the directories that the I/O manager spills to, as path strings.
*
* @return The directories that the I/O manager spills to, as path strings.
*/
public String[] getSpillingDirectoriesPaths() {
File[] paths = fileChannelManager.getPaths();
String[] strings = new String[paths.length];
for (int i = 0; i < strings.length; i++) {
strings[i] = paths[i].getAbsolutePath();
}
return strings;
} | 3.68 |
hbase_UnsafeAccess_getAsShort | /**
* Reads bytes at the given offset as a short value.
* @return short value at offset
*/
private static short getAsShort(ByteBuffer buf, int offset) {
if (buf.isDirect()) {
return HBasePlatformDependent.getShort(directBufferAddress(buf) + offset);
}
return HBasePlatformDependent.getShort(buf.array(),
BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset() + offset);
} | 3.68 |
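A portable way to read the same two bytes without Unsafe is ByteBuffer's absolute getter, which honours the buffer's configured byte order; whether that order matches what the HBase helper produces is an assumption here, since the snippet above goes through platform-specific access for speed.

import java.nio.ByteBuffer;

final class ShortReadSketch {

    // Reads two bytes at the given absolute offset using the buffer's byte order,
    // without touching the buffer's position.
    static short getAsShort(ByteBuffer buf, int offset) {
        return buf.getShort(offset);
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(new byte[] {0x01, 0x02, 0x03, 0x04});
        System.out.println(getAsShort(buf, 1)); // 0x0203 == 515 in big-endian order
    }
}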
flink_Execution_setAccumulators | /**
* Update accumulators (discarded when the Execution has already been terminated).
*
* @param userAccumulators the user accumulators
*/
public void setAccumulators(Map<String, Accumulator<?, ?>> userAccumulators) {
synchronized (accumulatorLock) {
if (!state.isTerminal()) {
this.userAccumulators = userAccumulators;
}
}
} | 3.68 |
hmily_HmilySQLRevertEngineFactory_newInstance | /**
 * Creates a new instance of the hmily SQL revert engine.
*
* @return the hmily SQL revert engine
*/
public static HmilySQLRevertEngine newInstance() {
if (hmilySqlRevertEngine == null) {
synchronized (HmilySQLRevertEngineFactory.class) {
if (hmilySqlRevertEngine == null) {
HmilyConfig config = ConfigEnv.getInstance().getConfig(HmilyConfig.class);
hmilySqlRevertEngine = ExtensionLoaderFactory.load(HmilySQLRevertEngine.class, config.getSqlRevert());
}
}
}
return hmilySqlRevertEngine;
} | 3.68 |
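The factory above uses double-checked locking; for that pattern to be safe under the Java memory model the cached field generally needs to be volatile (the snippet does not show the field declaration). A generic standalone sketch, unrelated to hmily internals:

final class LazySingletonSketch {

    // volatile is what makes the unsynchronized first read safe under the JMM.
    private static volatile LazySingletonSketch instance;

    private LazySingletonSketch() {}

    static LazySingletonSketch getInstance() {
        if (instance == null) {                       // first check, no lock
            synchronized (LazySingletonSketch.class) {
                if (instance == null) {               // second check, under the lock
                    instance = new LazySingletonSketch();
                }
            }
        }
        return instance;
    }
}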
hudi_HoodieGlobalBloomIndex_loadColumnRangesFromFiles | /**
* Load all involved files as <Partition, filename> pairs from all partitions in the table.
*/
@Override
List<Pair<String, BloomIndexFileInfo>> loadColumnRangesFromFiles(List<String> partitions, final HoodieEngineContext context,
final HoodieTable hoodieTable) {
HoodieTableMetaClient metaClient = hoodieTable.getMetaClient();
List<String> allPartitionPaths = FSUtils.getAllPartitionPaths(context, config.getMetadataConfig(), metaClient.getBasePath());
return super.loadColumnRangesFromFiles(allPartitionPaths, context, hoodieTable);
} | 3.68 |