name (string, length 12 to 178) | code_snippet (string, length 8 to 36.5k) | score (float64, 3.26 to 3.68) |
---|---|---|
morf_SqlDialect_getSqlForWindowFunction | /**
* Generates standards compliant SQL from the function within a window function
* @param function The field to convert
* @return The resulting SQL
*/
protected String getSqlForWindowFunction(Function function) {
return getSqlFrom(function);
} | 3.68 |
framework_GridSingleSelect_getSelectedItem | /**
* Returns the currently selected item, or an empty optional if no item is
* selected.
*
* @return an optional of the selected item if any, an empty optional
* otherwise
*/
public Optional<T> getSelectedItem() {
return model.getSelectedItem();
} | 3.68 |
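Because the selection is exposed as an Optional rather than a nullable reference, callers can chain the usual Optional operations. A minimal usage sketch, where `singleSelect` and the `Person` bean are hypothetical placeholders:

```java
// Sketch only: 'singleSelect' is assumed to be the single-selection wrapper above,
// bound to a grid of hypothetical Person beans.
Optional<Person> selected = singleSelect.getSelectedItem();
String label = selected.map(Person::getName).orElse("nothing selected");
System.out.println("Currently selected: " + label);
```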
framework_GridDragSource_addGridDragStartListener | /**
* Attaches dragstart listener for the current drag source grid.
*
* @param listener
* Listener to handle the dragstart event.
* @return Handle to be used to remove this listener.
* @see GridDragStartEvent
*/
public Registration addGridDragStartListener(
GridDragStartListener<T> listener) {
return addListener(DragSourceState.EVENT_DRAGSTART,
GridDragStartEvent.class, listener,
GridDragStartListener.DRAG_START_METHOD);
} | 3.68 |
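The returned Registration makes detaching the listener explicit. A minimal sketch, assuming a Vaadin 8 `Grid<Person>` named `grid` (the `Person` type is a placeholder):

```java
// Sketch: attach a drag start listener and detach it later via the Registration.
GridDragSource<Person> dragSource = new GridDragSource<>(grid);
Registration registration = dragSource.addGridDragStartListener(
        event -> System.out.println("Drag started for " + event.getDraggedItems()));
// Once drag starts no longer need to be observed:
registration.remove();
```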
hadoop_S3APrefetchingInputStream_seek | /**
* Updates internal data such that the next read will take place at the given {@code pos}.
*
* @param pos new read position.
* @throws IOException if there is an IO error during this operation.
*/
@Override
public synchronized void seek(long pos) throws IOException {
throwIfClosed();
inputStream.seek(pos);
} | 3.68 |
hbase_MetricsSource_getReplicableEdits | /**
* Gets the number of edits eligible for replication that have been read from this source's queue
* logs so far.
* @return replicableEdits total number of replicable edits read from this queue's logs.
*/
public long getReplicableEdits() {
return this.singleSourceSource.getWALEditsRead() - this.singleSourceSource.getEditsFiltered();
} | 3.68 |
rocketmq-connect_RetryWithToleranceOperator_execute | /**
* Execute the recoverable operation. If the operation is already in a failed state, then simply return
* with the existing failure.
*/
public <V> V execute(Operation<V> operation, ErrorReporter.Stage stage, Class<?> executingClass) {
context.currentContext(stage, executingClass);
if (context.failed()) {
log.debug("ProcessingContext is already in failed state. Ignoring requested operation.");
return null;
}
try {
Class<? extends Exception> ex = TOLERABLE_EXCEPTIONS.getOrDefault(context.stage(), RetriableException.class);
return execAndHandleError(operation, ex);
} finally {
if (context.failed()) {
errorMetricsGroup.recordError();
context.report();
}
}
} | 3.68 |
morf_H2Dialect_getSqlForWindowFunction | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForWindowFunction(Function)
*/
@Override
protected String getSqlForWindowFunction(Function function) {
FunctionType functionType = function.getType();
switch (functionType) {
case ROW_NUMBER:
return "ROW_NUMBER()";
default:
return super.getSqlForWindowFunction(function);
}
} | 3.68 |
hbase_SaslServerAuthenticationProviders_reset | /**
* Removes the cached singleton instance of {@link SaslServerAuthenticationProviders}.
*/
public static void reset() {
synchronized (holder) {
holder.set(null);
}
} | 3.68 |
pulsar_HeapDumpUtil_getHotSpotDiagnosticMXBean | // Utility method to get the HotSpotDiagnosticMXBean
private static HotSpotDiagnosticMXBean getHotSpotDiagnosticMXBean() {
try {
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
return ManagementFactory.newPlatformMXBeanProxy(server, HOTSPOT_BEAN_NAME, HotSpotDiagnosticMXBean.class);
} catch (Exception e) {
throw new RuntimeException(e);
}
} | 3.68 |
graphhopper_ArrayUtil_permutation | /**
* Creates an IntArrayList filled with a permutation of the numbers 0,1,2,...,size-1
*/
public static IntArrayList permutation(int size, Random rnd) {
IntArrayList result = iota(size);
shuffle(result, rnd);
return result;
} | 3.68 |
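A quick usage sketch; the concrete ordering shown in the comment is only illustrative, since it depends on the seed and the shuffle implementation:

```java
import com.carrotsearch.hppc.IntArrayList;
import java.util.Random;

// Creates a reproducible permutation of 0..4 by fixing the Random seed.
IntArrayList permutation = ArrayUtil.permutation(5, new Random(42));
System.out.println(permutation); // e.g. [3, 0, 4, 1, 2] (seed-dependent)
```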
flink_SkipListUtils_putValueData | /**
* Puts the value data into value space.
*
* @param memorySegment memory segment for value space.
* @param offset offset of value space in memory segment.
* @param value value data.
*/
public static void putValueData(MemorySegment memorySegment, int offset, byte[] value) {
MemorySegment valueSegment = MemorySegmentFactory.wrap(value);
valueSegment.copyTo(0, memorySegment, offset + getValueMetaLen(), value.length);
} | 3.68 |
hbase_OrderedBlobVar_encode | /**
* Write a subset of {@code val} to {@code dst}.
* @param dst the {@link PositionedByteRange} to write to
* @param val the value to write to {@code dst}
* @param voff the offset in {@code val} from which to start writing
* @param vlen the length of {@code val}
* @return the number of bytes written
*/
public int encode(PositionedByteRange dst, byte[] val, int voff, int vlen) {
return OrderedBytes.encodeBlobVar(dst, val, voff, vlen, order);
} | 3.68 |
morf_SqlDialect_buildSQLToStartTracing | /**
* @param identifier Unique identifier for trace file name, can be null.
* @return Sql required to turn on tracing, or null if tracing is not
* supported.
*/
public List<String> buildSQLToStartTracing(@SuppressWarnings("unused") String identifier) {
return null;
} | 3.68 |
dubbo_DubboBootstrap_initialize | /**
* Initialize
*/
public void initialize() {
applicationDeployer.initialize();
} | 3.68 |
hbase_FileIOEngine_shutdown | /**
* Close the underlying files.
*/
@Override
public void shutdown() {
for (int i = 0; i < filePaths.length; i++) {
try {
if (fileChannels[i] != null) {
fileChannels[i].close();
}
if (rafs[i] != null) {
rafs[i].close();
}
} catch (IOException ex) {
LOG.error("Failed closing " + filePaths[i] + " when shutting down the IOEngine", ex);
}
}
} | 3.68 |
framework_VaadinService_ensureAccessQueuePurged | /**
* Makes sure the pending access queue is purged for the provided session.
* If the session is currently locked by the current thread or some other
* thread, the queue will be purged when the session is unlocked. If the
* lock is not held by any thread, it is acquired and the queue is purged
* right away.
*
* @since 7.1.2
* @param session
* the session for which the access queue should be purged
*/
public void ensureAccessQueuePurged(VaadinSession session) {
/*
* If no thread is currently holding the lock, pending changes for UIs
* with automatic push would not be processed and pushed until the next
* time there is a request or someone does an explicit push call.
*
* To remedy this, we try to get the lock at this point. If the lock is
* currently held by another thread, we just back out as the queue will
* get purged once it is released. If the lock is held by the current
* thread, we just release it knowing that the queue gets purged once
* the lock is ultimately released. If the lock is not held by any
* thread and we acquire it, we just release it again to purge the queue
* right away.
*/
try {
// tryLock() would be shorter, but it does not guarantee fairness
if (session.getLockInstance().tryLock(0, TimeUnit.SECONDS)) {
// unlock triggers runPendingAccessTasks
session.unlock();
}
} catch (InterruptedException e) {
// Just ignore
}
} | 3.68 |
hadoop_HdfsFileStatus_owner | /**
* Set the owner for this entity (default = null).
* @param owner Owner
* @return This Builder instance
*/
public Builder owner(String owner) {
this.owner = owner;
return this;
} | 3.68 |
framework_ComplexRenderer_destroy | /**
* Called when the renderer is deemed to be destroyed and no longer used by
* the Grid.
*/
public void destroy() {
// Implement if needed
} | 3.68 |
flink_SqlJsonValueFunctionWrapper_explicitTypeSpec | /**
* Copied and modified from the original {@link SqlJsonValueFunction}.
*
* <p>Changes: Instead of returning {@link Optional} this method returns null directly.
*/
private static RelDataType explicitTypeSpec(SqlOperatorBinding opBinding) {
if (opBinding.getOperandCount() > 2
&& opBinding.isOperandLiteral(2, false)
&& opBinding.getOperandLiteralValue(2, Object.class)
instanceof SqlJsonValueReturning) {
return opBinding.getOperandType(3);
}
return null;
} | 3.68 |
framework_ReflectTools_checkClassAccessibility | /**
* Checks whether the provided class is externally accessible for
* instantiation (i.e. it is not a non-static inner class and not a local
* class).
*
* @param cls
* type to check
*/
private static void checkClassAccessibility(Class<?> cls) {
if (cls.isMemberClass() && !Modifier.isStatic(cls.getModifiers())) {
throw new IllegalArgumentException(String.format(
CREATE_INSTANCE_FAILED_FOR_NON_STATIC_MEMBER_CLASS,
cls.getName()));
} else if (cls.isLocalClass()) {
throw new IllegalArgumentException(String
.format(CREATE_INSTANCE_FAILED_LOCAL_CLASS, cls.getName()));
}
} | 3.68 |
flink_HiveConfUtils_create | /**
* Create a HiveConf instance from a Hadoop configuration. Since {@link
* HiveConf#HiveConf(org.apache.hadoop.conf.Configuration, java.lang.Class)} overrides
* properties in the Hadoop configuration with Hive default values
* ({@link org.apache.hadoop.hive.conf.HiveConf.ConfVars}), this method should be used to create
* the HiveConf instance from a Hadoop configuration.
*
* @param conf Hadoop configuration
* @return HiveConf instance
*/
public static HiveConf create(Configuration conf) {
HiveConf hiveConf = new HiveConf(conf, HiveConf.class);
// to make sure Hive configuration properties in conf not be overridden
hiveConf.addResource(conf);
return hiveConf;
} | 3.68 |
hudi_HoodieLogFileReader_readBlock | // TODO : convert content and block length to long by using ByteBuffer, raw byte [] allows
// for max of Integer size
private HoodieLogBlock readBlock() throws IOException {
int blockSize;
long blockStartPos = inputStream.getPos();
try {
// 1. Read the total size of the block
blockSize = (int) inputStream.readLong();
} catch (EOFException | CorruptedLogFileException e) {
// An exception reading any of the above indicates a corrupt block
// Create a corrupt block by finding the next MAGIC marker or EOF
return createCorruptBlock(blockStartPos);
}
// We may have had a crash which could have written this block partially
// Skip blockSize in the stream and we should either find a sync marker (start of the next
// block) or EOF. If we did not find either of it, then this block is a corrupted block.
boolean isCorrupted = isBlockCorrupted(blockSize);
if (isCorrupted) {
return createCorruptBlock(blockStartPos);
}
// 2. Read the version for this log format
HoodieLogFormat.LogFormatVersion nextBlockVersion = readVersion();
// 3. Read the block type for a log block
HoodieLogBlockType blockType = tryReadBlockType(nextBlockVersion);
// 4. Read the header for a log block, if present
Map<HeaderMetadataType, String> header =
nextBlockVersion.hasHeader() ? HoodieLogBlock.getLogMetadata(inputStream) : null;
// 5. Read the content length for the content
// Fallback to full-block size if no content-length
// TODO replace w/ hasContentLength
int contentLength =
nextBlockVersion.getVersion() != HoodieLogFormatVersion.DEFAULT_VERSION ? (int) inputStream.readLong() : blockSize;
// 6. Read the content or skip content based on IO vs Memory trade-off by client
long contentPosition = inputStream.getPos();
boolean shouldReadLazily = readBlockLazily && nextBlockVersion.getVersion() != HoodieLogFormatVersion.DEFAULT_VERSION;
Option<byte[]> content = HoodieLogBlock.tryReadContent(inputStream, contentLength, shouldReadLazily);
// 7. Read footer if any
Map<HeaderMetadataType, String> footer =
nextBlockVersion.hasFooter() ? HoodieLogBlock.getLogMetadata(inputStream) : null;
// 8. Read log block length, if present. This acts as a reverse pointer when traversing a
// log file in reverse
if (nextBlockVersion.hasLogBlockLength()) {
inputStream.readLong();
}
// 9. Read the log block end position in the log file
long blockEndPos = inputStream.getPos();
HoodieLogBlock.HoodieLogBlockContentLocation logBlockContentLoc =
new HoodieLogBlock.HoodieLogBlockContentLocation(hadoopConf, logFile, contentPosition, contentLength, blockEndPos);
switch (Objects.requireNonNull(blockType)) {
case AVRO_DATA_BLOCK:
if (nextBlockVersion.getVersion() == HoodieLogFormatVersion.DEFAULT_VERSION) {
return HoodieAvroDataBlock.getBlock(content.get(), readerSchema, internalSchema);
} else {
return new HoodieAvroDataBlock(inputStream, content, readBlockLazily, logBlockContentLoc,
getTargetReaderSchemaForBlock(), header, footer, keyField);
}
case HFILE_DATA_BLOCK:
checkState(nextBlockVersion.getVersion() != HoodieLogFormatVersion.DEFAULT_VERSION,
String.format("HFile block could not be of version (%d)", HoodieLogFormatVersion.DEFAULT_VERSION));
return new HoodieHFileDataBlock(inputStream, content, readBlockLazily, logBlockContentLoc,
Option.ofNullable(readerSchema), header, footer, enableRecordLookups, logFile.getPath());
case PARQUET_DATA_BLOCK:
checkState(nextBlockVersion.getVersion() != HoodieLogFormatVersion.DEFAULT_VERSION,
String.format("Parquet block could not be of version (%d)", HoodieLogFormatVersion.DEFAULT_VERSION));
return new HoodieParquetDataBlock(inputStream, content, readBlockLazily, logBlockContentLoc,
getTargetReaderSchemaForBlock(), header, footer, keyField);
case DELETE_BLOCK:
return new HoodieDeleteBlock(content, inputStream, readBlockLazily, Option.of(logBlockContentLoc), header, footer);
case COMMAND_BLOCK:
return new HoodieCommandBlock(content, inputStream, readBlockLazily, Option.of(logBlockContentLoc), header, footer);
case CDC_DATA_BLOCK:
return new HoodieCDCDataBlock(inputStream, content, readBlockLazily, logBlockContentLoc, readerSchema, header, keyField);
default:
throw new HoodieNotSupportedException("Unsupported Block " + blockType);
}
} | 3.68 |
pulsar_ProducerConfiguration_setMaxPendingMessagesAcrossPartitions | /**
* Set the number of max pending messages across all the partitions
* <p>
* This setting will be used to lower the max pending messages for each partition
* ({@link #setMaxPendingMessages(int)}), if the total exceeds the configured value.
*
* @param maxPendingMessagesAcrossPartitions
*/
public void setMaxPendingMessagesAcrossPartitions(int maxPendingMessagesAcrossPartitions) {
conf.setMaxPendingMessagesAcrossPartitions(maxPendingMessagesAcrossPartitions);
} | 3.68 |
framework_VAccordion_getWidgetWidth | /**
* Returns the offset width of the wrapped widget.
*
* @return the offset width in pixels, or zero if no widget is set
*/
public int getWidgetWidth() {
if (widget == null) {
return 0;
}
return widget.getOffsetWidth();
} | 3.68 |
hbase_RegionReplicationSink_replicated | /**
* Should be called regardless of the result of the replication operation. Unless you still want
* to reuse this entry, you must call this method to release any off-heap memory it may hold.
*/
void replicated() {
if (rpcCall != null) {
rpcCall.releaseByWAL();
}
} | 3.68 |
hadoop_AsyncDataService_execute | /**
* Execute the task sometime in the future.
*/
synchronized void execute(Runnable task) {
if (executor == null) {
throw new RuntimeException("AsyncDataService is already shutdown");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Current active thread number: " + executor.getActiveCount()
+ " queue size: " + executor.getQueue().size()
+ " scheduled task number: " + executor.getTaskCount());
}
executor.execute(task);
} | 3.68 |
hadoop_UriUtils_containsAbfsUrl | /**
* Checks whether a string includes abfs url.
* @param string the string to check.
* @return true if string has abfs url.
*/
public static boolean containsAbfsUrl(final String string) {
if (string == null || string.isEmpty()) {
return false;
}
return ABFS_URI_PATTERN.matcher(string).matches();
} | 3.68 |
graphhopper_FindMinMax_findMinMax | /**
* This method returns, in {@code min}, the smallest value possible and, in {@code max}, the
* smallest value that cannot be exceeded by any edge.
*/
static MinMax findMinMax(Set<String> createdObjects, MinMax minMax, List<Statement> statements, EncodedValueLookup lookup) {
// 'blocks' of the statements are applied one after the other. A block consists of one (if) or more statements (elseif+else)
List<List<Statement>> blocks = splitIntoBlocks(statements);
for (List<Statement> block : blocks) findMinMaxForBlock(createdObjects, minMax, block, lookup);
return minMax;
} | 3.68 |
Activiti_DefaultDeploymentCache_size | // For testing purposes only
public int size() {
return cache.size();
} | 3.68 |
flink_OneInputStateTransformation_keyBy | /**
* Partitions the operator state of a {@link OperatorTransformation} using field expressions. A
* field expression is either the name of a public field or a getter method with parentheses of
* the {@code OperatorTransformation}'s underlying type. A dot can be used to drill down into
* objects, as in {@code "field1.getInnerField2()" }.
*
* @param fields One or more field expressions on which the state of the {@link
* OperatorTransformation} operators will be partitioned.
* @return The {@code OperatorTransformation} with partitioned state (i.e. KeyedStream)
*/
public KeyedStateTransformation<Tuple, T> keyBy(String... fields) {
return keyBy(new Keys.ExpressionKeys<>(fields, stream.getType()));
} | 3.68 |
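For illustration, a short sketch of partitioning state by a field expression; `Account` is a hypothetical POJO with a public `id` field, and `transformation` is assumed to already exist:

```java
// Sketch: 'transformation' is assumed to be a OneInputStateTransformation<Account>,
// where Account is a hypothetical POJO with a public 'id' field.
KeyedStateTransformation<Tuple, Account> keyed = transformation.keyBy("id");
// Nested fields can be addressed with dots, e.g. transformation.keyBy("owner.getAddress()")
```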
hadoop_KerberosAuthException_getKeytabFile | /** @return The keytab file path, or null if not set. */
public String getKeytabFile() {
return keytabFile;
} | 3.68 |
flink_FileCache_shutdown | /** Shuts down the file cache by shutting down the cleanup executor and deleting all storage directories. */
public void shutdown() {
synchronized (lock) {
// first shutdown the thread pool
ScheduledExecutorService es = this.executorService;
if (es != null) {
es.shutdown();
try {
es.awaitTermination(cleanupInterval, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
// may happen
}
}
entries.clear();
jobRefHolders.clear();
// clean up the all storage directories
for (File dir : storageDirectories) {
try {
FileUtils.deleteDirectory(dir);
LOG.info("removed file cache directory {}", dir.getAbsolutePath());
} catch (IOException e) {
LOG.error(
"File cache could not properly clean up storage directory: {}",
dir.getAbsolutePath(),
e);
}
}
// Remove shutdown hook to prevent resource leaks
ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), LOG);
}
} | 3.68 |
hbase_ServerName_parseVersionedServerName | /**
* Use this method when instantiating a {@link ServerName} from bytes obtained from a call to
* {@link #getVersionedBytes()}. It takes care of the case where the bytes were written by an
* earlier version of HBase.
* @param versionedBytes Pass bytes gotten from a call to {@link #getVersionedBytes()}
* @return A ServerName instance.
* @see #getVersionedBytes()
*/
public static ServerName parseVersionedServerName(final byte[] versionedBytes) {
// Version is a short.
short version = Bytes.toShort(versionedBytes);
if (version == VERSION) {
int length = versionedBytes.length - Bytes.SIZEOF_SHORT;
return valueOf(Bytes.toString(versionedBytes, Bytes.SIZEOF_SHORT, length));
}
// Presume the bytes were written with an old version of hbase and that the
// bytes are actually a String of the form "'<hostname>' ':' '<port>'".
return valueOf(Bytes.toString(versionedBytes), NON_STARTCODE);
} | 3.68 |
flink_ExtractionUtils_isStructuredFieldDirectlyWritable | /** Checks whether a field is directly writable without a setter or constructor. */
public static boolean isStructuredFieldDirectlyWritable(Field field) {
final int m = field.getModifiers();
// field is immutable
if (Modifier.isFinal(m)) {
return false;
}
// field is directly writable
return Modifier.isPublic(m);
} | 3.68 |
hbase_PrivateCellUtil_createFirstOnRowCol | /**
* Create a Cell that is smaller than all other possible Cells for the given Cell's rk:cf and
* passed qualifier.
* @return First possible Cell on the passed Cell's rk:cf and the passed qualifier.
*/
public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) {
if (cell instanceof ByteBufferExtendedCell) {
return new FirstOnRowColByteBufferExtendedCell(
((ByteBufferExtendedCell) cell).getRowByteBuffer(),
((ByteBufferExtendedCell) cell).getRowPosition(), cell.getRowLength(),
((ByteBufferExtendedCell) cell).getFamilyByteBuffer(),
((ByteBufferExtendedCell) cell).getFamilyPosition(), cell.getFamilyLength(),
ByteBuffer.wrap(qArray), qoffest, qlength);
}
return new FirstOnRowColCell(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), qArray, qoffest,
qlength);
} | 3.68 |
graphhopper_MatrixResponse_getDistance | /**
* Returns the distance for the specific entry (from -> to) in meters, or {@link Double#MAX_VALUE} in case
* no connection was found (and {@link GHMRequest#setFailFast(boolean)} was set to true).
*/
public double getDistance(int from, int to) {
if (hasErrors()) {
throw new IllegalStateException("Cannot return distance (" + from + "," + to + ") if errors occurred " + getErrors());
}
if (from >= distances.length) {
throw new IllegalStateException("Cannot get 'from' " + from + " from distances with size " + distances.length);
} else if (to >= distances[from].length) {
throw new IllegalStateException("Cannot get 'to' " + to + " from distances with size " + distances[from].length);
}
return distances[from][to] == Integer.MAX_VALUE ? Double.MAX_VALUE : distances[from][to];
} | 3.68 |
framework_VTwinColSelect_getCaptionWrapper | /**
* For internal use only. May be removed or replaced in the future.
*
* @return the caption wrapper widget
*/
public Widget getCaptionWrapper() {
return captionWrapper;
} | 3.68 |
framework_Upload_addProgressListener | /**
* Adds the upload progress event listener.
*
* @param listener
* the progress listener to be added
* @since 8.0
*/
public Registration addProgressListener(ProgressListener listener) {
Objects.requireNonNull(listener, "Listener must not be null.");
if (progressListeners == null) {
progressListeners = new LinkedHashSet<>();
}
progressListeners.add(listener);
return () -> {
if (progressListeners != null) {
progressListeners.remove(listener);
}
};
} | 3.68 |
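A minimal sketch of attaching and later removing a progress listener via the returned Registration (the log output is illustrative only):

```java
// Sketch: report upload progress, then detach the listener when it is no longer needed.
Upload upload = new Upload();
Registration registration = upload.addProgressListener(
        (readBytes, contentLength) -> System.out.println(
                "Uploaded " + readBytes + " of " + contentLength + " bytes"));
// Later:
registration.remove();
```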
hbase_TableInputFormatBase_getTable | /**
* Allows subclasses to get the {@link Table}.
*/
protected Table getTable() {
if (table == null) {
throw new IllegalStateException(NOT_INITIALIZED);
}
return this.table;
} | 3.68 |
flink_SerializedCompositeKeyBuilder_buildCompositeKeyUserKey | /**
* Returns a serialized composite key, from the key and key-group provided in a previous call to
* {@link #setKeyAndKeyGroup(Object, int)} and the namespace provided in {@link
* #setNamespace(Object, TypeSerializer)}, followed by the given user-key.
*
* @param userKey the user-key to concatenate for the serialized composite key, after the
* namespace.
* @param userKeySerializer the serializer to obtain the serialized form of the user-key.
* @param <UK> the type of the user-key.
* @return the bytes for the serialized composite key of key-group, key, namespace.
*/
@Nonnull
public <UK> byte[] buildCompositeKeyUserKey(
@Nonnull UK userKey, @Nonnull TypeSerializer<UK> userKeySerializer) throws IOException {
// this should only be called when there is already a namespace written.
assert isNamespaceWritten();
resetToNamespace();
userKeySerializer.serialize(userKey, keyOutView);
return keyOutView.getCopyOfBuffer();
} | 3.68 |
hadoop_FederationRegistryClient_loadStateFromRegistry | /**
* Load the information of one application from registry.
*
* @param appId application id
* @return the sub-cluster to UAM token mapping
*/
public synchronized Map<String, Token<AMRMTokenIdentifier>>
loadStateFromRegistry(ApplicationId appId) {
Map<String, Token<AMRMTokenIdentifier>> retMap = new HashMap<>();
// Suppress the exception here because it is valid that the entry does not
// exist
List<String> subclusters = null;
try {
subclusters = listDirRegistry(this.registry, this.user,
getRegistryKey(appId, null), false);
} catch (YarnException e) {
LOG.warn("Unexpected exception from listDirRegistry", e);
}
if (subclusters == null) {
LOG.info("Application {} does not exist in registry", appId);
return retMap;
}
// Read the amrmToken for each sub-cluster with an existing UAM
for (String scId : subclusters) {
LOG.info("Reading amrmToken for subcluster {} for {}", scId, appId);
String key = getRegistryKey(appId, scId);
try {
String tokenString = readRegistry(this.registry, this.user, key, true);
if (tokenString == null) {
throw new YarnException("Null string from readRegistry key " + key);
}
Token<AMRMTokenIdentifier> amrmToken = new Token<>();
amrmToken.decodeFromUrlString(tokenString);
// Clear the service field, as if RM just issued the token
amrmToken.setService(new Text());
retMap.put(scId, amrmToken);
} catch (Exception e) {
LOG.error("Failed reading registry key {}, skipping subcluster {}.", key, scId, e);
}
}
// Override existing map if there
this.appSubClusterTokenMap.put(appId, new ConcurrentHashMap<>(retMap));
return retMap;
} | 3.68 |
flink_HiveParserSemanticAnalyzer_addCTEAsSubQuery | /*
* If a CTE is referenced in a QueryBlock:
* - add it as a SubQuery for now.
* - SQ.alias is the alias used in HiveParserQB. (if no alias is specified,
* it used the CTE name. Works just like table references)
* - Adding SQ done by:
* - copying AST of CTE
* - setting ASTOrigin on cloned AST.
* - trigger phase 1 on new HiveParserQBExpr.
* - update HiveParserQB data structs: remove this as a table reference, move it to a SQ invocation.
*/
private void addCTEAsSubQuery(HiveParserQB qb, String cteName, String cteAlias)
throws SemanticException {
cteAlias = cteAlias == null ? cteName : cteAlias;
HiveParserBaseSemanticAnalyzer.CTEClause cte = findCTEFromName(qb, cteName);
HiveParserASTNode cteQryNode = cte.cteNode;
HiveParserQBExpr cteQBExpr = new HiveParserQBExpr(cteAlias);
doPhase1QBExpr(cteQryNode, cteQBExpr, qb.getId(), cteAlias);
qb.rewriteCTEToSubq(cteAlias, cteName, cteQBExpr);
} | 3.68 |
hadoop_WordMedian_map | /**
* Emits a key-value pair for counting the word. Outputs are (IntWritable,
* IntWritable).
*
* @param value
* This will be a line of text coming in from our input file.
*/
public void map(Object key, Text value, Context context)
throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
String string = itr.nextToken();
length.set(string.length());
context.write(length, ONE);
}
} | 3.68 |
flink_StreamingJobGraphGenerator_buildVertexRegionSlotSharingGroups | /**
* Maps a vertex to its region slot sharing group. If {@link
* StreamGraph#isAllVerticesInSameSlotSharingGroupByDefault()} returns true, all regions will be
* in the same slot sharing group.
*/
private Map<JobVertexID, SlotSharingGroup> buildVertexRegionSlotSharingGroups() {
final Map<JobVertexID, SlotSharingGroup> vertexRegionSlotSharingGroups = new HashMap<>();
final SlotSharingGroup defaultSlotSharingGroup = new SlotSharingGroup();
streamGraph
.getSlotSharingGroupResource(StreamGraphGenerator.DEFAULT_SLOT_SHARING_GROUP)
.ifPresent(defaultSlotSharingGroup::setResourceProfile);
final boolean allRegionsInSameSlotSharingGroup =
streamGraph.isAllVerticesInSameSlotSharingGroupByDefault();
final Iterable<DefaultLogicalPipelinedRegion> regions =
DefaultLogicalTopology.fromJobGraph(jobGraph).getAllPipelinedRegions();
for (DefaultLogicalPipelinedRegion region : regions) {
final SlotSharingGroup regionSlotSharingGroup;
if (allRegionsInSameSlotSharingGroup) {
regionSlotSharingGroup = defaultSlotSharingGroup;
} else {
regionSlotSharingGroup = new SlotSharingGroup();
streamGraph
.getSlotSharingGroupResource(
StreamGraphGenerator.DEFAULT_SLOT_SHARING_GROUP)
.ifPresent(regionSlotSharingGroup::setResourceProfile);
}
for (LogicalVertex vertex : region.getVertices()) {
vertexRegionSlotSharingGroups.put(vertex.getId(), regionSlotSharingGroup);
}
}
return vertexRegionSlotSharingGroups;
} | 3.68 |
flink_NettyShuffleEnvironmentConfiguration_fromConfiguration | /**
* Utility method to extract network related parameters from the configuration and to sanity
* check them.
*
* @param configuration configuration object
* @param networkMemorySize the size of memory reserved for shuffle environment
* @param localTaskManagerCommunication true, to skip initializing the network stack
* @param taskManagerAddress identifying the IP address under which the TaskManager will be
* accessible
* @return NettyShuffleEnvironmentConfiguration
*/
public static NettyShuffleEnvironmentConfiguration fromConfiguration(
Configuration configuration,
MemorySize networkMemorySize,
boolean localTaskManagerCommunication,
InetAddress taskManagerAddress) {
final PortRange dataBindPortRange = getDataBindPortRange(configuration);
final int pageSize = ConfigurationParserUtils.getPageSize(configuration);
final NettyConfig nettyConfig =
createNettyConfig(
configuration,
localTaskManagerCommunication,
taskManagerAddress,
dataBindPortRange);
final int numberOfNetworkBuffers =
calculateNumberOfNetworkBuffers(configuration, networkMemorySize, pageSize);
int initialRequestBackoff =
configuration.getInteger(
NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_INITIAL);
int maxRequestBackoff =
configuration.getInteger(
NettyShuffleEnvironmentOptions.NETWORK_REQUEST_BACKOFF_MAX);
int listenerTimeout =
(int)
configuration
.get(
NettyShuffleEnvironmentOptions
.NETWORK_PARTITION_REQUEST_TIMEOUT)
.toMillis();
int buffersPerChannel =
configuration.getInteger(
NettyShuffleEnvironmentOptions.NETWORK_BUFFERS_PER_CHANNEL);
int extraBuffersPerGate =
configuration.getInteger(
NettyShuffleEnvironmentOptions.NETWORK_EXTRA_BUFFERS_PER_GATE);
Optional<Integer> maxRequiredBuffersPerGate =
configuration.getOptional(
NettyShuffleEnvironmentOptions.NETWORK_READ_MAX_REQUIRED_BUFFERS_PER_GATE);
int maxBuffersPerChannel =
configuration.getInteger(
NettyShuffleEnvironmentOptions.NETWORK_MAX_BUFFERS_PER_CHANNEL);
int maxOverdraftBuffersPerGate =
configuration.getInteger(
NettyShuffleEnvironmentOptions.NETWORK_MAX_OVERDRAFT_BUFFERS_PER_GATE);
long batchShuffleReadMemoryBytes =
configuration.get(TaskManagerOptions.NETWORK_BATCH_SHUFFLE_READ_MEMORY).getBytes();
int sortShuffleMinBuffers =
configuration.getInteger(
NettyShuffleEnvironmentOptions.NETWORK_SORT_SHUFFLE_MIN_BUFFERS);
int sortShuffleMinParallelism =
configuration.getInteger(
NettyShuffleEnvironmentOptions.NETWORK_SORT_SHUFFLE_MIN_PARALLELISM);
boolean isNetworkDetailedMetrics =
configuration.getBoolean(NettyShuffleEnvironmentOptions.NETWORK_DETAILED_METRICS);
String[] tempDirs = ConfigurationUtils.parseTempDirectories(configuration);
// Shuffle the data directories to make it fairer for directory selection between different
// TaskManagers, which is good for load balance especially when there are multiple disks.
List<String> shuffleDirs = Arrays.asList(tempDirs);
Collections.shuffle(shuffleDirs);
Duration requestSegmentsTimeout =
Duration.ofMillis(
configuration.getLong(
NettyShuffleEnvironmentOptions
.NETWORK_EXCLUSIVE_BUFFERS_REQUEST_TIMEOUT_MILLISECONDS));
BoundedBlockingSubpartitionType blockingSubpartitionType =
getBlockingSubpartitionType(configuration);
boolean batchShuffleCompressionEnabled =
configuration.get(NettyShuffleEnvironmentOptions.BATCH_SHUFFLE_COMPRESSION_ENABLED);
String compressionCodec =
configuration.getString(NettyShuffleEnvironmentOptions.SHUFFLE_COMPRESSION_CODEC);
int maxNumConnections =
Math.max(
1,
configuration.getInteger(
NettyShuffleEnvironmentOptions.MAX_NUM_TCP_CONNECTIONS));
boolean connectionReuseEnabled =
configuration.get(
NettyShuffleEnvironmentOptions.TCP_CONNECTION_REUSE_ACROSS_JOBS_ENABLED);
int hybridShuffleSpilledIndexSegmentSize =
configuration.get(
NettyShuffleEnvironmentOptions
.HYBRID_SHUFFLE_SPILLED_INDEX_REGION_GROUP_SIZE);
long hybridShuffleNumRetainedInMemoryRegionsMax =
configuration.get(
NettyShuffleEnvironmentOptions
.HYBRID_SHUFFLE_NUM_RETAINED_IN_MEMORY_REGIONS_MAX);
checkArgument(buffersPerChannel >= 0, "Must be non-negative.");
checkArgument(
!maxRequiredBuffersPerGate.isPresent() || maxRequiredBuffersPerGate.get() >= 1,
String.format(
"At least one buffer is required for each gate, please increase the value of %s.",
NettyShuffleEnvironmentOptions.NETWORK_READ_MAX_REQUIRED_BUFFERS_PER_GATE
.key()));
checkArgument(
extraBuffersPerGate >= 1,
String.format(
"The configured floating buffer should be at least 1, please increase the value of %s.",
NettyShuffleEnvironmentOptions.NETWORK_EXTRA_BUFFERS_PER_GATE.key()));
TieredStorageConfiguration tieredStorageConfiguration = null;
if ((configuration.get(BATCH_SHUFFLE_MODE) == ALL_EXCHANGES_HYBRID_FULL
|| configuration.get(BATCH_SHUFFLE_MODE) == ALL_EXCHANGES_HYBRID_SELECTIVE)
&& configuration.getBoolean(NETWORK_HYBRID_SHUFFLE_ENABLE_NEW_MODE)) {
tieredStorageConfiguration =
TieredStorageConfiguration.builder(
pageSize,
configuration.getString(
NETWORK_HYBRID_SHUFFLE_REMOTE_STORAGE_BASE_PATH))
.build();
}
return new NettyShuffleEnvironmentConfiguration(
numberOfNetworkBuffers,
pageSize,
initialRequestBackoff,
maxRequestBackoff,
listenerTimeout,
buffersPerChannel,
extraBuffersPerGate,
maxRequiredBuffersPerGate,
requestSegmentsTimeout,
isNetworkDetailedMetrics,
nettyConfig,
shuffleDirs.toArray(tempDirs),
blockingSubpartitionType,
batchShuffleCompressionEnabled,
compressionCodec,
maxBuffersPerChannel,
batchShuffleReadMemoryBytes,
sortShuffleMinBuffers,
sortShuffleMinParallelism,
BufferDebloatConfiguration.fromConfiguration(configuration),
maxNumConnections,
connectionReuseEnabled,
maxOverdraftBuffersPerGate,
hybridShuffleSpilledIndexSegmentSize,
hybridShuffleNumRetainedInMemoryRegionsMax,
tieredStorageConfiguration);
} | 3.68 |
flink_BufferBuilder_commit | /**
* Make the change visible to the readers. This is a costly operation (volatile access), so in
* case of bulk writes it is better to commit them all together instead of one by one.
*/
public void commit() {
positionMarker.commit();
} | 3.68 |
framework_AutomaticImmediate_fireValueChange | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractField#fireValueChange(boolean)
*/
@Override
protected void fireValueChange(boolean repaintIsNotNeeded) {
log("fireValueChange");
super.fireValueChange(repaintIsNotNeeded);
} | 3.68 |
hbase_HFileSystem_getStoragePolicyForOldHDFSVersion | /**
* Before Hadoop 2.8.0 the FileSystem interface had no getStoragePolicy method, and we need to
* stay compatible with those versions. See HADOOP-12161 for more details.
* @param path Path to get storage policy against
* @return the storage policy name
*/
private String getStoragePolicyForOldHDFSVersion(Path path) {
try {
if (this.fs instanceof DistributedFileSystem) {
DistributedFileSystem dfs = (DistributedFileSystem) this.fs;
HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath());
if (null != status) {
if (unspecifiedStoragePolicyId < 0) {
// Get the unspecified id field through reflection to avoid compilation error.
// In later version BlockStoragePolicySuite#ID_UNSPECIFIED is moved to
// HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
Field idUnspecified = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
unspecifiedStoragePolicyId = idUnspecified.getByte(BlockStoragePolicySuite.class);
}
byte storagePolicyId = status.getStoragePolicy();
if (storagePolicyId != unspecifiedStoragePolicyId) {
BlockStoragePolicy[] policies = dfs.getStoragePolicies();
for (BlockStoragePolicy policy : policies) {
if (policy.getId() == storagePolicyId) {
return policy.getName();
}
}
}
}
}
} catch (Throwable e) {
LOG.warn("failed to get block storage policy of [" + path + "]", e);
}
return null;
} | 3.68 |
hadoop_FilePosition_bufferFullyRead | /**
* Determines whether the current buffer has been fully read.
*
* @return true if the current buffer has been fully read, false otherwise.
*/
public boolean bufferFullyRead() {
throwIfInvalidBuffer();
return (bufferStartOffset == readStartOffset)
&& (relative() == buffer.limit())
&& (numBytesRead == buffer.limit());
} | 3.68 |
querydsl_PathBuilder_getString | /**
* Create a new String typed path
*
* @param property property name
* @return property path
*/
public StringPath getString(String property) {
validate(property, String.class);
return super.createString(property);
} | 3.68 |
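A short usage sketch; the `User` entity and its `name` property are hypothetical:

```java
// Sketch: build a String-typed path dynamically and use it in a predicate.
PathBuilder<User> user = new PathBuilder<>(User.class, "user");
StringPath name = user.getString("name");
BooleanExpression startsWithA = name.startsWith("A");
```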
framework_GridElement_getTableWrapper | /**
* Get the element wrapping the table element.
*
* @return The element that wraps the table element
*/
public TestBenchElement getTableWrapper() {
List<WebElement> rootElements = findElements(By.xpath("./div"));
return (TestBenchElement) rootElements.get(2);
} | 3.68 |
hadoop_WriteOperationHelper_retry | /**
* Execute a function with retry processing.
* Also activates the current span.
* @param <T> type of return value
* @param action action to execute (used in error messages)
* @param path path of work (used in error messages)
* @param idempotent does the operation have semantics
* which mean that it can be retried even if was already executed?
* @param operation operation to execute
* @return the result of the call
* @throws IOException any IOE raised, or translated exception
*/
public <T> T retry(String action,
String path,
boolean idempotent,
CallableRaisingIOE<T> operation)
throws IOException {
activateAuditSpan();
return invoker.retry(action, path, idempotent, operation);
} | 3.68 |
hbase_LeaseManager_removeLease | /**
* Remove named lease. Lease is removed from the map of leases.
* @param leaseName name of lease
* @return Removed lease
*/
Lease removeLease(final String leaseName) throws LeaseException {
Lease lease = leases.remove(leaseName);
if (lease == null) {
throw new LeaseException("lease '" + leaseName + "' does not exist");
}
return lease;
} | 3.68 |
dubbo_ScopeModelAware_setFrameworkModel | /**
* Override this method if you only need the framework model
* @param frameworkModel
*/
default void setFrameworkModel(FrameworkModel frameworkModel) {} | 3.68 |
hadoop_Chain_addReducer | /**
* Add reducer that reads from context and writes to a queue
*/
@SuppressWarnings("unchecked")
void addReducer(TaskInputOutputContext inputContext,
ChainBlockingQueue<KeyValuePair<?, ?>> outputQueue) throws IOException,
InterruptedException {
Class<?> keyOutClass = rConf.getClass(REDUCER_OUTPUT_KEY_CLASS,
Object.class);
Class<?> valueOutClass = rConf.getClass(REDUCER_OUTPUT_VALUE_CLASS,
Object.class);
RecordWriter rw = new ChainRecordWriter(keyOutClass, valueOutClass,
outputQueue, rConf);
Reducer.Context reducerContext = createReduceContext(rw,
(ReduceContext) inputContext, rConf);
ReduceRunner runner = new ReduceRunner(reducerContext, reducer, rw);
threads.add(runner);
} | 3.68 |
hudi_Registry_getRegistry | /**
* Get (or create) the registry for a provided name and given class.
*
* @param registryName Name of the registry.
* @param clazz The fully qualified name of the registry class to create.
*/
static Registry getRegistry(String registryName, String clazz) {
synchronized (Registry.class) {
if (!REGISTRY_MAP.containsKey(registryName)) {
Registry registry = (Registry)ReflectionUtils.loadClass(clazz, registryName);
REGISTRY_MAP.put(registryName, registry);
}
return REGISTRY_MAP.get(registryName);
}
} | 3.68 |
hmily_HmilyColumnSegment_getQualifiedName | /**
* Get qualified name with quote characters.
* i.e. `field1`, `table1`, field1, table1, `table1`.`field1`, `table1`.field1, table1.`field1` or table1.field1
*
* @return qualified name with quote characters
*/
public String getQualifiedName() {
return null == owner
? identifier.getValueWithQuoteCharacters()
: String.join(".", owner.getIdentifier().getValueWithQuoteCharacters(), identifier.getValueWithQuoteCharacters());
} | 3.68 |
pulsar_ReaderConfiguration_getSubscriptionRolePrefix | /**
* @return the subscription role prefix for subscription auth
*/
public String getSubscriptionRolePrefix() {
return conf.getSubscriptionRolePrefix();
} | 3.68 |
hibernate-validator_ConstraintDescriptorImpl_getCompositionType | /**
* @return the compositionType
*/
public CompositionType getCompositionType() {
return compositionType;
} | 3.68 |
dubbo_DefaultExecutorRepository_createExecutorIfAbsent | /**
* Called when the server or client instance is initializing.
*
* @param url
* @return
*/
@Override
public synchronized ExecutorService createExecutorIfAbsent(URL url) {
String executorKey = getExecutorKey(url);
ConcurrentMap<String, ExecutorService> executors =
ConcurrentHashMapUtils.computeIfAbsent(data, executorKey, k -> new ConcurrentHashMap<>());
String executorCacheKey = getExecutorSecondKey(url);
url = setThreadNameIfAbsent(url, executorCacheKey);
URL finalUrl = url;
ExecutorService executor =
ConcurrentHashMapUtils.computeIfAbsent(executors, executorCacheKey, k -> createExecutor(finalUrl));
// If executor has been shut down, create a new one
if (executor.isShutdown() || executor.isTerminated()) {
executors.remove(executorCacheKey);
executor = createExecutor(url);
executors.put(executorCacheKey, executor);
}
dataStore.put(executorKey, executorCacheKey, executor);
return executor;
} | 3.68 |
morf_AbstractSqlDialectTest_testRenameIndexStatements | /**
* Tests that the syntax is correct for renaming an index.
*/
@SuppressWarnings("unchecked")
@Test
public void testRenameIndexStatements() {
AlteredTable alteredTable = new AlteredTable(testTable, null, null,
FluentIterable.from(testTable.indexes()).transform(Index::getName).filter(i -> !i.equals(TEST_1)).append(TEST_2),
ImmutableList.of(index(TEST_2).columns(INT_FIELD, FLOAT_FIELD).unique())
);
compareStatements(expectedRenameIndexStatements(), testDialect.renameIndexStatements(alteredTable, TEST_1, TEST_2));
} | 3.68 |
pulsar_ManagedLedgerImpl_asyncCreateLedger | /**
* Creates a ledger asynchronously and schedules a timeout task to check whether the ledger
* creation has completed; otherwise it fails the callback with a TimeoutException.
*
* @param bookKeeper
* @param config
* @param digestType
* @param cb
* @param metadata
*/
protected void asyncCreateLedger(BookKeeper bookKeeper, ManagedLedgerConfig config, DigestType digestType,
CreateCallback cb, Map<String, byte[]> metadata) {
CompletableFuture<LedgerHandle> ledgerFutureHook = new CompletableFuture<>();
Map<String, byte[]> finalMetadata = new HashMap<>();
finalMetadata.putAll(ledgerMetadata);
finalMetadata.putAll(metadata);
if (config.getBookKeeperEnsemblePlacementPolicyClassName() != null
&& config.getBookKeeperEnsemblePlacementPolicyProperties() != null) {
try {
finalMetadata.putAll(LedgerMetadataUtils.buildMetadataForPlacementPolicyConfig(
config.getBookKeeperEnsemblePlacementPolicyClassName(),
config.getBookKeeperEnsemblePlacementPolicyProperties()
));
} catch (EnsemblePlacementPolicyConfig.ParseEnsemblePlacementPolicyConfigException e) {
log.error("[{}] Serialize the placement configuration failed", name, e);
cb.createComplete(Code.UnexpectedConditionException, null, ledgerFutureHook);
return;
}
}
createdLedgerCustomMetadata = finalMetadata;
try {
bookKeeper.asyncCreateLedger(config.getEnsembleSize(), config.getWriteQuorumSize(),
config.getAckQuorumSize(), digestType, config.getPassword(), cb, ledgerFutureHook, finalMetadata);
} catch (Throwable cause) {
log.error("[{}] Encountered unexpected error when creating ledger",
name, cause);
ledgerFutureHook.completeExceptionally(cause);
cb.createComplete(Code.UnexpectedConditionException, null, ledgerFutureHook);
return;
}
ScheduledFuture timeoutChecker = scheduledExecutor.schedule(() -> {
if (!ledgerFutureHook.isDone()
&& ledgerFutureHook.completeExceptionally(new TimeoutException(name + " Create ledger timeout"))) {
if (log.isDebugEnabled()) {
log.debug("[{}] Timeout creating ledger", name);
}
cb.createComplete(BKException.Code.TimeoutException, null, ledgerFutureHook);
} else {
if (log.isDebugEnabled()) {
log.debug("[{}] Ledger already created when timeout task is triggered", name);
}
}
}, config.getMetadataOperationsTimeoutSeconds(), TimeUnit.SECONDS);
ledgerFutureHook.whenComplete((ignore, ex) -> {
timeoutChecker.cancel(false);
});
} | 3.68 |
flink_BlockerSync_awaitBlocker | /**
* Waits until the blocking thread has entered the method {@link #block()} or {@link
* #blockNonInterruptible()}.
*/
public void awaitBlocker() throws InterruptedException {
synchronized (lock) {
while (!blockerReady) {
lock.wait();
}
}
} | 3.68 |
graphhopper_VectorTile_addKeys | /**
* <pre>
* Dictionary encoding for keys
* </pre>
*
* <code>repeated string keys = 3;</code>
*/
public Builder addKeys(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureKeysIsMutable();
keys_.add(value);
onChanged();
return this;
} | 3.68 |
hibernate-validator_StringHelper_join | /**
* Joins the elements of the given iterable to a string, separated by the given separator string.
*
* @param iterable the iterable to join
* @param separator the separator string
*
* @return a string made up of the string representations of the given iterable members, separated by the given separator
* string
*/
public static String join(Iterable<?> iterable, String separator) {
if ( iterable == null ) {
return null;
}
StringBuilder sb = new StringBuilder();
boolean isFirst = true;
for ( Object object : iterable ) {
if ( !isFirst ) {
sb.append( separator );
}
else {
isFirst = false;
}
sb.append( object );
}
return sb.toString();
} | 3.68 |
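A quick sketch of the documented behaviour, including the null shortcut:

```java
import java.util.Arrays;

// Sketch: join constraint names with a comma separator.
String joined = StringHelper.join(Arrays.asList("NotNull", "Size", "Email"), ", ");
// joined is "NotNull, Size, Email"
String none = StringHelper.join(null, ", ");
// none is null, as documented above
```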
hadoop_EditLogInputStream_getCachedOp | /**
* Return the cachedOp and reset it to null.
*/
FSEditLogOp getCachedOp() {
FSEditLogOp op = this.cachedOp;
cachedOp = null;
return op;
} | 3.68 |
open-banking-gateway_FintechConsentSpecSecureStorage_registerFintechUser | /**
* Registers FinTech user
* @param user User entity
* @param password Datasafe password for the user
*/
public void registerFintechUser(FintechUser user, Supplier<char[]> password) {
this.userProfile()
.createDocumentKeystore(
user.getUserIdAuth(password),
config.defaultPrivateTemplate(user.getUserIdAuth(password)).buildPrivateProfile()
);
} | 3.68 |
flink_CoGroupOperatorBase_getGroupOrder | /**
* Gets the value order for an input, i.e. the order of elements within a group. If no such
* order has been set, this method returns null.
*
* @param inputNum The number of the input (here either <i>0</i> or <i>1</i>).
* @return The group order.
*/
public Ordering getGroupOrder(int inputNum) {
if (inputNum == 0) {
return this.groupOrder1;
} else if (inputNum == 1) {
return this.groupOrder2;
} else {
throw new IndexOutOfBoundsException();
}
} | 3.68 |
hbase_CompactingMemStore_preUpdate | /**
* Issue any synchronization and tests needed before applying the update. For the compacting
* memstore this means checking that the update can increase the size without overflow.
* @param currentActive the segment to be updated
* @param cell the cell to be added
* @param memstoreSizing object to accumulate region size changes
* @return true iff can proceed with applying the update
*/
@Override
protected boolean preUpdate(MutableSegment currentActive, Cell cell,
MemStoreSizing memstoreSizing) {
if (currentActive.sharedLock()) {
if (checkAndAddToActiveSize(currentActive, cell, memstoreSizing)) {
return true;
}
currentActive.sharedUnlock();
}
return false;
} | 3.68 |
hadoop_TimelineDomains_setDomains | /**
* Set the domain list to the given list of domains
*
* @param domains
* a list of domains
*/
public void setDomains(List<TimelineDomain> domains) {
this.domains = domains;
} | 3.68 |
hadoop_SnappyCodec_getDefaultExtension | /**
* Get the default filename extension for this kind of compression.
*
* @return <code>.snappy</code>.
*/
@Override
public String getDefaultExtension() {
return CodecConstants.SNAPPY_CODEC_EXTENSION;
} | 3.68 |
flink_AsyncDataStream_addOperator | /**
* Add an AsyncWaitOperator.
*
* @param in The {@link DataStream} where the {@link AsyncWaitOperator} will be added.
* @param func {@link AsyncFunction} wrapped inside {@link AsyncWaitOperator}.
* @param timeout for the asynchronous operation to complete
* @param bufSize The max number of inputs the {@link AsyncWaitOperator} can hold inside.
* @param mode Processing mode for {@link AsyncWaitOperator}.
* @param asyncRetryStrategy AsyncRetryStrategy for {@link AsyncFunction}.
* @param <IN> Input type.
* @param <OUT> Output type.
* @return A new {@link SingleOutputStreamOperator}
*/
private static <IN, OUT> SingleOutputStreamOperator<OUT> addOperator(
DataStream<IN> in,
AsyncFunction<IN, OUT> func,
long timeout,
int bufSize,
OutputMode mode,
AsyncRetryStrategy<OUT> asyncRetryStrategy) {
if (asyncRetryStrategy != NO_RETRY_STRATEGY) {
Preconditions.checkArgument(
timeout > 0, "Timeout should be configured when do async with retry.");
}
TypeInformation<OUT> outTypeInfo =
TypeExtractor.getUnaryOperatorReturnType(
func,
AsyncFunction.class,
0,
1,
new int[] {1, 0},
in.getType(),
Utils.getCallLocationName(),
true);
// create transform
AsyncWaitOperatorFactory<IN, OUT> operatorFactory =
new AsyncWaitOperatorFactory<>(
in.getExecutionEnvironment().clean(func),
timeout,
bufSize,
mode,
asyncRetryStrategy);
return in.transform("async wait operator", outTypeInfo, operatorFactory);
} | 3.68 |
hudi_FiveToSixUpgradeHandler_deleteCompactionRequestedFileFromAuxiliaryFolder | /**
* See HUDI-6040.
*/
private void deleteCompactionRequestedFileFromAuxiliaryFolder(HoodieTable table) {
HoodieTableMetaClient metaClient = table.getMetaClient();
HoodieTimeline compactionTimeline = metaClient.getActiveTimeline().filterPendingCompactionTimeline()
.filter(instant -> instant.getState() == HoodieInstant.State.REQUESTED);
compactionTimeline.getInstantsAsStream().forEach(
deleteInstant -> {
LOG.info("Deleting instant " + deleteInstant + " in auxiliary meta path " + metaClient.getMetaAuxiliaryPath());
Path metaFile = new Path(metaClient.getMetaAuxiliaryPath(), deleteInstant.getFileName());
try {
if (metaClient.getFs().exists(metaFile)) {
metaClient.getFs().delete(metaFile, false);
LOG.info("Deleted instant file in auxiliary meta path : " + metaFile);
}
} catch (IOException e) {
throw new HoodieUpgradeDowngradeException(HoodieTableVersion.FIVE.versionCode(), HoodieTableVersion.SIX.versionCode(), true, e);
}
}
);
} | 3.68 |
flink_Types_GENERIC | /**
* Returns generic type information for any Java object. The serialization logic will use the
* general purpose serializer Kryo.
*
* <p>Generic types are black-boxes for Flink, but allow any object and null values in fields.
*
* <p>By default, serialization of this type is not very efficient. Please read the
* documentation about how to improve efficiency (namely by pre-registering classes).
*
* @param genericClass any Java class
*/
public static <T> TypeInformation<T> GENERIC(Class<T> genericClass) {
return new GenericTypeInfo<>(genericClass);
} | 3.68 |
hbase_DataBlockEncoding_getEncoder | /**
* Return new data block encoder for given algorithm type.
* @return data block encoder if algorithm is specified, null if none is selected.
*/
public DataBlockEncoder getEncoder() {
if (encoder == null && id != 0) {
// lazily create the encoder
encoder = createEncoder(encoderCls);
}
return encoder;
} | 3.68 |
shardingsphere-elasticjob_JobAPIFactory_createJobConfigurationAPI | /**
* Create job configuration API.
*
* @param connectString registry center connect string
* @param namespace registry center namespace
* @param digest registry center digest
* @return job configuration API
*/
public static JobConfigurationAPI createJobConfigurationAPI(final String connectString, final String namespace, final String digest) {
return new JobConfigurationAPIImpl(RegistryCenterFactory.createCoordinatorRegistryCenter(connectString, namespace, digest));
} | 3.68 |
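A usage sketch; the connect string and namespace are placeholders, and the null digest is assumed here to mean the registry center requires no authentication:

```java
// Sketch: obtain a job configuration API against a local ZooKeeper registry center.
JobConfigurationAPI jobConfigAPI = JobAPIFactory.createJobConfigurationAPI(
        "localhost:2181", "elasticjob-demo", null);
```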
hadoop_SerialJobFactory_update | /**
* SERIAL. Once a notification arrives from the StatsCollector about the job
* completion, simply notify the waiting thread.
*
* @param item
*/
@Override
public void update(Statistics.JobStats item) {
// Simply notify in case of serial submissions. We only care whether the
// submitted job has completed or not.
lock.lock();
try {
jobCompleted.signalAll();
} finally {
lock.unlock();
}
} | 3.68 |
morf_DataSetProducerBuilderImpl_records | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#records(java.lang.String)
*/
@Override
public List<Record> records(String tableName) {
List<Record> records = recordMap.get(tableName.toUpperCase());
if (records == null) {
throw new IllegalStateException("No record data has been provided for table [" + tableName + "]");
}
return records;
} | 3.68 |
hadoop_PutTracker_getDestKey | /**
* get the destination key. The default implementation returns the
* key passed in: there is no adjustment of the destination.
* @return the destination to use in PUT requests.
*/
public String getDestKey() {
return destKey;
} | 3.68 |
dubbo_ServiceInstancesChangedListener_onEvent | /**
* On {@link ServiceInstancesChangedEvent the service instances change event}
*
* @param event {@link ServiceInstancesChangedEvent}
*/
public void onEvent(ServiceInstancesChangedEvent event) {
if (destroyed.get() || !accept(event) || isRetryAndExpired(event)) {
return;
}
doOnEvent(event);
} | 3.68 |
framework_DragSourceExtensionConnector_fixDragImageOffsetsForDesktop | /**
* Fixes a missing or offset drag image caused by a css transform such as
* translate, by using a cloned drag image element for which the property has
* been cleared.
* <p>
* This bug only occurs on desktop with Safari (the image gets offset and
* clipped to the parts inside the element's start and end coordinates) and
* Firefox (the image gets offset); calling this method is a no-op for any
* other browser.
* <p>
* This fix is not needed if custom drag image has been used.
*
* @param dragStartEvent
* the drag start event
* @param draggedElement
* the element being dragged
*/
protected void fixDragImageOffsetsForDesktop(NativeEvent dragStartEvent,
Element draggedElement) {
BrowserInfo browserInfo = BrowserInfo.get();
final boolean isSafari = browserInfo.isSafari();
if (browserInfo.isTouchDevice()
|| !(isSafari || browserInfo.isFirefox())) {
return;
}
Element clonedElement = (Element) draggedElement.cloneNode(true);
Style clonedStyle = clonedElement.getStyle();
clonedStyle.clearProperty("transform");
// only relative, absolute and fixed positions work for safari or no
// drag image is set
clonedStyle.setPosition(Position.RELATIVE);
int transformXOffset = 0;
if (isSafari) {
transformXOffset = fixDragImageTransformForSafari(draggedElement,
clonedStyle);
}
// need to use z-index -1 or otherwise the cloned node will flash
clonedStyle.setZIndex(-1);
draggedElement.getParentElement().appendChild(clonedElement);
dragStartEvent.getDataTransfer().setDragImage(clonedElement,
WidgetUtil.getRelativeX(draggedElement, dragStartEvent)
- transformXOffset,
WidgetUtil.getRelativeY(draggedElement, dragStartEvent));
AnimationScheduler.get().requestAnimationFrame(
timestamp -> clonedElement.removeFromParent(), clonedElement);
} | 3.68 |
streampipes_PrimitivePropertyBuilder_description | /**
* Assigns a human-readable description to the event property. The description is used in the StreamPipes UI to
* better explain the meaning of the property to users.
*
* @param description
* @return this
*/
public PrimitivePropertyBuilder description(String description) {
this.eventProperty.setDescription(description);
return this;
} | 3.68 |
framework_VTabsheet_updateContentNodeHeight | /** For internal use only. May be removed or replaced in the future. */
public void updateContentNodeHeight() {
if (!isDynamicHeight()) {
int contentHeight = getOffsetHeight();
contentHeight -= deco.getOffsetHeight();
contentHeight -= tb.getOffsetHeight();
ComputedStyle cs = new ComputedStyle(contentNode);
contentHeight -= Math.ceil(cs.getPaddingHeight());
contentHeight -= Math.ceil(cs.getBorderHeight());
if (contentHeight < 0) {
contentHeight = 0;
}
// Set proper values for content element
contentNode.getStyle().setHeight(contentHeight, Unit.PX);
} else {
contentNode.getStyle().clearHeight();
}
} | 3.68 |
hadoop_AbstractTask_getTaskCmd | /**
* Get the TaskCmd for a Task.
* @return TaskCmd: the task command line, such as "sleep 10"
*/
@Override
public final String getTaskCmd() {
return taskCmd;
} | 3.68 |
hadoop_TwoColumnLayout_content | /**
* @return the class that will render the content of the page.
*/
protected Class<? extends SubView> content() {
return LipsumBlock.class;
} | 3.68 |
flink_JobEdge_getShipStrategyName | /**
* Gets the name of the ship strategy for the represented input, like "forward", "partition
* hash", "rebalance", "broadcast", ...
*
* @return The name of the ship strategy for the represented input, or null, if none was set.
*/
public String getShipStrategyName() {
return shipStrategyName;
} | 3.68 |
flink_LegacySinkTransformation_setStateKeySelector | /**
* Sets the {@link KeySelector} that must be used for partitioning keyed state of this Sink.
*
* @param stateKeySelector The {@code KeySelector} to set
*/
public void setStateKeySelector(KeySelector<T, ?> stateKeySelector) {
this.stateKeySelector = stateKeySelector;
updateManagedMemoryStateBackendUseCase(stateKeySelector != null);
} | 3.68 |
framework_ContainerHierarchicalWrapper_addListener | /**
* @deprecated As of 7.0, replaced by
* {@link #addPropertySetChangeListener(Container.PropertySetChangeListener)}
*/
@Override
@Deprecated
public void addListener(Container.PropertySetChangeListener listener) {
addPropertySetChangeListener(listener);
} | 3.68 |
hadoop_ServiceLauncher_getServiceException | /**
* Get the exit exception used to end this service.
* @return an exception, which will be null until the service
* has exited (and {@code System.exit} has not been called)
*/
public final ExitUtil.ExitException getServiceException() {
return serviceException;
} | 3.68 |
morf_SqlServerDialect_getSqlForLeftPad | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForLeftPad(org.alfasoftware.morf.sql.element.AliasedField, org.alfasoftware.morf.sql.element.AliasedField, org.alfasoftware.morf.sql.element.AliasedField)
*/
@Override
protected String getSqlForLeftPad(AliasedField field, AliasedField length, AliasedField character) {
String strField = getSqlFrom(field);
String strLength = getSqlFrom(length);
String strCharacter = getSqlFrom(character);
return String.format("CASE " +
"WHEN LEN(%s) > %s THEN " +
"LEFT(%s, %s) " +
"ELSE " +
"RIGHT(REPLICATE(%s, %s) + %s, %s) " +
"END",
strField, strLength,
strField, strLength,
strCharacter, strLength, strField, strLength);
} | 3.68 |
graphhopper_AbstractPathDetailsBuilder_endInterval | /**
* Writes the interval to the pathDetails. Ending intervals multiple times is safe; we only write
* the interval if it was open and not empty.
*
* @param lastIndex the index the PathDetail ends
*/
public void endInterval(int lastIndex) {
if (isOpen) {
currentDetail.setLast(lastIndex);
pathDetails.add(currentDetail);
}
isOpen = false;
} | 3.68 |
flink_ThreadInfoSamplesRequest_getMaxStackTraceDepth | /**
* Returns the configured maximum depth of the collected stack traces.
*
* @return the maximum depth of the collected stack traces.
*/
public int getMaxStackTraceDepth() {
return maxStackTraceDepth;
} | 3.68 |
hadoop_AHSLogsPage_content | /**
* The content of this page is the AggregatedLogsBlock
*
* @return AggregatedLogsBlock.class
*/
@Override
protected Class<? extends SubView> content() {
return AggregatedLogsBlock.class;
} | 3.68 |
framework_ApplicationConfiguration_startApplication | /**
* Starts the application with a given id by reading the configuration
* options stored by the bootstrap javascript.
*
* @param applicationId
* id of the application to load, this is also the id of the html
* element into which the application should be rendered.
*/
public static void startApplication(final String applicationId) {
Scheduler.get().scheduleDeferred(() -> {
Profiler.enter("ApplicationConfiguration.startApplication");
ApplicationConfiguration appConf = getConfigFromDOM(applicationId);
ApplicationConnection a = GWT.create(ApplicationConnection.class);
a.init(widgetSet, appConf);
runningApplications.add(a);
Profiler.leave("ApplicationConfiguration.startApplication");
a.start();
});
} | 3.68 |
hbase_AbstractFSWALProvider_findArchivedLog | /**
* Find the archived WAL file path if it is not able to locate in WALs dir.
* @param path - active WAL file path
* @param conf - configuration
* @return archived path if exists, null - otherwise
* @throws IOException exception
*/
public static Path findArchivedLog(Path path, Configuration conf) throws IOException {
// If the path contains oldWALs keyword then exit early.
if (path.toString().contains(HConstants.HREGION_OLDLOGDIR_NAME)) {
return null;
}
Path walRootDir = CommonFSUtils.getWALRootDir(conf);
FileSystem fs = path.getFileSystem(conf);
// Try finding the log in old dir
Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
Path archivedLogLocation = new Path(oldLogDir, path.getName());
if (fs.exists(archivedLogLocation)) {
LOG.info("Log " + path + " was moved to " + archivedLogLocation);
return archivedLogLocation;
}
ServerName serverName = getServerNameFromWALDirectoryName(path);
if (serverName == null) {
LOG.warn("Can not extract server name from path {}, "
+ "give up searching the separated old log dir", path);
return null;
}
// Try finding the log in separate old log dir
oldLogDir = new Path(walRootDir, new StringBuilder(HConstants.HREGION_OLDLOGDIR_NAME)
.append(Path.SEPARATOR).append(serverName.getServerName()).toString());
archivedLogLocation = new Path(oldLogDir, path.getName());
if (fs.exists(archivedLogLocation)) {
LOG.info("Log " + path + " was moved to " + archivedLogLocation);
return archivedLogLocation;
}
LOG.error("Couldn't locate log: " + path);
return null;
} | 3.68 |
flink_PatternStream_sideOutputLateData | /**
* Send late arriving data to the side output identified by the given {@link OutputTag}. A
* record is considered late after the watermark has passed its timestamp.
*
* <p>You can get the stream of late data using {@link
* SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link
* SingleOutputStreamOperator} resulting from the pattern processing operations.
*/
public PatternStream<T> sideOutputLateData(OutputTag<T> lateDataOutputTag) {
return new PatternStream<>(builder.withLateDataOutputTag(lateDataOutputTag));
} | 3.68 |
rocketmq-connect_ConnectUtil_searchOffsetsByTimestamp | /** Search offsets by timestamp */
public static Map<MessageQueue, Long> searchOffsetsByTimestamp(
WorkerConfig config,
Collection<MessageQueue> messageQueues,
Long timestamp) {
Map<MessageQueue, Long> offsets = Maps.newConcurrentMap();
DefaultMQAdminExt adminClient = null;
try {
adminClient = startMQAdminTool(config);
for (MessageQueue messageQueue : messageQueues) {
long offset = adminClient.searchOffset(messageQueue, timestamp);
offsets.put(messageQueue, offset);
}
return offsets;
} catch (MQClientException e) {
throw new RuntimeException(e);
} finally {
if (adminClient != null) {
adminClient.shutdown();
}
}
} | 3.68 |
hudi_LockManager_unlock | /**
* We need to take care of the scenario where the current thread is not the holder of this lock
* but still tries to call unlock().
*/
public void unlock() {
getLockProvider().unlock();
metrics.updateLockHeldTimerMetrics();
close();
} | 3.68 |
hbase_WALEdit_getFamilies | /**
* For use by FSWALEntry ONLY. An optimization.
* @return All families in {@link #getCells()}; may be null.
*/
public Set<byte[]> getFamilies() {
return this.families;
} | 3.68 |
framework_AbstractTextField_setPlaceholder | /**
* Sets the placeholder text. The placeholder is text that is displayed when
* the field would otherwise be empty, to prompt the user for input.
*
* @param placeholder
* the placeholder text to set
* @since 8.0
*/
public void setPlaceholder(String placeholder) {
getState().placeholder = placeholder;
} | 3.68 |
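A minimal sketch of the placeholder in use on a Vaadin 8 TextField (the caption and placeholder text are placeholders):

```java
// Sketch: the placeholder is shown while the field is empty and disappears on input.
TextField nameField = new TextField("Name");
nameField.setPlaceholder("Enter your full name");
```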