name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_RestoreSnapshotHelper_restoreHdfsRegions | /**
* Restore specified regions by restoring content to the snapshot state.
*/
private void restoreHdfsRegions(final ThreadPoolExecutor exec,
final Map<String, SnapshotRegionManifest> regionManifests, final List<RegionInfo> regions)
throws IOException {
if (regions == null || regions.isEmpty()) return;
ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
@Override
public void editRegion(final RegionInfo hri) throws IOException {
restoreRegion(hri, regionManifests.get(hri.getEncodedName()));
}
});
} | 3.68 |
flink_SkipListUtils_getValueLen | /**
* Return the length of value data.
*
* @param memorySegment memory segment for value space.
* @param offset offset of value space in memory segment.
*/
public static int getValueLen(MemorySegment memorySegment, int offset) {
return memorySegment.getInt(offset + VALUE_LEN_OFFSET);
} | 3.68 |
Activiti_ComposedTransformer_primTransform | /**
* {@inheritDoc}
*/
@Override
protected Object primTransform(Object anObject) throws Exception {
Object current = anObject;
for (Transformer transformer : this.transformers) {
current = transformer.transform(current);
}
return current;
} | 3.68 |
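The composition pattern above simply feeds each transformer's output into the next. A minimal, self-contained sketch of the same chaining idea using `java.util.function.Function` in place of the Activiti `Transformer` interface (the use of `Function` and the sample transformers are illustrative assumptions, not the original API):

```java
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

public class ComposedTransformDemo {
    // Applies each transformer in order, feeding the result of one into the next.
    static Object primTransform(List<Function<Object, Object>> transformers, Object anObject) {
        Object current = anObject;
        for (Function<Object, Object> transformer : transformers) {
            current = transformer.apply(current);
        }
        return current;
    }

    public static void main(String[] args) {
        List<Function<Object, Object>> chain = Arrays.asList(
                o -> o.toString().trim(),          // first transformer: trim whitespace
                o -> o.toString().toUpperCase());  // second transformer: upper-case
        System.out.println(primTransform(chain, "  hello "));  // prints "HELLO"
    }
}
```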
framework_Escalator_recalculateElementSizes | /**
* Recalculates the dimensions for all elements that require manual
* calculations. Also updates the dimension caches.
* <p>
 * <em>Note:</em> This method has the <strong>side-effect</strong> of
 * automatically making sure that an appropriate number of escalator rows is
 * present. So, if the body area grows, more <strong>escalator rows might be
* inserted</strong>. Conversely, if the body area shrinks,
* <strong>escalator rows might be removed</strong>.
*/
private void recalculateElementSizes() {
if (!isAttached()) {
return;
}
Profiler.enter("Escalator.recalculateElementSizes");
widthOfEscalator = Math.max(0, WidgetUtil
.getRequiredWidthBoundingClientRectDouble(getElement()));
heightOfEscalator = Math.max(0, WidgetUtil
.getRequiredHeightBoundingClientRectDouble(getElement()));
header.recalculateSectionHeight();
body.recalculateSectionHeight();
footer.recalculateSectionHeight();
scroller.recalculateScrollbarsForVirtualViewport();
body.verifyEscalatorCount();
body.reapplySpacerWidths();
Profiler.leave("Escalator.recalculateElementSizes");
} | 3.68 |
hbase_BlockCache_getBlockSize | /**
 * Returns an Optional containing the size of the block related to the passed key. If the block is
 * not in the cache, the returned optional will be empty. Also, not all implementing classes
 * override this method; in such cases, the returned Optional will be empty.
 * @param key the key for the block whose size we want to look up in the cache.
 * @return an empty optional if this method is not supported or the block is not cached, otherwise
 *         an optional containing the size of the cached block.
*/
default Optional<Integer> getBlockSize(BlockCacheKey key) {
return Optional.empty();
} | 3.68 |
flink_SplitEnumeratorContext_assignSplit | /**
* Assigns a single split.
*
* <p>When assigning multiple splits, it is more efficient to assign all of them in a single
* call to the {@link #assignSplits(SplitsAssignment)} method.
*
* @param split The new split
* @param subtask The index of the operator's parallel subtask that shall receive the split.
*/
default void assignSplit(SplitT split, int subtask) {
assignSplits(new SplitsAssignment<>(split, subtask));
} | 3.68 |
framework_DesignContext_setComponentLocalId | /**
* Creates a mapping between the given local id and the component. Returns
* true if localId was already mapped to some component or if component was
* mapped to some string. Otherwise returns false.
*
* If the string was mapped to a component c different from the given
* component, the mapping from c to the string is removed. Similarly, if
* component was mapped to some string s different from localId, the mapping
* from s to component is removed.
*
* @since 7.5.0
*
* @param component
* The component whose local id is to be set.
* @param localId
* The new local id of the component.
*
* @return true, if there already was a local id mapping from the string to
* some component or from the component to some string. Otherwise
* returns false.
*/
public boolean setComponentLocalId(Component component, String localId) {
return twoWayMap(localId, component, localIdToComponent,
componentToLocalId);
} | 3.68 |
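The two-way mapping semantics in the javadoc above are easier to see in isolation. Below is a standalone sketch of a `twoWayMap`-style update (the map names mirror the snippet, but the implementation is an illustration, not Vaadin's actual code): when a key or a value is re-mapped, the stale entry on the other side is removed so both maps stay consistent, and the return value says whether either side was already mapped.

```java
import java.util.HashMap;
import java.util.Map;

public class TwoWayMapDemo {
    // Returns true if either the key or the value was already mapped,
    // mirroring the documented return value of setComponentLocalId.
    static <K, V> boolean twoWayMap(K key, V value, Map<K, V> keyToValue, Map<V, K> valueToKey) {
        V oldValue = keyToValue.put(key, value);
        if (oldValue != null && !oldValue.equals(value)) {
            valueToKey.remove(oldValue);           // drop stale value -> key mapping
        }
        K oldKey = valueToKey.put(value, key);
        if (oldKey != null && !oldKey.equals(key)) {
            keyToValue.remove(oldKey);             // drop stale key -> value mapping
        }
        return oldValue != null || oldKey != null;
    }

    public static void main(String[] args) {
        Map<String, String> localIdToComponent = new HashMap<>();
        Map<String, String> componentToLocalId = new HashMap<>();
        System.out.println(twoWayMap("save-button", "button-1", localIdToComponent, componentToLocalId)); // false
        System.out.println(twoWayMap("save-button", "button-2", localIdToComponent, componentToLocalId)); // true
        System.out.println(componentToLocalId.containsKey("button-1")); // false, stale mapping removed
    }
}
```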
hbase_BackupManager_decorateRegionServerConfiguration | /**
 * This method modifies the Region Server configuration in order to inject backup-related features.
 * TESTs only.
* @param conf configuration
*/
public static void decorateRegionServerConfiguration(Configuration conf) {
if (!isBackupEnabled(conf)) {
return;
}
String classes = conf.get(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY);
String regionProcedureClass = LogRollRegionServerProcedureManager.class.getName();
if (classes == null) {
conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, regionProcedureClass);
} else if (!classes.contains(regionProcedureClass)) {
conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY,
classes + "," + regionProcedureClass);
}
String coproc = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
String regionObserverClass = BackupObserver.class.getName();
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
(coproc == null ? "" : coproc + ",") + regionObserverClass);
if (LOG.isDebugEnabled()) {
LOG.debug("Added region procedure manager: {}. Added region observer: {}",
regionProcedureClass, regionObserverClass);
}
} | 3.68 |
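The core of this method is appending a class name to a comma-separated configuration property only when it is not already present. A standalone sketch of that append-if-absent pattern using a plain Map in place of the HBase `Configuration` (the property key below is made up for illustration):

```java
import java.util.HashMap;
import java.util.Map;

public class AppendClassDemo {
    // Appends className to the comma-separated list stored under key, unless it is already present.
    static void appendIfAbsent(Map<String, String> conf, String key, String className) {
        String classes = conf.get(key);
        if (classes == null) {
            conf.put(key, className);
        } else if (!classes.contains(className)) {
            conf.put(key, classes + "," + className);
        }
    }

    public static void main(String[] args) {
        Map<String, String> conf = new HashMap<>();
        String key = "example.procedure.classes"; // hypothetical key, not an HBase constant
        appendIfAbsent(conf, key, "com.example.ProcA");
        appendIfAbsent(conf, key, "com.example.ProcB");
        appendIfAbsent(conf, key, "com.example.ProcA"); // no duplicate added
        System.out.println(conf.get(key)); // com.example.ProcA,com.example.ProcB
    }
}
```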
pulsar_PulsarAdminImpl_resourcegroups | /**
* @return the resourcegroups management object
*/
public ResourceGroups resourcegroups() {
return resourcegroups;
} | 3.68 |
flink_LogicalTypeUtils_getAtomicName | /** Returns a unique name for an atomic type. */
public static String getAtomicName(List<String> existingNames) {
int i = 0;
String fieldName = ATOMIC_FIELD_NAME;
while ((null != existingNames) && existingNames.contains(fieldName)) {
fieldName = ATOMIC_FIELD_NAME + "_" + i++;
}
return fieldName;
} | 3.68 |
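A worked sketch of the unique-name loop above. The value of `ATOMIC_FIELD_NAME` is not shown in the snippet, so the base name `"f0"` used here is an assumption for illustration:

```java
import java.util.Arrays;
import java.util.List;

public class AtomicNameDemo {
    private static final String ATOMIC_FIELD_NAME = "f0"; // assumed base name for illustration

    static String getAtomicName(List<String> existingNames) {
        int i = 0;
        String fieldName = ATOMIC_FIELD_NAME;
        while (existingNames != null && existingNames.contains(fieldName)) {
            fieldName = ATOMIC_FIELD_NAME + "_" + i++;
        }
        return fieldName;
    }

    public static void main(String[] args) {
        System.out.println(getAtomicName(null));                          // f0
        System.out.println(getAtomicName(Arrays.asList("f0")));           // f0_0
        System.out.println(getAtomicName(Arrays.asList("f0", "f0_0")));   // f0_1
    }
}
```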
rocketmq-connect_DebeziumOracleSource_getTaskClass | /**
* get task class
*/
@Override
public String getTaskClass() {
return DEFAULT_TASK;
} | 3.68 |
hbase_HMobStore_createScanner | /**
 * Gets the MobStoreScanner or MobReversedStoreScanner. In these scanners, additional seeks in
 * the mob files are performed after the seek in HBase is done.
*/
@Override
protected KeyValueScanner createScanner(Scan scan, ScanInfo scanInfo,
NavigableSet<byte[]> targetCols, long readPt) throws IOException {
if (MobUtils.isRefOnlyScan(scan)) {
Filter refOnlyFilter = new MobReferenceOnlyFilter();
Filter filter = scan.getFilter();
if (filter != null) {
scan.setFilter(new FilterList(filter, refOnlyFilter));
} else {
scan.setFilter(refOnlyFilter);
}
}
return scan.isReversed()
? new ReversedMobStoreScanner(this, scanInfo, scan, targetCols, readPt)
: new MobStoreScanner(this, scanInfo, scan, targetCols, readPt);
} | 3.68 |
hadoop_AbstractDelegationTokenBinding_getTokenIssuingPolicy | /**
* Predicate: will this binding issue a DT?
 * That is: should the filesystem declare that it is issuing
 * delegation tokens?
* @return a declaration of what will happen when asked for a token.
*/
public S3ADelegationTokens.TokenIssuingPolicy getTokenIssuingPolicy() {
return S3ADelegationTokens.TokenIssuingPolicy.RequestNewToken;
} | 3.68 |
flink_SocketStreamIterator_next | /**
* Returns the next element of the DataStream. (Blocks if it is not available yet.)
*
* @return The element
* @throws NoSuchElementException if the stream has already ended
*/
@Override
public T next() {
if (hasNext()) {
T current = next;
next = null;
return current;
} else {
throw new NoSuchElementException();
}
} | 3.68 |
hadoop_StripedDataStreamer_getFollowingBlock | /**
* The upper level DFSStripedOutputStream will allocate the new block group.
 * All the striped data streamers only need to fetch from the queue, which
 * should already be ready.
*/
private LocatedBlock getFollowingBlock() throws IOException {
if (!this.isHealthy()) {
// No internal block for this streamer, maybe no enough healthy DN.
// Throw the exception which has been set by the StripedOutputStream.
this.getLastException().check(false);
}
return coordinator.getFollowingBlocks().poll(index);
} | 3.68 |
hbase_DelayedUtil_takeWithoutInterrupt | /** Returns null (if an interrupt) or an instance of E; resets interrupt on calling thread. */
public static <E extends Delayed> E takeWithoutInterrupt(final DelayQueue<E> queue,
final long timeout, final TimeUnit timeUnit) {
try {
return queue.poll(timeout, timeUnit);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return null;
}
} | 3.68 |
flink_AbstractStreamTableEnvironmentImpl_execEnv | /**
* This is a temporary workaround for Python API. Python API should not use
* StreamExecutionEnvironment at all.
*/
@Internal
public StreamExecutionEnvironment execEnv() {
return executionEnvironment;
} | 3.68 |
flink_ModuleManager_getFunctionDefinition | /**
 * Get an optional of {@link FunctionDefinition} by a given name. The function will be resolved
 * against the modules in the used order, and the first match will be returned. If no match is
 * found in any module, an empty optional is returned.
*
* <p>It includes hidden functions even though not listed in {@link #listFunctions()}.
*
* @param name name of the function
* @return an optional of {@link FunctionDefinition}
*/
public Optional<FunctionDefinition> getFunctionDefinition(String name) {
for (String moduleName : usedModules) {
if (loadedModules.get(moduleName).listFunctions(true).stream()
.anyMatch(name::equalsIgnoreCase)) {
LOG.debug("Got FunctionDefinition '{}' from '{}' module.", name, moduleName);
return loadedModules.get(moduleName).getFunctionDefinition(name);
}
}
LOG.debug("Cannot find FunctionDefinition '{}' from any loaded modules.", name);
return Optional.empty();
} | 3.68 |
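The resolution rule ("modules in the used order, first match wins, case-insensitive name match") can be illustrated with a standalone stand-in for the module manager (module and function names below are invented for the example):

```java
import java.util.*;

public class FunctionResolutionDemo {
    // usedModules preserves resolution order; loadedModules maps module name -> its functions.
    static Optional<String> resolve(String name,
                                    List<String> usedModules,
                                    Map<String, Set<String>> loadedModules) {
        for (String moduleName : usedModules) {
            boolean match = loadedModules.get(moduleName).stream().anyMatch(name::equalsIgnoreCase);
            if (match) {
                return Optional.of(moduleName); // first module in used order that defines the function
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        List<String> usedModules = Arrays.asList("core", "hive"); // hypothetical module names
        Map<String, Set<String>> loadedModules = new HashMap<>();
        loadedModules.put("core", new HashSet<>(Arrays.asList("CONCAT", "ABS")));
        loadedModules.put("hive", new HashSet<>(Arrays.asList("concat", "nvl")));

        System.out.println(resolve("concat", usedModules, loadedModules)); // Optional[core]
        System.out.println(resolve("nvl", usedModules, loadedModules));    // Optional[hive]
        System.out.println(resolve("foo", usedModules, loadedModules));    // Optional.empty
    }
}
```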
hadoop_Chunk_writeBufData | /**
* Write out a chunk that is a concatenation of the internal buffer plus
* user supplied data. This will never be the last block.
*
* @param data
* User supplied data buffer.
* @param offset
* Offset to user data buffer.
* @param len
* User data buffer size.
*/
private void writeBufData(byte[] data, int offset, int len)
throws IOException {
if (count + len > 0) {
Utils.writeVInt(out, -(count + len));
out.write(buf, 0, count);
count = 0;
out.write(data, offset, len);
}
} | 3.68 |
hbase_QuotaFilter_addTypeFilter | /**
* Add a type to the filter list
* @param type the type to filter on
* @return the quota filter object
*/
public QuotaFilter addTypeFilter(final QuotaType type) {
this.types.add(type);
hasFilters |= true;
return this;
} | 3.68 |
framework_RpcDataProviderExtension_refreshCache | /**
* Pushes a new version of all the rows in the active cache range.
*/
public void refreshCache() {
if (!refreshCache) {
refreshCache = true;
markAsDirty();
}
} | 3.68 |
hbase_WALEntryBatch_getHeapSize | /** Returns the heap size of this batch */
public long getHeapSize() {
return heapSize;
} | 3.68 |
hbase_Segment_close | /**
 * Closes the segment before it is discarded.
*/
public void close() {
if (this.memStoreLAB != null) {
this.memStoreLAB.close();
}
// do not set MSLab to null as scanners may still be reading the data here and need to decrease
// the counter when they finish
} | 3.68 |
hibernate-validator_TypeHelper_isReferenceType | /**
* Gets whether the specified type is a <em>reference type</em>.
* <p>
* More specifically, this method returns {@code true} if the specified type is one of the following:
* <ul>
* <li>a class type</li>
* <li>an interface type</li>
* <li>an array type</li>
* <li>a parameterized type</li>
* <li>a type variable</li>
* <li>the null type</li>
* <li>a wildcard type</li>
* </ul>
*
* @param type the type to check
*
* @return {@code true} if the specified type is a reference type
*
* @see <a href="http://docs.oracle.com/javase/specs/jls/se8/html/jls-4.html#jls-4.3">4.3 Reference Types and Values</a>
*/
private static boolean isReferenceType(Type type) {
return type == null
|| type instanceof Class<?>
|| type instanceof ParameterizedType
|| type instanceof TypeVariable<?>
|| type instanceof GenericArrayType
|| type instanceof WildcardType;
} | 3.68 |
flink_PatternStream_flatSelect | /**
* Applies a flat select function to the detected pattern sequence. For each pattern sequence
* the provided {@link PatternFlatSelectFunction} is called. The pattern flat select function
* can produce an arbitrary number of resulting elements.
*
* <p>Applies a timeout function to a partial pattern sequence which has timed out. For each
* partial pattern sequence the provided {@link PatternFlatTimeoutFunction} is called. The
* pattern timeout function can produce an arbitrary number of resulting elements.
*
* @param patternFlatTimeoutFunction The pattern flat timeout function which is called for each
* partial pattern sequence which has timed out.
* @param patternFlatSelectFunction The pattern flat select function which is called for each
* detected pattern sequence.
* @param <L> Type of the resulting timeout events
* @param <R> Type of the resulting events
* @deprecated Use {@link PatternStream#flatSelect(OutputTag, PatternFlatTimeoutFunction,
* PatternFlatSelectFunction)} that returns timed out events as a side-output
* @return {@link DataStream} which contains the resulting events from the pattern flat select
* function or the resulting timeout events from the pattern flat timeout function wrapped
* in an {@link Either} type.
*/
@Deprecated
public <L, R> SingleOutputStreamOperator<Either<L, R>> flatSelect(
final PatternFlatTimeoutFunction<T, L> patternFlatTimeoutFunction,
final PatternFlatSelectFunction<T, R> patternFlatSelectFunction) {
final TypeInformation<L> timedOutTypeInfo =
TypeExtractor.getUnaryOperatorReturnType(
patternFlatTimeoutFunction,
PatternFlatTimeoutFunction.class,
0,
1,
new int[] {2, 0},
builder.getInputType(),
null,
false);
final TypeInformation<R> mainTypeInfo =
TypeExtractor.getUnaryOperatorReturnType(
patternFlatSelectFunction,
PatternFlatSelectFunction.class,
0,
1,
new int[] {1, 0},
builder.getInputType(),
null,
false);
final OutputTag<L> outputTag =
new OutputTag<>(UUID.randomUUID().toString(), timedOutTypeInfo);
final PatternProcessFunction<T, R> processFunction =
fromFlatSelect(builder.clean(patternFlatSelectFunction))
.withTimeoutHandler(outputTag, builder.clean(patternFlatTimeoutFunction))
.build();
final SingleOutputStreamOperator<R> mainStream = process(processFunction, mainTypeInfo);
final DataStream<L> timedOutStream = mainStream.getSideOutput(outputTag);
final TypeInformation<Either<L, R>> outTypeInfo =
new EitherTypeInfo<>(timedOutTypeInfo, mainTypeInfo);
return mainStream.connect(timedOutStream).map(new CoMapTimeout<>()).returns(outTypeInfo);
} | 3.68 |
hbase_SortedCompactionPolicy_throttleCompaction | /**
* @param compactionSize Total size of some compaction
* @return whether this should be a large or small compaction
*/
@Override
public boolean throttleCompaction(long compactionSize) {
return compactionSize > comConf.getThrottlePoint();
} | 3.68 |
querydsl_Alias_alias | /**
* Create a new alias proxy of the given type for the given variable
*
* @param cl type of the alias
* @param var variable name for the underlying expression
* @return alias instance
*/
public static <A> A alias(Class<A> cl, String var) {
return aliasFactory.createAliasForVariable(cl, var);
} | 3.68 |
hadoop_AzureBlobFileSystem_setXAttr | /**
* Set the value of an attribute for a path.
*
* @param path The path on which to set the attribute
* @param name The attribute to set
* @param value The byte value of the attribute to set (encoded in latin-1)
* @param flag The mode in which to set the attribute
* @throws IOException If there was an issue setting the attribute on Azure
* @throws IllegalArgumentException If name is null or empty or if value is null
*/
@Override
public void setXAttr(final Path path,
final String name,
final byte[] value,
final EnumSet<XAttrSetFlag> flag)
throws IOException {
LOG.debug("AzureBlobFileSystem.setXAttr path: {}", path);
if (name == null || name.isEmpty() || value == null) {
throw new IllegalArgumentException("A valid name and value must be specified.");
}
Path qualifiedPath = makeQualified(path);
try {
TracingContext tracingContext = new TracingContext(clientCorrelationId,
fileSystemId, FSOperationType.SET_ATTR, true, tracingHeaderFormat,
listener);
Hashtable<String, String> properties;
String xAttrName = ensureValidAttributeName(name);
if (path.isRoot()) {
properties = abfsStore.getFilesystemProperties(tracingContext);
} else {
properties = abfsStore.getPathStatus(qualifiedPath, tracingContext);
}
boolean xAttrExists = properties.containsKey(xAttrName);
XAttrSetFlag.validate(name, xAttrExists, flag);
String xAttrValue = abfsStore.decodeAttribute(value);
properties.put(xAttrName, xAttrValue);
if (path.isRoot()) {
abfsStore.setFilesystemProperties(properties, tracingContext);
} else {
abfsStore.setPathProperties(qualifiedPath, properties, tracingContext);
}
} catch (AzureBlobFileSystemException ex) {
checkException(path, ex);
}
} | 3.68 |
flink_TypeSerializerSchemaCompatibility_getReconfiguredSerializer | /**
* Gets the reconfigured serializer. This throws an exception if {@link
* #isCompatibleWithReconfiguredSerializer()} is {@code false}.
*/
public TypeSerializer<T> getReconfiguredSerializer() {
Preconditions.checkState(
isCompatibleWithReconfiguredSerializer(),
"It is only possible to get a reconfigured serializer if the compatibility type is %s, but the type is %s",
Type.COMPATIBLE_WITH_RECONFIGURED_SERIALIZER,
resultType);
return reconfiguredNewSerializer;
} | 3.68 |
framework_OptionGroup_setHtmlContentAllowed | /**
* Sets whether html is allowed in the item captions. If set to true, the
* captions are passed to the browser as html and the developer is
* responsible for ensuring no harmful html is used. If set to false, the
* content is passed to the browser as plain text.
*
* @param htmlContentAllowed
* true if the captions are used as html, false if used as plain
* text
*/
public void setHtmlContentAllowed(boolean htmlContentAllowed) {
this.htmlContentAllowed = htmlContentAllowed;
markAsDirty();
} | 3.68 |
framework_VTabsheet_onBlur | /**
* Delegate method for the onBlur event occurring on Tab.
*
* @param blurSource
* the source of the blur.
*
* @see #onFocus(Tab)
*/
public void onBlur(Tab blurSource) {
if (focusedTab != null && focusedTab == blurSource) {
if (connector.hasEventListener(EventId.BLUR)) {
scheduleBlur(focusedTab);
}
}
} | 3.68 |
druid_Resources_classForName | /**
* Loads a class
*
* @param className - the class to load
* @return The loaded class
 * @throws ClassNotFoundException If the class cannot be found
*/
public static Class<?> classForName(String className) throws ClassNotFoundException {
Class<?> clazz = null;
try {
clazz = getClassLoader().loadClass(className);
} catch (Exception e) {
// Ignore. Failsafe below.
}
if (clazz == null) {
clazz = Class.forName(className);
}
return clazz;
} | 3.68 |
flink_Printer_close | /** Close the resource of the {@link Printer}. */
@Override
default void close() {} | 3.68 |
pulsar_DLOutputStream_writeAsync | /**
 * Write all input stream data to the distributed log.
 *
 * @param inputStream the data we need to write
 * @return a future that completes with this output stream once the data has been written
*/
CompletableFuture<DLOutputStream> writeAsync(InputStream inputStream) {
return getRecords(inputStream)
.thenCompose(this::writeAsync);
} | 3.68 |
morf_XmlDataSetProducer_isNullable | /**
* @see org.alfasoftware.morf.metadata.Column#isNullable()
*/
@Override
public boolean isNullable() {
if (nullable == null) {
return false;
}
return nullable;
} | 3.68 |
morf_CreateDeployedViews_getJiraId | /**
* @see org.alfasoftware.morf.upgrade.UpgradeStep#getJiraId()
*/
@Override
public String getJiraId() {
return "WEB-18348";
} | 3.68 |
hadoop_ShadedProtobufHelper_getByteString | /**
* Get the byte string of a non-null byte array.
* If the array is 0 bytes long, return a singleton to reduce object allocation.
* @param bytes bytes to convert.
* @return the protobuf byte string representation of the array.
*/
public static ByteString getByteString(byte[] bytes) {
// return singleton to reduce object allocation
return (bytes.length == 0)
? ByteString.EMPTY
: ByteString.copyFrom(bytes);
} | 3.68 |
hadoop_WordListAnonymizerUtility_extractSuffix | /**
* Extracts a known suffix from the given data.
*
* @throws RuntimeException if the data doesn't have a suffix.
* Use {@link #hasSuffix(String, String[])} to make sure that the
* given data has a suffix.
*/
public static String[] extractSuffix(String data, String[] suffixes) {
// check if they end in known suffixes
String suffix = "";
for (String ks : suffixes) {
if (data.endsWith(ks)) {
suffix = ks;
        // strip off the suffix, which will get appended later
data = data.substring(0, data.length() - suffix.length());
return new String[] {data, suffix};
}
}
// throw exception
throw new RuntimeException("Data [" + data + "] doesn't have a suffix from"
+ " known suffixes [" + StringUtils.join(suffixes, ',') + "]");
} | 3.68 |
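A small worked example of the suffix-splitting behaviour described above. This is a standalone copy of the logic; the sample data and the suffix list are invented:

```java
import java.util.Arrays;

public class SuffixDemo {
    static String[] extractSuffix(String data, String[] suffixes) {
        for (String ks : suffixes) {
            if (data.endsWith(ks)) {
                // strip off the suffix, return the prefix and the suffix separately
                return new String[] {data.substring(0, data.length() - ks.length()), ks};
            }
        }
        throw new RuntimeException("Data [" + data + "] doesn't have a known suffix");
    }

    public static void main(String[] args) {
        String[] suffixes = {".gz", ".tgz", ".zip"};
        System.out.println(Arrays.toString(extractSuffix("job-history.gz", suffixes)));
        // prints [job-history, .gz]
    }
}
```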
hbase_HBaseCommonTestingUtility_getRandomDir | /**
* @return A dir with a random (uuid) name under the test dir
* @see #getBaseTestDir()
*/
public Path getRandomDir() {
return new Path(getBaseTestDir(), getRandomUUID().toString());
} | 3.68 |
hadoop_CSQueuePreemptionSettings_isQueueHierarchyPreemptionDisabled | /**
* The specified queue is cross-queue preemptable if system-wide cross-queue
* preemption is turned on unless any queue in the <em>qPath</em> hierarchy
* has explicitly turned cross-queue preemption off.
* NOTE: Cross-queue preemptability is inherited from a queue's parent.
*
* @param q queue to check preemption state
* @param configuration capacity scheduler config
* @return true if queue has cross-queue preemption disabled, false otherwise
*/
private boolean isQueueHierarchyPreemptionDisabled(CSQueue q,
CapacitySchedulerConfiguration configuration) {
boolean systemWidePreemption =
configuration
.getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_ENABLE_MONITORS);
CSQueue parentQ = q.getParent();
// If the system-wide preemption switch is turned off, all of the queues in
// the qPath hierarchy have preemption disabled, so return true.
if (!systemWidePreemption) return true;
// If q is the root queue and the system-wide preemption switch is turned
// on, then q does not have preemption disabled (default=false, below)
// unless the preemption_disabled property is explicitly set.
if (parentQ == null) {
return configuration.getPreemptionDisabled(q.getQueuePath(), false);
}
// If this is not the root queue, inherit the default value for the
// preemption_disabled property from the parent. Preemptability will be
// inherited from the parent's hierarchy unless explicitly overridden at
// this level.
return configuration.getPreemptionDisabled(q.getQueuePath(),
parentQ.getPreemptionDisabled());
} | 3.68 |
druid_TableStat_getDataType | /**
* @since 1.0.20
*/
public String getDataType() {
return dataType;
} | 3.68 |
framework_FreeformQuery_removeRow | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.sqlcontainer.query.QueryDelegate#removeRow(com.
* vaadin .data.util.sqlcontainer.RowItem)
*/
@Override
public boolean removeRow(RowItem row) throws SQLException {
if (!isInTransaction()) {
throw new IllegalStateException("No transaction is active!");
} else if (primaryKeyColumns.isEmpty()) {
throw new UnsupportedOperationException(
"Cannot remove items fetched with a read-only freeform query!");
}
if (delegate != null) {
return delegate.removeRow(getConnection(), row);
} else {
throw new UnsupportedOperationException(
"FreeFormQueryDelegate not set!");
}
} | 3.68 |
hadoop_NamenodeStatusReport_getAvailableSpace | /**
* Get the available space.
*
* @return The available space.
*/
public long getAvailableSpace() {
return this.availableSpace;
} | 3.68 |
flink_RocksDBIncrementalRestoreOperation_restoreWithoutRescaling | /** Recovery from a single remote incremental state without rescaling. */
@SuppressWarnings("unchecked")
private void restoreWithoutRescaling(KeyedStateHandle keyedStateHandle) throws Exception {
logger.info(
"Starting to restore from state handle: {} without rescaling.", keyedStateHandle);
if (keyedStateHandle instanceof IncrementalRemoteKeyedStateHandle) {
IncrementalRemoteKeyedStateHandle incrementalRemoteKeyedStateHandle =
(IncrementalRemoteKeyedStateHandle) keyedStateHandle;
restorePreviousIncrementalFilesStatus(incrementalRemoteKeyedStateHandle);
restoreBaseDBFromRemoteState(incrementalRemoteKeyedStateHandle);
} else if (keyedStateHandle instanceof IncrementalLocalKeyedStateHandle) {
IncrementalLocalKeyedStateHandle incrementalLocalKeyedStateHandle =
(IncrementalLocalKeyedStateHandle) keyedStateHandle;
restorePreviousIncrementalFilesStatus(incrementalLocalKeyedStateHandle);
restoreBaseDBFromLocalState(incrementalLocalKeyedStateHandle);
} else {
throw unexpectedStateHandleException(
new Class[] {
IncrementalRemoteKeyedStateHandle.class,
IncrementalLocalKeyedStateHandle.class
},
keyedStateHandle.getClass());
}
logger.info(
"Finished restoring from state handle: {} without rescaling.", keyedStateHandle);
} | 3.68 |
flink_RestServerEndpointConfiguration_getUploadDir | /** Returns the directory used to temporarily store multipart/form-data uploads. */
public Path getUploadDir() {
return uploadDir;
} | 3.68 |
framework_VTabsheet_stopSchedule | /**
* Stop the command from being executed.
*
* @since 7.4
*/
public void stopSchedule() {
blurSource = null;
} | 3.68 |
framework_Escalator_setHeight | /**
* {@inheritDoc}
* <p>
* If Escalator is currently not in {@link HeightMode#CSS}, the given value
* is remembered, and applied once the mode is applied.
*
* @see #setHeightMode(HeightMode)
*/
@Override
public void setHeight(String height) {
/*
* TODO remove method once RequiresResize and the Vaadin layoutmanager
* listening mechanisms are implemented
*/
if (height != null && !height.isEmpty()) {
heightByCss = height;
} else {
if (getHeightMode() == HeightMode.UNDEFINED) {
heightByRows = body.getRowCount();
applyHeightByRows();
return;
} else {
heightByCss = DEFAULT_HEIGHT;
}
}
if (getHeightMode() == HeightMode.CSS) {
setHeightInternal(height);
}
} | 3.68 |
hadoop_ZookeeperUtils_splitToHostsAndPorts | /**
* Split a quorum list into a list of hostnames and ports
 * @param hostPortQuorumList the quorum list to split into hosts and ports
 * @return a list of host and port pairs
*/
public static List<HostAndPort> splitToHostsAndPorts(String hostPortQuorumList) {
    // split the quorum string into individual host entries
String[] strings = StringUtils.getStrings(hostPortQuorumList);
int len = 0;
if (strings != null) {
len = strings.length;
}
List<HostAndPort> list = new ArrayList<HostAndPort>(len);
if (strings != null) {
for (String s : strings) {
list.add(HostAndPort.fromString(s.trim()).withDefaultPort(DEFAULT_PORT));
}
}
return list;
} | 3.68 |
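The method depends on Hadoop's `StringUtils` and Guava's `HostAndPort`. Below is a self-contained sketch of the same split-and-default-port behaviour using only the JDK; the `DEFAULT_PORT` value of 2181 is an assumption (ZooKeeper's usual default) and is not confirmed by the snippet:

```java
import java.util.ArrayList;
import java.util.List;

public class QuorumSplitDemo {
    private static final int DEFAULT_PORT = 2181; // assumed ZooKeeper default, not from the snippet

    static List<String> splitToHostsAndPorts(String hostPortQuorumList) {
        List<String> list = new ArrayList<>();
        for (String s : hostPortQuorumList.split(",")) {
            String entry = s.trim();
            // Append the default port only when none was given.
            list.add(entry.contains(":") ? entry : entry + ":" + DEFAULT_PORT);
        }
        return list;
    }

    public static void main(String[] args) {
        System.out.println(splitToHostsAndPorts("zk1.example.com:2182, zk2.example.com, zk3.example.com"));
        // [zk1.example.com:2182, zk2.example.com:2181, zk3.example.com:2181]
    }
}
```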
framework_AbstractProperty_fireValueChange | /**
* Sends a value change event to all registered listeners.
*/
protected void fireValueChange() {
if (valueChangeListeners != null) {
final Property.ValueChangeEvent event = new ValueChangeEvent(this);
for (Object l : valueChangeListeners.toArray()) {
((Property.ValueChangeListener) l).valueChange(event);
}
}
} | 3.68 |
flink_CsvReader_pojoType | /**
 * Configures the reader to read the CSV data and parse it to the given type. All fields of
 * the type must be public or have setters. The type information for the fields is obtained
 * from the type class.
*
* @param pojoType The class of the target POJO.
* @param pojoFields The fields of the POJO which are mapped to CSV fields.
* @return The DataSet representing the parsed CSV data.
*/
public <T> DataSource<T> pojoType(Class<T> pojoType, String... pojoFields) {
Preconditions.checkNotNull(pojoType, "The POJO type class must not be null.");
Preconditions.checkArgument(
pojoFields != null && pojoFields.length > 0,
"POJO fields must be specified (not null) if output type is a POJO.");
final TypeInformation<T> ti = TypeExtractor.createTypeInfo(pojoType);
if (!(ti instanceof PojoTypeInfo)) {
throw new IllegalArgumentException(
"The specified class is not a POJO. The type class must meet the POJO requirements. Found: "
+ ti);
}
final PojoTypeInfo<T> pti = (PojoTypeInfo<T>) ti;
CsvInputFormat<T> inputFormat =
new PojoCsvInputFormat<T>(
path,
this.lineDelimiter,
this.fieldDelimiter,
pti,
pojoFields,
this.includedMask);
configureInputFormat(inputFormat);
return new DataSource<T>(executionContext, inputFormat, pti, Utils.getCallLocationName());
} | 3.68 |
hbase_BlockType_put | /**
* Put the magic record out to the specified byte array position.
* @param bytes the byte array
* @param offset position in the array
* @return incremented offset
*/
// System.arraycopy is static native. We can't do anything about this until minimum JDK is 9.
@SuppressWarnings("UnsafeFinalization")
public int put(byte[] bytes, int offset) {
System.arraycopy(magic, 0, bytes, offset, MAGIC_LENGTH);
return offset + MAGIC_LENGTH;
} | 3.68 |
flink_SocketStreamIterator_hasNext | /**
* Returns true if the DataStream has more elements. (Note: blocks if there will be more
* elements, but they are not available yet.)
*
* @return true if the DataStream has more elements
*/
@Override
public boolean hasNext() {
if (next == null) {
try {
next = readNextFromStream();
} catch (Exception e) {
throw new RuntimeException("Failed to receive next element: " + e.getMessage(), e);
}
}
return next != null;
} | 3.68 |
dubbo_DefaultFuture_newFuture | /**
* init a DefaultFuture
 * Initializes a DefaultFuture:
 * 1. creates the DefaultFuture
 * 2. performs the timeout check
 *
 * @param channel channel
 * @param request the request
 * @param timeout timeout
 * @param executor the executor associated with the future
*/
public static DefaultFuture newFuture(Channel channel, Request request, int timeout, ExecutorService executor) {
final DefaultFuture future = new DefaultFuture(channel, request, timeout);
future.setExecutor(executor);
// timeout check
timeoutCheck(future);
return future;
} | 3.68 |
framework_FocusableGrid_addKeyDownHandler | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.HasKeyDownHandlers#addKeyDownHandler(
* com.google.gwt.event.dom.client.KeyDownHandler)
*/
@Override
public HandlerRegistration addKeyDownHandler(KeyDownHandler handler) {
return addDomHandler(handler, KeyDownEvent.getType());
} | 3.68 |
hadoop_AllocateResponse_setRejectedSchedulingRequests | /**
* Add a list of rejected SchedulingRequests to the AllocateResponse.
* @param rejectedRequests List of Rejected Scheduling Requests.
*/
@Private
@Unstable
public void setRejectedSchedulingRequests(
List<RejectedSchedulingRequest> rejectedRequests) {
} | 3.68 |
morf_AbstractSqlDialectTest_checkDatabaseSafeStringToRecordValue | /**
* Format a value through the result set record for testing.
*
* @param value The value to format.
* @return The formatted value.
*/
private String checkDatabaseSafeStringToRecordValue(DataType dataType, String value) throws SQLException {
ResultSet resultSet = mock(ResultSet.class);
when(resultSet.getBigDecimal(anyInt())).thenReturn(value == null ? null : new BigDecimal(value));
if (value == null) {
when(resultSet.wasNull()).thenReturn(true);
} else {
when(resultSet.getBoolean(anyInt())).thenReturn(value.equals("1"));
}
return testDialect.resultSetToRecord(resultSet, ImmutableList.of(column("a", dataType))).getString("a");
} | 3.68 |
dubbo_MonitorFilter_getConcurrent | /**
* concurrent counter
*
 * @param invoker the invoker being monitored
 * @param invocation the current invocation
 * @return the concurrent-call counter for the invoked method
*/
private AtomicInteger getConcurrent(Invoker<?> invoker, Invocation invocation) {
String key = invoker.getInterface().getName() + "." + RpcUtils.getMethodName(invocation);
return ConcurrentHashMapUtils.computeIfAbsent(concurrents, key, k -> new AtomicInteger());
} | 3.68 |
open-banking-gateway_FintechSecureStorage_psuAspspKeyFromInbox | /**
* Retrieves PSU/FinTech users' private key from FinTechs' inbox.
* @param authSession Authorization session for this PSU/Fintech user
* @param password Fintechs' Datasafe/KeyStore password
* @return Keys to access PSU/FinTech users' key to read consent and its data
*/
@SneakyThrows
public PubAndPrivKey psuAspspKeyFromInbox(AuthSession authSession, Supplier<char[]> password) {
try (InputStream is = datasafeServices.inboxService().read(
ReadRequest.forDefaultPrivate(
authSession.getFintechUser().getFintech().getUserIdAuth(password),
new FintechPsuAspspTuple(authSession).toDatasafePathWithoutParent()))
) {
return serde.readKey(is);
}
} | 3.68 |
morf_RemoveColumn_reverse | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema reverse(Schema schema) {
Table original = schema.getTable(tableName);
List<String> columns = new ArrayList<>();
for (Column column : original.columns()) {
columns.add(column.getName());
}
columns.add(columnDefinition.getName());
return new TableOverrideSchema(schema, new AlteredTable(original, columns, Arrays.asList(new Column[] {columnDefinition})));
} | 3.68 |
flink_MemorySegment_getArray | /**
* Returns the byte array of on-heap memory segments.
*
* @return underlying byte array
* @throws IllegalStateException if the memory segment does not represent on-heap memory
*/
public byte[] getArray() {
if (heapMemory != null) {
return heapMemory;
} else {
throw new IllegalStateException("Memory segment does not represent heap memory");
}
} | 3.68 |
hbase_RpcServer_channelRead | /**
* This is a wrapper around
* {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}. If the amount of data
 * is large, it reads from the channel in smaller chunks. This is to avoid the JDK creating many
 * direct buffers as the size of the ByteBuffer increases. There should not be any performance
 * degradation.
 * @param channel readable byte channel to read from
 * @param buffer buffer to read into
 * @return number of bytes read
* @throws java.io.IOException e
* @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)
*/
protected int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {
int count = (buffer.remaining() <= NIO_BUFFER_LIMIT)
? channel.read(buffer)
: channelIO(channel, null, buffer);
if (count > 0) {
metrics.receivedBytes(count);
}
return count;
} | 3.68 |
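The point of the NIO_BUFFER_LIMIT check is to service a large ByteBuffer in bounded slices so the JDK does not allocate ever-larger temporary direct buffers. Below is a standalone sketch of such a chunked-read loop against a plain `ReadableByteChannel`; it is an illustration of the idea, not HBase's `channelIO`, and the 64 KB limit is an assumed value:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

public class ChunkedReadDemo {
    private static final int NIO_BUFFER_LIMIT = 64 * 1024; // assumed chunk size

    // Reads into buffer in slices of at most NIO_BUFFER_LIMIT bytes per channel.read() call.
    static int chunkedRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {
        int total = 0;
        while (buffer.hasRemaining()) {
            int originalLimit = buffer.limit();
            buffer.limit(Math.min(buffer.position() + NIO_BUFFER_LIMIT, originalLimit));
            int read = channel.read(buffer);
            buffer.limit(originalLimit); // restore the full limit before the next slice
            if (read <= 0) {
                return total == 0 ? read : total;
            }
            total += read;
        }
        return total;
    }

    public static void main(String[] args) throws IOException {
        byte[] data = new byte[200 * 1024];
        ReadableByteChannel channel = Channels.newChannel(new ByteArrayInputStream(data));
        ByteBuffer buffer = ByteBuffer.allocate(data.length);
        System.out.println(chunkedRead(channel, buffer)); // 204800
    }
}
```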
morf_DataSetConnectorMultiThreaded_calculateThreadCount | /**
* Calculates the number of threads to use
*
* @return The number of threads to use for dumping the database.
*/
private int calculateThreadCount() {
int processorCount = Runtime.getRuntime().availableProcessors();
switch(processorCount) {
case 0:
throw new RuntimeException("Could not find at least 1 processor");
case 1:
return 1;
case 2:
return 2;
default:
return 8;
}
} | 3.68 |
flink_PojoSerializerSnapshot_getCompatibilityOfPreExistingFields | /**
* Finds which Pojo fields exists both in the new {@link PojoSerializer} as well as in the
* previous one (represented by this snapshot), and returns an {@link
* IntermediateCompatibilityResult} of the serializers of those preexisting fields.
*/
private static <T> IntermediateCompatibilityResult<T> getCompatibilityOfPreExistingFields(
PojoSerializer<T> newPojoSerializer,
LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots) {
// the present entries dictates the preexisting fields, because removed fields would be
// represented as absent keys in the optional map.
final Set<LinkedOptionalMap.KeyValue<Field, TypeSerializerSnapshot<?>>>
presentFieldSnapshots = fieldSerializerSnapshots.getPresentEntries();
final ArrayList<TypeSerializerSnapshot<?>> associatedFieldSerializerSnapshots =
new ArrayList<>(presentFieldSnapshots.size());
final ArrayList<TypeSerializer<?>> associatedNewFieldSerializers =
new ArrayList<>(presentFieldSnapshots.size());
final Map<Field, TypeSerializer<?>> newFieldSerializersIndex =
buildNewFieldSerializersIndex(newPojoSerializer);
for (LinkedOptionalMap.KeyValue<Field, TypeSerializerSnapshot<?>> presentFieldEntry :
presentFieldSnapshots) {
TypeSerializer<?> associatedNewFieldSerializer =
newFieldSerializersIndex.get(presentFieldEntry.getKey());
checkState(
associatedNewFieldSerializer != null,
"a present field should have its associated new field serializer available.");
associatedFieldSerializerSnapshots.add(presentFieldEntry.getValue());
associatedNewFieldSerializers.add(associatedNewFieldSerializer);
}
return CompositeTypeSerializerUtil.constructIntermediateCompatibilityResult(
associatedNewFieldSerializers.toArray(
new TypeSerializer<?>[associatedNewFieldSerializers.size()]),
associatedFieldSerializerSnapshots.toArray(
new TypeSerializerSnapshot<?>[associatedFieldSerializerSnapshots.size()]));
} | 3.68 |
framework_TabsheetConnector_renderContent | /**
* (Re-)render the content of the active tab.
*/
protected void renderContent() {
ComponentConnector contentConnector = null;
if (!getChildComponents().isEmpty()) {
contentConnector = getChildComponents().get(0);
}
if (null != contentConnector) {
getWidget().renderContent(contentConnector.getWidget());
} else {
getWidget().renderContent(null);
}
} | 3.68 |
hbase_MonitoredRPCHandlerImpl_getClient | /**
* If an RPC call is currently running, produces a String representation of the connection from
* which it was received.
* @return A human-readable string representation of the address and port of the client.
*/
@Override
public String getClient() {
return clientAddress + ":" + remotePort;
} | 3.68 |
morf_SqlServerDialect_getSqlFrom | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlFrom(org.alfasoftware.morf.sql.SelectFirstStatement)
*/
@Override
protected String getSqlFrom(SelectFirstStatement stmt) {
StringBuilder result = new StringBuilder("SELECT TOP 1 ");
// Start by adding the field
result.append(getSqlFrom(stmt.getFields().get(0)));
appendFrom(result, stmt);
appendJoins(result, stmt, innerJoinKeyword(stmt));
appendWhere(result, stmt);
appendOrderBy(result, stmt);
return result.toString().trim();
} | 3.68 |
flink_PrioritizedDeque_iterator | /** @return read-only iterator */
public Iterator<T> iterator() {
return Collections.unmodifiableCollection(deque).iterator();
} | 3.68 |
hadoop_CosNFileSystem_mkDirRecursively | /**
* Recursively create a directory.
*
* @param f Absolute path to the directory.
* @param permission Directory permissions. Permission does not work for
* the CosN filesystem currently.
 * @return true if the creation was successful; otherwise an IOException is thrown.
* @throws IOException The specified path already exists or an error
* creating the path.
*/
public boolean mkDirRecursively(Path f, FsPermission permission)
throws IOException {
Path absolutePath = makeAbsolute(f);
List<Path> paths = new ArrayList<>();
do {
paths.add(absolutePath);
absolutePath = absolutePath.getParent();
} while (absolutePath != null);
for (Path path : paths) {
if (path.equals(new Path(CosNFileSystem.PATH_DELIMITER))) {
break;
}
try {
FileStatus fileStatus = getFileStatus(path);
if (fileStatus.isFile()) {
throw new FileAlreadyExistsException(
String.format("Can't make directory for path: %s, "
+ "since it is a file.", f));
}
if (fileStatus.isDirectory()) {
break;
}
} catch (FileNotFoundException e) {
LOG.debug("Making dir: [{}] in COS", f);
String folderPath = pathToKey(makeAbsolute(f));
if (!folderPath.endsWith(PATH_DELIMITER)) {
folderPath += PATH_DELIMITER;
}
store.storeEmptyFile(folderPath);
}
}
return true;
} | 3.68 |
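The directory walk above first collects the path and all of its ancestors, then processes them from leaf to root. A standalone sketch of that ancestor-collection step using `java.nio.file` (the sample path is invented):

```java
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

public class AncestorPathsDemo {
    // Collects the path itself plus every parent, mirroring the do/while loop in mkDirRecursively.
    static List<Path> selfAndAncestors(Path p) {
        List<Path> paths = new ArrayList<>();
        Path current = p.toAbsolutePath();
        do {
            paths.add(current);
            current = current.getParent();
        } while (current != null);
        return paths;
    }

    public static void main(String[] args) {
        System.out.println(selfAndAncestors(Paths.get("/data/warehouse/table1")));
        // [/data/warehouse/table1, /data/warehouse, /data, /]
    }
}
```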
hadoop_AzureFileSystemInstrumentation_fileDeleted | /**
* Indicate that we just deleted a file through WASB.
*/
public void fileDeleted() {
numberOfFilesDeleted.incr();
} | 3.68 |
framework_AbstractComponent_isReadOnly | /**
* Returns the read-only status from the state of this
* {@code AbstractComponent}. This method should be made public in
* {@link Component Components} that implement {@link HasValue}.
*
* @return {@code true} if state has read-only on; {@code false} if not
* @see #setReadOnly(boolean)
*/
protected boolean isReadOnly() {
if (getState(false) instanceof AbstractFieldState) {
return ((AbstractFieldState) getState(false)).readOnly;
}
throw new IllegalStateException(
"This component does not support the read-only mode, since state is of type "
+ getStateType().getSimpleName()
+ " and does not inherit "
+ AbstractFieldState.class.getSimpleName());
} | 3.68 |
framework_VComboBox_selectNextItem | /**
* Selects the next item in the filtered selections.
*/
public void selectNextItem() {
debug("VComboBox.SP: selectNextItem()");
final int index = menu.getSelectedIndex() + 1;
if (menu.getItems().size() > index) {
selectItem(menu.getItems().get(index));
} else {
selectNextPage();
}
} | 3.68 |
hudi_CompactionUtil_setPreCombineField | /**
* Sets up the preCombine field into the given configuration {@code conf}
* through reading from the hoodie table metadata.
* <p>
* This value is non-null as compaction can only be performed on MOR tables.
* Of which, MOR tables will have non-null precombine fields.
*
* @param conf The configuration
*/
public static void setPreCombineField(Configuration conf, HoodieTableMetaClient metaClient) {
String preCombineField = metaClient.getTableConfig().getPreCombineField();
if (preCombineField != null) {
conf.setString(FlinkOptions.PRECOMBINE_FIELD, preCombineField);
}
} | 3.68 |
framework_VComboBox_onBrowserEvent | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.user.client.ui.Composite#onBrowserEvent(com.google.gwt
* .user.client.Event)
*/
@Override
public void onBrowserEvent(Event event) {
super.onBrowserEvent(event);
if (event.getTypeInt() == Event.ONPASTE) {
if (textInputEnabled && connector.isEnabled()
&& !connector.isReadOnly()) {
Scheduler.get()
.scheduleDeferred(() -> filterOptions(currentPage));
}
}
} | 3.68 |
hbase_PrivateCellUtil_matchingQualifier | /**
* Finds if the qualifier part of the cell and the KV serialized byte[] are equal
* @param left the cell with which we need to match the qualifier
* @param buf the serialized keyvalue format byte[]
* @param offset the offset of the qualifier in the byte[]
* @param length the length of the qualifier in the byte[]
* @return true if the qualifier matches, false otherwise
*/
public static boolean matchingQualifier(final Cell left, final byte[] buf, final int offset,
final int length) {
if (buf == null) {
return left.getQualifierLength() == 0;
}
if (left instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getQualifierByteBuffer(),
((ByteBufferExtendedCell) left).getQualifierPosition(), left.getQualifierLength(), buf,
offset, length);
}
return Bytes.equals(left.getQualifierArray(), left.getQualifierOffset(),
left.getQualifierLength(), buf, offset, length);
} | 3.68 |
hbase_AbstractByteRange_getBytes | //
// methods for managing the backing array and range viewport
//
@Override
public byte[] getBytes() {
return bytes;
} | 3.68 |
hbase_BucketCache_evictBlockIfNoRpcReferenced | /**
* NOTE: This method is only for test.
*/
public boolean evictBlockIfNoRpcReferenced(BlockCacheKey blockCacheKey) {
BucketEntry bucketEntry = backingMap.get(blockCacheKey);
if (bucketEntry == null) {
return false;
}
return evictBucketEntryIfNoRpcReferenced(blockCacheKey, bucketEntry);
} | 3.68 |
hbase_Region_checkAndMutate | /**
* Atomically checks if a row matches the filter and if it does, it performs the mutation. See
* checkAndRowMutate to do many checkAndPuts at a time on a single row.
* @param row to check
* @param filter the filter
* @param mutation data to put if check succeeds
* @return true if mutation was applied, false otherwise
* @deprecated since 3.0.0 and will be removed in 4.0.0. Use
* {@link #checkAndMutate(CheckAndMutate)} instead.
*/
@Deprecated
default boolean checkAndMutate(byte[] row, Filter filter, Mutation mutation) throws IOException {
return checkAndMutate(row, filter, TimeRange.allTime(), mutation);
} | 3.68 |
zxing_MatrixUtil_maybeEmbedVersionInfo | // Embed version information if need be. On success, modify the matrix and return true.
// See 8.10 of JISX0510:2004 (p.47) for how to embed version information.
static void maybeEmbedVersionInfo(Version version, ByteMatrix matrix) throws WriterException {
if (version.getVersionNumber() < 7) { // Version info is necessary if version >= 7.
return; // Don't need version info.
}
BitArray versionInfoBits = new BitArray();
makeVersionInfoBits(version, versionInfoBits);
int bitIndex = 6 * 3 - 1; // It will decrease from 17 to 0.
for (int i = 0; i < 6; ++i) {
for (int j = 0; j < 3; ++j) {
// Place bits in LSB (least significant bit) to MSB order.
boolean bit = versionInfoBits.get(bitIndex);
bitIndex--;
// Left bottom corner.
matrix.set(i, matrix.getHeight() - 11 + j, bit);
// Right bottom corner.
matrix.set(matrix.getHeight() - 11 + j, i, bit);
}
}
} | 3.68 |
flink_Tuple12_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> copy() {
return new Tuple12<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11);
} | 3.68 |
hbase_AccessControlFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
// no implementation, server-side use only
throw new UnsupportedOperationException(
"Serialization not supported. Intended for server-side use only.");
} | 3.68 |
flink_PartitionRequestQueue_enqueueAvailableReader | /**
* Try to enqueue the reader once receiving credit notification from the consumer or receiving
* non-empty reader notification from the producer.
*
* <p>NOTE: Only one thread would trigger the actual enqueue after checking the reader's
* availability, so there is no race condition here.
*/
private void enqueueAvailableReader(final NetworkSequenceViewReader reader) throws Exception {
if (reader.isRegisteredAsAvailable()) {
return;
}
ResultSubpartitionView.AvailabilityWithBacklog availabilityWithBacklog =
reader.getAvailabilityAndBacklog();
if (!availabilityWithBacklog.isAvailable()) {
int backlog = availabilityWithBacklog.getBacklog();
if (backlog > 0 && reader.needAnnounceBacklog()) {
announceBacklog(reader, backlog);
}
return;
}
// Queue an available reader for consumption. If the queue is empty,
// we try trigger the actual write. Otherwise this will be handled by
// the writeAndFlushNextMessageIfPossible calls.
boolean triggerWrite = availableReaders.isEmpty();
registerAvailableReader(reader);
if (triggerWrite) {
writeAndFlushNextMessageIfPossible(ctx.channel());
}
} | 3.68 |
hbase_HFileWriterImpl_checkValue | /** Checks the given value for validity. */
protected void checkValue(final byte[] value, final int offset, final int length)
throws IOException {
if (value == null) {
throw new IOException("Value cannot be null");
}
} | 3.68 |
hadoop_AMRMTokenSecretManager_retrievePassword | /**
* Retrieve the password for the given {@link AMRMTokenIdentifier}.
* Used by RPC layer to validate a remote {@link AMRMTokenIdentifier}.
*/
@Override
public byte[] retrievePassword(AMRMTokenIdentifier identifier)
throws InvalidToken {
this.readLock.lock();
try {
ApplicationAttemptId applicationAttemptId =
identifier.getApplicationAttemptId();
LOG.debug("Trying to retrieve password for {}", applicationAttemptId);
if (!appAttemptSet.contains(applicationAttemptId)) {
throw new InvalidToken(applicationAttemptId
+ " not found in AMRMTokenSecretManager.");
}
if (identifier.getKeyId() == this.currentMasterKey.getMasterKey()
.getKeyId()) {
return createPassword(identifier.getBytes(),
this.currentMasterKey.getSecretKey());
} else if (nextMasterKey != null
&& identifier.getKeyId() == this.nextMasterKey.getMasterKey()
.getKeyId()) {
return createPassword(identifier.getBytes(),
this.nextMasterKey.getSecretKey());
}
throw new InvalidToken("Invalid AMRMToken from " + applicationAttemptId);
} finally {
this.readLock.unlock();
}
} | 3.68 |
flink_HighAvailabilityServicesUtils_getClusterHighAvailableStoragePath | /**
* Gets the cluster high available storage path from the provided configuration.
*
* <p>The format is {@code HA_STORAGE_PATH/HA_CLUSTER_ID}.
*
* @param configuration containing the configuration values
* @return Path under which all highly available cluster artifacts are being stored
*/
public static Path getClusterHighAvailableStoragePath(Configuration configuration) {
final String storagePath = configuration.getValue(HighAvailabilityOptions.HA_STORAGE_PATH);
if (isNullOrWhitespaceOnly(storagePath)) {
throw new IllegalConfigurationException(
"Configuration is missing the mandatory parameter: "
+ HighAvailabilityOptions.HA_STORAGE_PATH);
}
final Path path;
try {
path = new Path(storagePath);
} catch (Exception e) {
throw new IllegalConfigurationException(
"Invalid path for highly available storage ("
+ HighAvailabilityOptions.HA_STORAGE_PATH.key()
+ ')',
e);
}
final String clusterId = configuration.getValue(HighAvailabilityOptions.HA_CLUSTER_ID);
final Path clusterStoragePath;
try {
clusterStoragePath = new Path(path, clusterId);
} catch (Exception e) {
throw new IllegalConfigurationException(
String.format(
"Cannot create cluster high available storage path '%s/%s'. This indicates that an invalid cluster id (%s) has been specified.",
storagePath, clusterId, HighAvailabilityOptions.HA_CLUSTER_ID.key()),
e);
}
return clusterStoragePath;
} | 3.68 |
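The resulting layout is simply HA_STORAGE_PATH/HA_CLUSTER_ID. A small sketch of the path composition with plain strings; the option key names in the comment are how I recall the Flink keys being spelled (an assumption worth checking), and the sample values are invented:

```java
public class HaStoragePathDemo {
    // Mirrors the HA_STORAGE_PATH/HA_CLUSTER_ID composition described in the javadoc.
    static String clusterHighAvailableStoragePath(String storagePath, String clusterId) {
        if (storagePath == null || storagePath.trim().isEmpty()) {
            throw new IllegalArgumentException("Missing mandatory high-availability storage path");
        }
        return storagePath.endsWith("/") ? storagePath + clusterId : storagePath + "/" + clusterId;
    }

    public static void main(String[] args) {
        // e.g. high-availability.storageDir = hdfs:///flink/ha, high-availability.cluster-id = cluster-42
        System.out.println(clusterHighAvailableStoragePath("hdfs:///flink/ha", "cluster-42"));
        // hdfs:///flink/ha/cluster-42
    }
}
```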
hbase_MetricsConnection_getRpcHistograms | /** rpcHistograms metric */
public ConcurrentMap<String, Histogram> getRpcHistograms() {
return rpcHistograms;
} | 3.68 |
hbase_FSDataInputStreamWrapper_prepareForBlockReader | /**
* Prepares the streams for block reader. NOT THREAD SAFE. Must be called once, after any reads
* finish and before any other reads start (what happens in reality is we read the tail, then call
* this based on what's in the tail, then read blocks).
* @param forceNoHBaseChecksum Force not using HBase checksum.
*/
public void prepareForBlockReader(boolean forceNoHBaseChecksum) throws IOException {
if (hfs == null) return;
assert this.stream != null && !this.useHBaseChecksumConfigured;
boolean useHBaseChecksum =
!forceNoHBaseChecksum && hfs.useHBaseChecksum() && (hfs.getNoChecksumFs() != hfs);
if (useHBaseChecksum) {
FileSystem fsNc = hfs.getNoChecksumFs();
this.streamNoFsChecksum = (link != null) ? link.open(fsNc) : fsNc.open(path);
setStreamOptions(streamNoFsChecksum);
this.useHBaseChecksumConfigured = this.useHBaseChecksum = useHBaseChecksum;
// Close the checksum stream; we will reopen it if we get an HBase checksum failure.
this.stream.close();
this.stream = null;
}
} | 3.68 |
morf_OracleMetaDataProvider_tableMap | /**
* Use to access the metadata for the tables in the specified connection.
* Lazily initialises the metadata, and only loads it once.
*
* @return Table metadata.
*/
private Map<String, Table> tableMap() {
if (tableMap != null) {
return tableMap;
}
tableMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
expensiveReadTableNames();
return tableMap;
} | 3.68 |
hadoop_NamenodeStatusReport_getNumInMaintenanceDeadDataNodes | /**
* Get the number of dead in maintenance nodes.
*
* @return The number of dead in maintenance nodes.
*/
public int getNumInMaintenanceDeadDataNodes() {
return this.inMaintenanceDeadDataNodes;
} | 3.68 |
flink_JobEdge_getTarget | /**
* Returns the vertex connected to this edge.
*
* @return The vertex connected to this edge.
*/
public JobVertex getTarget() {
return target;
} | 3.68 |
hbase_ThriftConnection_getTableBuilder | /**
 * Get a TableBuilder to build a ThriftTable; ThriftTable is NOT thread safe.
* @return a TableBuilder
* @throws IOException IOException
*/
@Override
public TableBuilder getTableBuilder(TableName tableName, ExecutorService pool) {
return new TableBuilder() {
@Override
public TableBuilder setOperationTimeout(int timeout) {
return this;
}
@Override
public TableBuilder setRpcTimeout(int timeout) {
return this;
}
@Override
public TableBuilder setReadRpcTimeout(int timeout) {
return this;
}
@Override
public TableBuilder setWriteRpcTimeout(int timeout) {
return this;
}
@Override
public TableBuilder setRequestAttribute(String key, byte[] value) {
return this;
}
@Override
public Table build() {
try {
Pair<THBaseService.Client, TTransport> client = clientBuilder.getClient();
return new ThriftTable(tableName, client.getFirst(), client.getSecond(), conf);
} catch (IOException ioE) {
throw new RuntimeException(ioE);
}
}
};
} | 3.68 |
morf_Upgrade_buildUpgradePath | /**
* Turn the information gathered so far into an {@code UpgradePath}.
*
* @param connectionResources Database connection resources.
* @param sourceSchema Source schema.
* @param targetSchema Target schema.
* @param upgradeStatements Upgrade statements identified.
* @param viewChanges Changes needed to the views.
* @param upgradesToApply Upgrade steps identified.
* @param graphBasedUpgradeBuilder Builder for the Graph Based Upgrade
* @param upgradeAuditCount Number of already applied upgrade steps
* @return An upgrade path.
*/
private UpgradePath buildUpgradePath(
ConnectionResources connectionResources, Schema sourceSchema, Schema targetSchema,
List<String> upgradeStatements, ViewChanges viewChanges,
List<UpgradeStep> upgradesToApply,
GraphBasedUpgradeBuilder graphBasedUpgradeBuilder,
long upgradeAuditCount) {
List<String> pathValidationSql = databaseUpgradePathValidationService.getPathValidationSql(upgradeAuditCount);
UpgradePath path = factory.create(upgradesToApply, connectionResources, graphBasedUpgradeBuilder, pathValidationSql);
path.writeSql(UpgradeHelper.preSchemaUpgrade(new UpgradeSchemas(sourceSchema, targetSchema), viewChanges, viewChangesDeploymentHelper));
path.writeSql(upgradeStatements);
path.writeSql(UpgradeHelper.postSchemaUpgrade(new UpgradeSchemas(sourceSchema, targetSchema), viewChanges, viewChangesDeploymentHelper));
// Since Oracle is not able to re-map schema references in trigger code, we need to rebuild all triggers
// for id column autonumbering when exporting and importing data between environments.
// We will drop-and-recreate triggers whenever there are upgrade steps to execute. Ideally we'd want to do
// this step once, however there's no easy way to do that with our upgrade framework.
if (!upgradesToApply.isEmpty()) {
AtomicBoolean first = new AtomicBoolean(true);
targetSchema.tables().stream()
.map(t -> connectionResources.sqlDialect().rebuildTriggers(t))
.filter(sql -> !sql.isEmpty())
.peek(sql -> {
if (first.compareAndSet(true, false)) {
path.writeSql(ImmutableList.of(
connectionResources.sqlDialect().convertCommentToSQL("Upgrades executed. Rebuilding all triggers to account for potential changes to autonumbered columns")
));
}
})
.forEach(path::writeSql);
}
return path;
} | 3.68 |
flink_AbstractOrcColumnVector_createHiveVectorFromConstant | /**
* Create a orc vector from partition spec value. See hive {@code
* VectorizedRowBatchCtx#addPartitionColsToBatch}.
*/
private static ColumnVector createHiveVectorFromConstant(
LogicalType type, Object value, int batchSize) {
switch (type.getTypeRoot()) {
case CHAR:
case VARCHAR:
case BINARY:
case VARBINARY:
return createBytesVector(batchSize, value);
case BOOLEAN:
return createLongVector(batchSize, (Boolean) value ? 1 : 0);
case TINYINT:
case SMALLINT:
case INTEGER:
case BIGINT:
return createLongVector(batchSize, value);
case DECIMAL:
DecimalType decimalType = (DecimalType) type;
return createDecimalVector(
batchSize, decimalType.getPrecision(), decimalType.getScale(), value);
case FLOAT:
case DOUBLE:
return createDoubleVector(batchSize, value);
case DATE:
if (value instanceof LocalDate) {
value = Date.valueOf((LocalDate) value);
}
return createLongVector(batchSize, toInternal((Date) value));
case TIMESTAMP_WITHOUT_TIME_ZONE:
return TimestampUtil.createVectorFromConstant(batchSize, value);
default:
throw new UnsupportedOperationException("Unsupported type: " + type);
}
} | 3.68 |
hbase_ChunkCreator_clearChunksInPool | /*
* Only used in testing
*/
void clearChunksInPool() {
if (dataChunksPool != null) {
dataChunksPool.reclaimedChunks.clear();
}
if (indexChunksPool != null) {
indexChunksPool.reclaimedChunks.clear();
}
} | 3.68 |
rocketmq-connect_WorkerSinkTask_resumeAll | // resume all consumer topic queue
private void resumeAll() {
for (MessageQueue queue : messageQueues) {
if (!sinkTaskContext.getPausedQueues().contains(queue)) {
consumer.resume(singleton(queue));
}
}
} | 3.68 |
flink_BinaryStringDataUtil_concat | /**
* Concatenates input strings together into a single string. Returns NULL if any argument is
* NULL.
*/
public static BinaryStringData concat(BinaryStringData... inputs) {
return concat(Arrays.asList(inputs));
} | 3.68 |
hadoop_ClientId_toString | /**
 * Convert a clientId byte[] to its string representation.
 * @param clientId input clientId.
 * @return the string representation of the clientId.
*/
public static String toString(byte[] clientId) {
// clientId can be null or an empty array
if (clientId == null || clientId.length == 0) {
return "";
}
// otherwise should be 16 bytes
Preconditions.checkArgument(clientId.length == BYTE_LENGTH);
long msb = getMsb(clientId);
long lsb = getLsb(clientId);
return (new UUID(msb, lsb)).toString();
} | 3.68 |
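getMsb/getLsb read the first and last eight bytes of the 16-byte client id as two longs, which `java.util.UUID` then formats. A standalone sketch of that byte[] to UUID-string conversion using ByteBuffer, round-tripping an invented UUID (a sketch of the described behaviour, not Hadoop's exact helper methods):

```java
import java.nio.ByteBuffer;
import java.util.UUID;

public class ClientIdDemo {
    // Converts a 16-byte client id into its UUID string form, as the snippet describes.
    static String clientIdToString(byte[] clientId) {
        if (clientId == null || clientId.length == 0) {
            return "";
        }
        ByteBuffer bb = ByteBuffer.wrap(clientId);
        long msb = bb.getLong();   // first 8 bytes
        long lsb = bb.getLong();   // last 8 bytes
        return new UUID(msb, lsb).toString();
    }

    public static void main(String[] args) {
        UUID original = UUID.fromString("123e4567-e89b-12d3-a456-426614174000");
        byte[] bytes = ByteBuffer.allocate(16)
                .putLong(original.getMostSignificantBits())
                .putLong(original.getLeastSignificantBits())
                .array();
        System.out.println(clientIdToString(bytes)); // 123e4567-e89b-12d3-a456-426614174000
    }
}
```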
hbase_MultiByteBuff_rewind | /**
* Rewinds this MBB and the position is set to 0
* @return this object
*/
@Override
public MultiByteBuff rewind() {
checkRefCount();
for (int i = 0; i < this.items.length; i++) {
this.items[i].rewind();
}
this.curItemIndex = 0;
this.curItem = this.items[this.curItemIndex];
this.markedItemIndex = -1;
return this;
} | 3.68 |
pulsar_InetAddressUtils_isIPv6HexCompressedAddress | /**
* Checks whether the parameter is a valid compressed IPv6 address.
*
* @param input the address string to check for validity
* @return true if the input parameter is a valid compressed IPv6 address
*/
public static boolean isIPv6HexCompressedAddress(final String input) {
int colonCount = 0;
for (int i = 0; i < input.length(); i++) {
if (input.charAt(i) == COLON_CHAR) {
colonCount++;
}
}
return colonCount <= MAX_COLON_COUNT && IPV6_HEX_COMPRESSED_PATTERN.matcher(input).matches();
} | 3.68 |
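A self-contained sketch of the same check with an explicit regex. The pattern below is the commonly used compressed-IPv6 expression from Apache HttpCore's InetAddressUtils, and the MAX_COLON_COUNT value is assumed; Pulsar's actual constants may differ in detail:

```java
import java.util.regex.Pattern;

public class Ipv6CompressedDemo {
    // Commonly used compressed-IPv6 pattern; the original class's constant may differ slightly.
    private static final Pattern IPV6_HEX_COMPRESSED_PATTERN = Pattern.compile(
            "^((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)$");
    private static final int MAX_COLON_COUNT = 7; // assumed value of MAX_COLON_COUNT

    static boolean isIPv6HexCompressedAddress(String input) {
        int colonCount = 0;
        for (int i = 0; i < input.length(); i++) {
            if (input.charAt(i) == ':') {
                colonCount++;
            }
        }
        return colonCount <= MAX_COLON_COUNT && IPV6_HEX_COMPRESSED_PATTERN.matcher(input).matches();
    }

    public static void main(String[] args) {
        System.out.println(isIPv6HexCompressedAddress("2001:db8::1"));          // true
        System.out.println(isIPv6HexCompressedAddress("::1"));                  // true
        System.out.println(isIPv6HexCompressedAddress("2001:db8:0:0:0:0:2:1")); // false (not compressed)
    }
}
```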
hbase_MasterWalManager_getServerNamesFromWALDirPath | /** Returns listing of ServerNames found by parsing WAL directory paths in FS. */
public Set<ServerName> getServerNamesFromWALDirPath(final PathFilter filter) throws IOException {
FileStatus[] walDirForServerNames = getWALDirPaths(filter);
return Stream.of(walDirForServerNames).map(s -> {
ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(s.getPath());
if (serverName == null) {
LOG.warn("Log folder {} doesn't look like its name includes a "
+ "region server name; leaving in place. If you see later errors about missing "
+ "write ahead logs they may be saved in this location.", s.getPath());
return null;
}
return serverName;
}).filter(s -> s != null).collect(Collectors.toSet());
} | 3.68 |
morf_JdbcUrlElements_equals | /**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
JdbcUrlElements other = (JdbcUrlElements) obj;
if (databaseType == null) {
if (other.databaseType != null) return false;
} else if (!databaseType.equals(other.databaseType)) return false;
if (databaseName == null) {
if (other.databaseName != null) return false;
} else if (!databaseName.equals(other.databaseName)) return false;
if (hostName == null) {
if (other.hostName != null) return false;
} else if (!hostName.equals(other.hostName)) return false;
if (instanceName == null) {
if (other.instanceName != null) return false;
} else if (!instanceName.equals(other.instanceName)) return false;
if (port != other.port) return false;
if (schemaName == null) {
if (other.schemaName != null) return false;
} else if (!schemaName.equals(other.schemaName)) return false;
return true;
} | 3.68 |