name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
framework_AbstractSelect_sanitizeSelection | /**
* Checks that the current selection is valid, i.e. the selected item ids
* exist in the container. Updates the selection if one or several selected
* item ids are no longer available in the container.
*/
@SuppressWarnings("unchecked")
public void sanitizeSelection() {
Object value = getValue();
if (value == null) {
return;
}
boolean changed = false;
if (isMultiSelect()) {
Collection<Object> valueAsCollection = (Collection<Object>) value;
List<Object> newSelection = new ArrayList<Object>(
valueAsCollection.size());
for (Object subValue : valueAsCollection) {
if (containsId(subValue)) {
newSelection.add(subValue);
} else {
changed = true;
}
}
if (changed) {
setValue(newSelection);
}
} else {
if (!containsId(value)) {
setValue(null);
}
}
} | 3.68 |
flink_TypeInference_inputTypeStrategy | /**
* Sets the strategy for inferring and validating input arguments in a function call.
*
* <p>A {@link InputTypeStrategies#WILDCARD} strategy function is assumed by default.
*/
public Builder inputTypeStrategy(InputTypeStrategy inputTypeStrategy) {
this.inputTypeStrategy =
Preconditions.checkNotNull(
inputTypeStrategy, "Input type strategy must not be null.");
return this;
} | 3.68 |
hadoop_TypedBytesOutput_writeList | /**
* Writes a list as a typed bytes sequence.
*
* @param list the list to be written
* @throws IOException
*/
public void writeList(List list) throws IOException {
writeListHeader();
for (Object obj : list) {
write(obj);
}
writeListFooter();
} | 3.68 |
hbase_HRegion_doSyncOfUnflushedWALChanges | /**
* Sync unflushed WAL changes. See HBASE-8208 for details
*/
private static void doSyncOfUnflushedWALChanges(final WAL wal, final RegionInfo hri)
throws IOException {
if (wal == null) {
return;
}
try {
wal.sync(); // ensure that flush marker is sync'ed
} catch (IOException ioe) {
wal.abortCacheFlush(hri.getEncodedNameAsBytes());
throw ioe;
}
} | 3.68 |
hudi_HoodieInputFormatUtils_groupFileStatusForSnapshotPaths | /**
* Takes in a list of file statuses and a collection of table metadata. Groups the file status list
* based on the given table metadata.
*
* @param fileStatuses the file statuses to group
* @param fileExtension the data file extension used to filter input paths
* @param metaClientList the table metadata clients to group the file statuses by
* @return a map from each table's meta client to the file statuses belonging to that table
*/
public static Map<HoodieTableMetaClient, List<FileStatus>> groupFileStatusForSnapshotPaths(
FileStatus[] fileStatuses, String fileExtension, Collection<HoodieTableMetaClient> metaClientList) {
// This assumes the paths for different tables are grouped together
Map<HoodieTableMetaClient, List<FileStatus>> grouped = new HashMap<>();
HoodieTableMetaClient metadata = null;
for (FileStatus status : fileStatuses) {
Path inputPath = status.getPath();
if (!inputPath.getName().endsWith(fileExtension)) {
// FIXME(vc): skip non-data files for now. This won't be needed once log file names start
// with "."
continue;
}
if ((metadata == null) || (!inputPath.toString().contains(metadata.getBasePath()))) {
for (HoodieTableMetaClient metaClient : metaClientList) {
if (inputPath.toString().contains(metaClient.getBasePath())) {
metadata = metaClient;
if (!grouped.containsKey(metadata)) {
grouped.put(metadata, new ArrayList<>());
}
break;
}
}
}
grouped.get(metadata).add(status);
}
return grouped;
} | 3.68 |
hibernate-validator_ResourceBundleMessageInterpolator_buildExpressionFactory | /**
* The jakarta.el FactoryFinder uses the TCCL to load the {@link ExpressionFactory} implementation so we need to be
* extra careful when initializing it.
*
* @return the {@link ExpressionFactory}
*/
private static ExpressionFactory buildExpressionFactory() {
// First, we try to load the instance from the original TCCL.
if ( canLoadExpressionFactory() ) {
ExpressionFactory expressionFactory = ELManager.getExpressionFactory();
LOG.debug( "Loaded expression factory via original TCCL" );
return expressionFactory;
}
final ClassLoader originalContextClassLoader = run( GetClassLoader.fromContext() );
try {
// Then we try the Hibernate Validator class loader. In a fully-functional modular environment such as
// WildFly or Jigsaw, it is the way to go.
run( SetContextClassLoader.action( ResourceBundleMessageInterpolator.class.getClassLoader() ) );
if ( canLoadExpressionFactory() ) {
ExpressionFactory expressionFactory = ELManager.getExpressionFactory();
LOG.debug( "Loaded expression factory via HV classloader" );
return expressionFactory;
}
// We try the CL of the EL module itself; the EL RI uses the TCCL to load the implementation from
// its own module, so this should work.
run( SetContextClassLoader.action( ELManager.class.getClassLoader() ) );
if ( canLoadExpressionFactory() ) {
ExpressionFactory expressionFactory = ELManager.getExpressionFactory();
LOG.debug( "Loaded expression factory via EL classloader" );
return expressionFactory;
}
// Finally we try the CL of the EL implementation itself. This is necessary for OSGi now that the
// implementation is separated from the API.
run( SetContextClassLoader.action( ExpressionFactoryImpl.class.getClassLoader() ) );
if ( canLoadExpressionFactory() ) {
ExpressionFactory expressionFactory = ELManager.getExpressionFactory();
LOG.debug( "Loaded expression factory via com.sun.el classloader" );
return expressionFactory;
}
}
catch (Throwable e) {
throw LOG.getUnableToInitializeELExpressionFactoryException( e );
}
finally {
run( SetContextClassLoader.action( originalContextClassLoader ) );
}
// HV-793 - We fail eagerly in case we have no EL dependencies on the classpath
throw LOG.getUnableToInitializeELExpressionFactoryException( null );
} | 3.68 |
hadoop_VersionInfoMojo_getSCMUri | /**
* Parses SCM output and returns URI of SCM.
*
* @param scm SCM in use for this build
* @return String URI of SCM
*/
private String getSCMUri(SCM scm) {
String uri = "Unknown";
switch (scm) {
case GIT:
for (String s : scmOut) {
if (s.startsWith("origin") && s.endsWith("(fetch)")) {
uri = s.substring("origin".length());
uri = uri.substring(0, uri.length() - "(fetch)".length());
break;
}
}
break;
}
return uri.trim();
} | 3.68 |
dubbo_RpcServiceContext_getRemoteHostName | /**
* get remote host name.
*
* @return remote host name
*/
@Override
public String getRemoteHostName() {
return remoteAddress == null ? null : remoteAddress.getHostName();
} | 3.68 |
framework_AbstractOrderedLayoutWithCaptions_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 13741;
} | 3.68 |
graphhopper_VectorTile_addLayers | /**
* <code>repeated .vector_tile.Tile.Layer layers = 3;</code>
*/
public Builder addLayers(
int index, vector_tile.VectorTile.Tile.Layer.Builder builderForValue) {
if (layersBuilder_ == null) {
ensureLayersIsMutable();
layers_.add(index, builderForValue.build());
onChanged();
} else {
layersBuilder_.addMessage(index, builderForValue.build());
}
return this;
} | 3.68 |
hbase_MultiTableInputFormatBase_includeRegionInSplit | /**
* Test if the given region is to be included in the InputSplit while splitting the regions of a
* table.
* <p>
* This optimization is effective when there is a specific reason to exclude an entire region
* from the M-R job (and hence not contribute to the InputSplit), given the start and end keys
* of that region. <br>
* Useful when we need to remember the last-processed top record and revisit the [last, current)
* interval for M-R processing, continuously. In addition to reducing InputSplits, reduces the
* load on the region server as well, due to the ordering of the keys. <br>
* <br>
* Note: It is possible that <code>endKey.length() == 0 </code> , for the last (recent) region.
* <br>
* Override this method, if you want to bulk exclude regions altogether from M-R. By default, no
* region is excluded (i.e. all regions are included).
* @param startKey Start key of the region
* @param endKey End key of the region
* @return true, if this region needs to be included as part of the input (default).
*/
protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) {
return true;
} | 3.68 |
hadoop_StagingCommitter_preCommitJob | /**
* Pre-commit actions for a job.
* Loads all the pending files to verify they can be loaded
* and parsed.
* @param commitContext commit context
* @param pending pending commits
* @throws IOException any failure
*/
@Override
public void preCommitJob(
CommitContext commitContext,
final ActiveCommit pending) throws IOException {
// see if the files can be loaded.
precommitCheckPendingFiles(commitContext, pending);
} | 3.68 |
hbase_ReplicationPeers_getAllPeerIds | /**
* Returns the set of peerIds of the clusters that have been connected and have an underlying
* ReplicationPeer.
* @return a Set of Strings for peerIds
*/
public Set<String> getAllPeerIds() {
return Collections.unmodifiableSet(peerCache.keySet());
} | 3.68 |
hbase_User_getCurrent | /** Returns the {@code User} instance within current execution context. */
public static User getCurrent() throws IOException {
User user = new SecureHadoopUser();
if (user.getUGI() == null) {
return null;
}
return user;
} | 3.68 |
hbase_ChaosAgent_register | /**
* registration of ChaosAgent by checking and creating necessary ZNodes.
*/
private void register() {
createIfZNodeNotExists(ChaosConstants.CHAOS_TEST_ROOT_ZNODE);
createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE);
createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE);
createIfZNodeNotExists(ChaosConstants.CHAOS_AGENT_STATUS_PERSISTENT_ZNODE
+ ChaosConstants.ZNODE_PATH_SEPARATOR + agentName);
createEphemeralZNode(ChaosConstants.CHAOS_AGENT_REGISTRATION_EPIMERAL_ZNODE
+ ChaosConstants.ZNODE_PATH_SEPARATOR + agentName, new byte[0]);
} | 3.68 |
flink_SortUtil_putDecimalNormalizedKey | /** Just support the compact precision decimal. */
public static void putDecimalNormalizedKey(
DecimalData record, MemorySegment target, int offset, int len) {
assert record.isCompact();
putLongNormalizedKey(record.toUnscaledLong(), target, offset, len);
} | 3.68 |
framework_VCalendarPanel_onKeyPress | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.KeyPressHandler#onKeyPress(com.google
* .gwt.event.dom.client.KeyPressEvent)
*/
@Override
public void onKeyPress(KeyPressEvent event) {
handleKeyPress(event);
} | 3.68 |
hadoop_TimelineEntity_getDomainId | /**
* Get the ID of the domain that the entity is to be put into
*
* @return the domain ID
*/
@XmlElement(name = "domain")
public String getDomainId() {
return domainId;
} | 3.68 |
flink_HadoopDataInputStream_forceSeek | /**
* Positions the stream to the given location. In contrast to {@link #seek(long)}, this method
* will always issue a "seek" command to the dfs and may not replace it by {@link #skip(long)}
* for small seeks.
*
* <p>Notice that the underlying DFS implementation can still decide to do skip instead of seek.
*
* @param seekPos the position to seek to.
* @throws IOException
*/
public void forceSeek(long seekPos) throws IOException {
fsDataInputStream.seek(seekPos);
} | 3.68 |
querydsl_PathBuilderFactory_create | /**
* Create a new PathBuilder instance for the given type
*
* @param type type of expression
* @return new PathBuilder instance
*/
@SuppressWarnings("unchecked")
public <T> PathBuilder<T> create(Class<T> type) {
PathBuilder<T> rv = (PathBuilder<T>) paths.get(type);
if (rv == null) {
rv = new PathBuilder<T>(type, variableName(type));
paths.put(type, rv);
}
return rv;
} | 3.68 |
flink_MemorySegment_getShortLittleEndian | /**
* Reads a short integer value (16 bit, 2 bytes) from the given position, in little-endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #getShort(int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #getShort(int)} is
* the preferable choice.
*
* @param index The position from which the value will be read.
* @return The short value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public short getShortLittleEndian(int index) {
if (LITTLE_ENDIAN) {
return getShort(index);
} else {
return Short.reverseBytes(getShort(index));
}
} | 3.68 |
hbase_Encryption_getCipher | /**
* Get a cipher given a name
* @param name the cipher name
* @return the cipher, or null if a suitable one could not be found
*/
public static Cipher getCipher(Configuration conf, String name) {
return getCipherProvider(conf).getCipher(name);
} | 3.68 |
hbase_TagCompressionContext_uncompressTags | /**
* Uncompress tags from the InputStream and writes to the destination buffer.
* @param src Stream where the compressed tags are available
* @param dest Destination buffer where to write the uncompressed tags
* @param length Length of all tag bytes
* @throws IOException when the dictionary does not have the entry
*/
public void uncompressTags(InputStream src, ByteBuffer dest, int length) throws IOException {
if (dest.hasArray()) {
uncompressTags(src, dest.array(), dest.arrayOffset() + dest.position(), length);
} else {
byte[] tagBuf = new byte[length];
uncompressTags(src, tagBuf, 0, length);
dest.put(tagBuf);
}
} | 3.68 |
framework_Navigator_init | /**
* Initializes a navigator created with the no arguments constructor.
* <p>
* When a custom navigation state manager is not needed, use null to create
* a default one based on URI fragments.
* <p>
* Navigation is automatically initiated after {@code UI.init()} if a
* navigator was created. If at a later point changes are made to the
* navigator, {@code navigator.navigateTo(navigator.getState())} may need to
* be explicitly called to ensure the current view matches the navigation
* state.
*
* @since 7.6
* @param ui
* The UI to which this Navigator is attached.
* @param stateManager
* The NavigationStateManager keeping track of the active view
* and enabling bookmarking and direct navigation or null for
* default
* @param display
* The ViewDisplay used to display the views handled by this
* navigator
*/
protected void init(UI ui, NavigationStateManager stateManager,
ViewDisplay display) {
this.ui = ui;
this.ui.setNavigator(this);
if (stateManager == null) {
stateManager = createNavigationStateManager(ui);
}
if (stateManager != null && this.stateManager != null
&& stateManager != this.stateManager) {
this.stateManager.setNavigator(null);
}
this.stateManager = stateManager;
this.stateManager.setNavigator(this);
this.display = display;
} | 3.68 |
hibernate-validator_INNValidator_checkChecksumPersonalINN | /**
* Check the digits for personal INN using algorithm from
* <a href="https://ru.wikipedia.org/wiki/%D0%98%D0%B4%D0%B5%D0%BD%D1%82%D0%B8%D1%84%D0%B8%D0%BA%D0%B0%D1%86%D0%B8%D0%BE%D0%BD%D0%BD%D1%8B%D0%B9_%D0%BD%D0%BE%D0%BC%D0%B5%D1%80_%D0%BD%D0%B0%D0%BB%D0%BE%D0%B3%D0%BE%D0%BF%D0%BB%D0%B0%D1%82%D0%B5%D0%BB%D1%8C%D1%89%D0%B8%D0%BA%D0%B0#%D0%92%D1%8B%D1%87%D0%B8%D1%81%D0%BB%D0%B5%D0%BD%D0%B8%D0%B5_%D0%BA%D0%BE%D0%BD%D1%82%D1%80%D0%BE%D0%BB%D1%8C%D0%BD%D1%8B%D1%85_%D1%86%D0%B8%D1%84%D1%80">Wikipedia</a>.
*/
private static boolean checkChecksumPersonalINN(int[] digits) {
final int checkSum11 = getCheckSum( digits, INDIVIDUAL_WEIGHTS_11 );
final int checkSum12 = getCheckSum( digits, INDIVIDUAL_WEIGHTS_12 );
final boolean isCheckSum11Correct = checkSum11 == digits[digits.length - 2];
final boolean isCheckSum12Correct = checkSum12 == digits[digits.length - 1];
return isCheckSum11Correct && isCheckSum12Correct;
} | 3.68 |
hbase_SkipFilter_parseFrom | /**
* Parse a serialized representation of {@link SkipFilter}
* @param pbBytes A pb serialized {@link SkipFilter} instance
* @return An instance of {@link SkipFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static SkipFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.SkipFilter proto;
try {
proto = FilterProtos.SkipFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
try {
return new SkipFilter(ProtobufUtil.toFilter(proto.getFilter()));
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
} | 3.68 |
framework_ScrollbarBundle_getScrollSize | /**
* Gets the number of pixels the scrollbar needs to be able to scroll
* through.
*
* @return the number of pixels the scrollbar should be able to scroll
* through
*/
public double getScrollSize() {
return parseCssDimensionToPixels(internalGetScrollSize());
} | 3.68 |
flink_ResourceManagerPartitionTrackerImpl_areAllMapsEmpty | /**
* Returns whether all maps are empty; used for checking for resource leaks in case entries
* aren't properly removed.
*
* @return whether all contained maps are empty
*/
@VisibleForTesting
boolean areAllMapsEmpty() {
return taskExecutorToDataSets.isEmpty()
&& dataSetToTaskExecutors.isEmpty()
&& dataSetMetaInfo.isEmpty()
&& partitionReleaseCompletionFutures.isEmpty();
} | 3.68 |
flink_JarHandlerUtils_getProgramArgs | /** Parse program arguments in jar run or plan request. */
private static <R extends JarRequestBody, M extends MessageParameters>
List<String> getProgramArgs(HandlerRequest<R> request, Logger log)
throws RestHandlerException {
JarRequestBody requestBody = request.getRequestBody();
@SuppressWarnings("deprecation")
List<String> programArgs =
tokenizeArguments(
fromRequestBodyOrQueryParameter(
emptyToNull(requestBody.getProgramArguments()),
() -> getQueryParameter(request, ProgramArgsQueryParameter.class),
null,
log));
List<String> programArgsList =
fromRequestBodyOrQueryParameter(
requestBody.getProgramArgumentsList(),
() -> request.getQueryParameter(ProgramArgQueryParameter.class),
null,
log);
if (!programArgsList.isEmpty()) {
if (!programArgs.isEmpty()) {
throw new RestHandlerException(
"Confusing request: programArgs and programArgsList are specified, please, use only programArgsList",
HttpResponseStatus.BAD_REQUEST);
}
return programArgsList;
} else {
return programArgs;
}
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_getFileSystemInstanceId | /**
* The unique identifier for this file system in the metrics.
* @return The unique identifier.
*/
public UUID getFileSystemInstanceId() {
return fileSystemInstanceId;
} | 3.68 |
flink_TaskExecutorRegistrationSuccess_getResourceManagerId | /** Gets the unique ID that identifies the ResourceManager. */
public ResourceID getResourceManagerId() {
return resourceManagerResourceId;
} | 3.68 |
pulsar_PrometheusMetricStreams_releaseAll | /**
* Release all the streams to clean up resources.
*/
void releaseAll() {
metricStreamMap.values().forEach(s -> s.getBuffer().release());
metricStreamMap.clear();
} | 3.68 |
framework_VListSelect_setReadOnly | /**
* Sets this select as read only, meaning selection cannot be changed.
*
* @param readOnly
* {@code true} for read only, {@code false} for not read only
*/
public void setReadOnly(boolean readOnly) {
if (this.readOnly != readOnly) {
this.readOnly = readOnly;
updateEnabledState();
}
} | 3.68 |
hbase_CellUtil_parseColumn | /**
* Splits a column in {@code family:qualifier} form into separate byte arrays. An empty qualifier
* (ie, {@code fam:}) is parsed as <code>{ fam, EMPTY_BYTE_ARRAY }</code> while no delimiter (ie,
* {@code fam}) is parsed as an array of one element, <code>{ fam }</code>.
* <p>
* Don't forget, HBase DOES support empty qualifiers. (see HBASE-9549)
* </p>
* <p>
* Not recommended to be used as this is an old-style API.
* </p>
* @param c The column.
* @return The parsed column.
*/
public static byte[][] parseColumn(byte[] c) {
final int index = getDelimiter(c, 0, c.length, COLUMN_FAMILY_DELIMITER);
if (index == -1) {
// If no delimiter, return array of size 1
return new byte[][] { c };
} else if (index == c.length - 1) {
// family with empty qualifier, return array size 2
byte[] family = new byte[c.length - 1];
System.arraycopy(c, 0, family, 0, family.length);
return new byte[][] { family, HConstants.EMPTY_BYTE_ARRAY };
}
// Family and column, return array size 2
final byte[][] result = new byte[2][];
result[0] = new byte[index];
System.arraycopy(c, 0, result[0], 0, index);
final int len = c.length - (index + 1);
result[1] = new byte[len];
System.arraycopy(c, index + 1 /* Skip delimiter */, result[1], 0, len);
return result;
} | 3.68 |
pulsar_KeyValueSchemaImpl_getKeySchema | /**
* Get the Schema of the Key.
* @return the Schema of the Key
*/
@Override
public Schema<K> getKeySchema() {
return keySchema;
} | 3.68 |
hadoop_OBSLoginHelper_hasLogin | /**
* Predicate to verify login details are defined.
*
* @return true if the username is defined (not null, not empty).
*/
public boolean hasLogin() {
return StringUtils.isNotEmpty(user);
} | 3.68 |
hbase_MasterWalManager_getLiveServersFromWALDir | /**
* Get Servernames that COULD BE 'alive'; excludes those that have a '-splitting' suffix as these
* are already being split -- they cannot be 'alive'.
*/
public Set<ServerName> getLiveServersFromWALDir() throws IOException {
return getServerNamesFromWALDirPath(
p -> !p.getName().endsWith(AbstractFSWALProvider.SPLITTING_EXT));
} | 3.68 |
streampipes_DataStreamApi_create | /**
* Directly install a new data stream
*
* @param stream The data stream to add
*/
@Override
public void create(SpDataStream stream) {
post(StreamPipesApiPath.fromBaseApiPath().addToPath("streams"), stream);
} | 3.68 |
hadoop_ManifestSuccessData_getJobId | /** @return Job ID, if known. */
public String getJobId() {
return jobId;
} | 3.68 |
flink_JobResultDeserializer_assertNextToken | /** Advances the token and asserts that it matches the required {@link JsonToken}. */
private static void assertNextToken(final JsonParser p, final JsonToken requiredJsonToken)
throws IOException {
final JsonToken jsonToken = p.nextToken();
if (jsonToken != requiredJsonToken) {
throw new JsonMappingException(
p, String.format("Expected token %s (was %s)", requiredJsonToken, jsonToken));
}
} | 3.68 |
hadoop_TFile_lowerBound | /**
* @param key
* input key.
* @return the ID of the first block that contains key >= input key. Or -1
* if no such block exists.
*/
public int lowerBound(RawComparable key) {
if (comparator == null) {
throw new RuntimeException("Cannot search in unsorted TFile");
}
if (firstKey == null) {
return -1; // not found
}
int ret = Utils.lowerBound(index, key, comparator);
if (ret == index.size()) {
return -1;
}
return ret;
} | 3.68 |
hadoop_AbfsOperationMetrics_getOperationsSuccessful | /**
*
* @return the number of successful operations.
*/
AtomicLong getOperationsSuccessful() {
return operationsSuccessful;
} | 3.68 |
hadoop_PendingSet_add | /**
* Add a commit.
* @param commit the single commit
*/
public void add(SinglePendingCommit commit) {
commits.add(commit);
// add any statistics.
IOStatisticsSnapshot st = commit.getIOStatistics();
if (st != null) {
iostats.aggregate(st);
st.clear();
}
} | 3.68 |
hadoop_Error_code | /**
* Sets the error code and returns this Error for method chaining.
**/
public Error code(Integer code) {
this.code = code;
return this;
} | 3.68 |
flink_FileWriterBucket_assembleNewPartPath | /** Constructs a new part path and increments the partCounter. */
private Path assembleNewPartPath() {
long currentPartCounter = partCounter++;
return new Path(
bucketPath,
outputFileConfig.getPartPrefix()
+ '-'
+ uniqueId
+ '-'
+ currentPartCounter
+ outputFileConfig.getPartSuffix());
} | 3.68 |
hadoop_AzureNativeFileSystemStore_listRootBlobs | /**
* This private method uses the root directory or the original container to
* list blobs under the directory or container given a specified prefix for
* the directory depending on whether the original file system object was
* constructed with a short- or long-form URI. It also uses the specified flat
* or hierarchical option, listing details options, request options, and
* operation context.
*
* @param aPrefix
* string name representing the prefix of containing blobs.
* @param useFlatBlobListing
* - the list is flat if true, or hierarchical otherwise.
* @param listingDetails
* - determines whether snapshots, metadata, and committed/uncommitted
* data are included in the listing.
* @param options
* - object specifying additional options for the request. null =
* default options
* @param opContext
* - context of the current operation
* @return an iterable collection of blob items.
* @throws URISyntaxException
*
*/
private Iterable<ListBlobItem> listRootBlobs(String aPrefix, boolean useFlatBlobListing,
EnumSet<BlobListingDetails> listingDetails, BlobRequestOptions options,
OperationContext opContext) throws StorageException, URISyntaxException {
CloudBlobDirectoryWrapper directory = this.container.getDirectoryReference(aPrefix);
return directory.listBlobs(
null,
useFlatBlobListing,
listingDetails,
options,
opContext);
} | 3.68 |
flink_FlinkStatement_executeQuery | /**
* Execute a SELECT query.
*
* @param sql an SQL statement to be sent to the database, typically a static SQL <code>SELECT
* </code> statement
* @return the select query result set.
* @throws SQLException the thrown exception
*/
@Override
public ResultSet executeQuery(String sql) throws SQLException {
StatementResult result = executeInternal(sql);
if (!result.isQueryResult()) {
result.close();
throw new SQLException(String.format("Statement[%s] is not a query.", sql));
}
currentResults = new FlinkResultSet(this, result);
hasResults = true;
return currentResults;
} | 3.68 |
hbase_RegionCoprocessorHost_postDelete | /**
* @param delete The Delete object
* @param edit The WALEdit object.
* @exception IOException Exception
*/
public void postDelete(final Delete delete, final WALEdit edit) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postDelete(this, delete, edit);
}
});
} | 3.68 |
open-banking-gateway_FlowableConfig_productionCustomizeListenerAndJsonSerializer | /**
* Customizes Flowable so that it can store custom classes (ones that do not implement Serializable) as
* JSON variables in the database.
*/
@Bean
EngineConfigurationConfigurer<SpringProcessEngineConfiguration> productionCustomizeListenerAndJsonSerializer(
RequestScopedServicesProvider scopedServicesProvider,
FlowableProperties flowableProperties,
FlowableObjectMapper mapper,
FlowableJobEventListener eventListener
) {
int maxLength = flowableProperties.getSerialization().getMaxLength();
List<String> serializeOnlyPackages = flowableProperties.getSerialization().getSerializeOnlyPackages();
return processConfiguration -> {
processConfiguration.setCustomPreVariableTypes(
new ArrayList<>(
ImmutableList.of(
new JsonCustomSerializer(scopedServicesProvider, mapper.getMapper(), serializeOnlyPackages, maxLength),
new LargeJsonCustomSerializer(scopedServicesProvider, mapper.getMapper(), serializeOnlyPackages, maxLength)
)
)
);
processConfiguration.setEnableEventDispatcher(true);
processConfiguration.setEventListeners(ImmutableList.of(eventListener));
processConfiguration.setAsyncExecutorNumberOfRetries(flowableProperties.getNumberOfRetries());
};
} | 3.68 |
morf_AbstractSqlDialectTest_testResultSetToRecord | /**
* Tests that non-null values are returned correctly from result sets
*
* @throws SQLException If a SQL exception is thrown.
*/
@Test
public void testResultSetToRecord() throws SQLException {
ResultSet resultSet = mock(ResultSet.class);
List<DataType> dataTypes = Arrays.asList(DataType.values());
List<Column> columns = dataTypes
.stream()
.filter(d -> !d.equals(DataType.NULL))
.map(d -> column(d.name() + "Test", d))
.collect(toList());
when(resultSet.getLong(dataTypes.indexOf(DataType.BIG_INTEGER) + 1)).thenReturn(1L);
when(resultSet.getBytes(dataTypes.indexOf(DataType.BLOB) + 1)).thenReturn(BYTE_ARRAY);
when(resultSet.getString(dataTypes.indexOf(DataType.STRING) + 1)).thenReturn("test");
when(resultSet.getBoolean(dataTypes.indexOf(DataType.BOOLEAN) + 1)).thenReturn(true);
when(resultSet.getInt(dataTypes.indexOf(DataType.INTEGER) + 1)).thenReturn(3);
when(resultSet.getBigDecimal(dataTypes.indexOf(DataType.DECIMAL) + 1)).thenReturn(new BigDecimal("1.23"));
when(resultSet.getDate(dataTypes.indexOf(DataType.DATE) + 1)).thenReturn(java.sql.Date.valueOf("2010-07-02"));
Record record = testDialect.resultSetToRecord(resultSet, columns);
assertEquals(1, record.getLong(DataType.BIG_INTEGER.name() + "Test").longValue());
assertEquals("test", record.getString(DataType.STRING.name() + "Test"));
assertEquals(true, record.getBoolean(DataType.BOOLEAN.name() + "Test"));
assertEquals(3, record.getInteger(DataType.INTEGER.name() + "Test").intValue());
assertEquals(1.23D, record.getDouble(DataType.DECIMAL.name() + "Test").doubleValue(), 0.0001);
assertEquals(new BigDecimal("1.23"), record.getBigDecimal(DataType.DECIMAL.name() + "Test"));
assertEquals(new BigDecimal("1.23"), record.getObject(column(DataType.DECIMAL.name() + "Test", DataType.DECIMAL, 13, 2)));
assertEquals(BASE64_ENCODED, record.getString(DataType.BLOB.name() + "Test"));
assertArrayEquals(BYTE_ARRAY, record.getByteArray(DataType.BLOB.name() + "Test"));
assertArrayEquals(BYTE_ARRAY, (byte[])record.getObject(column(DataType.BLOB.name() + "Test", DataType.BLOB)));
assertEquals(java.sql.Date.valueOf("2010-07-02"), record.getDate(DataType.DATE.name() + "Test"));
assertEquals(new LocalDate(2010, 7, 2), record.getLocalDate(DataType.DATE.name() + "Test"));
assertEquals(java.sql.Date.valueOf("2010-07-02"), record.getObject(column(DataType.DATE.name() + "Test", DataType.DATE)));
} | 3.68 |
hadoop_LocatedFileStatus_hashCode | /**
* Returns a hash code value for the object, which is defined as
* the hash code of the path name.
*
* @return a hash code value for the path name.
*/
@Override
public int hashCode() {
return super.hashCode();
} | 3.68 |
flink_Tuple5_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4), where the
* individual fields are the value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ")";
} | 3.68 |
flink_ParquetRowDataBuilder_createWriterFactory | /**
* Create a parquet {@link BulkWriter.Factory}.
*
* @param rowType row type of parquet table.
* @param conf hadoop configuration.
* @param utcTimestamp whether to use the UTC timezone or the local timezone for the conversion between epoch
* time and LocalDateTime. Hive 0.x/1.x/2.x use the local timezone, but Hive 3.x uses the UTC timezone.
*/
public static ParquetWriterFactory<RowData> createWriterFactory(
RowType rowType, Configuration conf, boolean utcTimestamp) {
return new ParquetWriterFactory<>(new FlinkParquetBuilder(rowType, conf, utcTimestamp));
} | 3.68 |
framework_Escalator_addRowVisibilityChangeHandler | /**
* Adds an event handler that gets notified when the range of visible rows
* changes e.g. because of scrolling, row resizing or spacers
* appearing/disappearing.
*
* @param rowVisibilityChangeHandler
* the event handler
* @return a handler registration for the added handler
*/
public HandlerRegistration addRowVisibilityChangeHandler(
RowVisibilityChangeHandler rowVisibilityChangeHandler) {
return addHandler(rowVisibilityChangeHandler,
RowVisibilityChangeEvent.TYPE);
} | 3.68 |
hibernate-validator_ConstraintCheckIssue_warning | /**
* Creates a new ConstraintCheckIssue of warning kind ({@link IssueKind#WARNING}).
*
* @param element The element at which the error occurred.
* @param annotationMirror The annotation that causes the error.
* @param messageKey A key for retrieving an error message template from the bundle
* <p>
* {@code org.hibernate.validator.ap.ValidationProcessorMessages.}
* </p>
* @param messageParameters An array with values to put into the error message template
* using {@link java.text.MessageFormat}. The number of elements must match
* the number of place holders in the message template.
*/
public static ConstraintCheckIssue warning(Element element, AnnotationMirror annotationMirror, String messageKey, Object... messageParameters) {
return new ConstraintCheckIssue( element, annotationMirror, IssueKind.WARNING, messageKey, messageParameters );
} | 3.68 |
hadoop_MountTableProcedure_getMountEntry | /**
* Gets the mount table entry.
* @param mount name of the mount entry.
* @param mountTable the mount table.
* @return corresponding mount entry.
* @throws IOException in case of failure to retrieve mount entry.
*/
public static MountTable getMountEntry(String mount,
MountTableManager mountTable)
throws IOException {
GetMountTableEntriesRequest getRequest =
GetMountTableEntriesRequest.newInstance(mount);
GetMountTableEntriesResponse getResponse =
mountTable.getMountTableEntries(getRequest);
List<MountTable> results = getResponse.getEntries();
MountTable existingEntry = null;
for (MountTable result : results) {
if (mount.equals(result.getSourcePath())) {
existingEntry = result;
break;
}
}
return existingEntry;
} | 3.68 |
flink_RequestedGlobalProperties_filterBySemanticProperties | /**
* Filters these properties by what can be preserved by the given SemanticProperties when
* propagated down to the given input.
*
* @param props The SemanticProperties which define which fields are preserved.
* @param input The index of the operator's input.
* @return The filtered RequestedGlobalProperties
*/
public RequestedGlobalProperties filterBySemanticProperties(
SemanticProperties props, int input) {
// no semantic properties available. All global properties are filtered.
if (props == null) {
throw new NullPointerException("SemanticProperties may not be null.");
}
RequestedGlobalProperties rgProp = new RequestedGlobalProperties();
switch (this.partitioning) {
case FULL_REPLICATION:
case FORCED_REBALANCED:
case CUSTOM_PARTITIONING:
case RANDOM_PARTITIONED:
case ANY_DISTRIBUTION:
// make sure that certain properties are not pushed down
return null;
case HASH_PARTITIONED:
case ANY_PARTITIONING:
FieldSet newFields;
if (this.partitioningFields instanceof FieldList) {
newFields = new FieldList();
} else {
newFields = new FieldSet();
}
for (Integer targetField : this.partitioningFields) {
int sourceField = props.getForwardingSourceField(input, targetField);
if (sourceField >= 0) {
newFields = newFields.addField(sourceField);
} else {
// partial partitionings are not preserved to avoid skewed partitioning
return null;
}
}
rgProp.partitioning = this.partitioning;
rgProp.partitioningFields = newFields;
return rgProp;
case RANGE_PARTITIONED:
// range partitioning
Ordering newOrdering = new Ordering();
for (int i = 0; i < this.ordering.getInvolvedIndexes().size(); i++) {
int value = this.ordering.getInvolvedIndexes().get(i);
int sourceField = props.getForwardingSourceField(input, value);
if (sourceField >= 0) {
newOrdering.appendOrdering(
sourceField, this.ordering.getType(i), this.ordering.getOrder(i));
} else {
return null;
}
}
rgProp.partitioning = this.partitioning;
rgProp.ordering = newOrdering;
rgProp.dataDistribution = this.dataDistribution;
return rgProp;
default:
throw new RuntimeException("Unknown partitioning type encountered.");
}
} | 3.68 |
dubbo_TripleClientCall_onMessage | // stream listener start
@Override
public void onMessage(byte[] message, boolean isReturnTriException) {
if (done) {
LOGGER.warn(
PROTOCOL_STREAM_LISTENER,
"",
"",
"Received message from closed stream,connection=" + connectionClient + " service="
+ requestMetadata.service + " method="
+ requestMetadata.method.getMethodName());
return;
}
try {
final Object unpacked = requestMetadata.packableMethod.parseResponse(message, isReturnTriException);
listener.onMessage(unpacked, message.length);
} catch (Throwable t) {
TriRpcStatus status = TriRpcStatus.INTERNAL
.withDescription("Deserialize response failed")
.withCause(t);
cancelByLocal(status.asException());
listener.onClose(status, null, false);
LOGGER.error(
PROTOCOL_FAILED_RESPONSE,
"",
"",
String.format(
"Failed to deserialize triple response, service=%s, method=%s,connection=%s",
connectionClient, requestMetadata.service, requestMetadata.method.getMethodName()),
t);
}
} | 3.68 |
flink_GlobalConfiguration_isSensitive | /**
* Check whether the key is a hidden key.
*
* @param key the config key
*/
public static boolean isSensitive(String key) {
Preconditions.checkNotNull(key, "key is null");
final String keyInLower = key.toLowerCase();
for (String hideKey : SENSITIVE_KEYS) {
if (keyInLower.length() >= hideKey.length() && keyInLower.contains(hideKey)) {
return true;
}
}
return false;
} | 3.68 |
hadoop_FederationUtil_getJmx | /**
* Get a JMX data from a web endpoint.
*
* @param beanQuery JMX bean.
* @param webAddress Web address of the JMX endpoint.
* @param connectionFactory to open http/https connection.
* @param scheme to use for URL connection.
* @return JSON with the JMX data
*/
public static JSONArray getJmx(String beanQuery, String webAddress,
URLConnectionFactory connectionFactory, String scheme) {
JSONArray ret = null;
BufferedReader reader = null;
try {
String host = webAddress;
int port = -1;
if (webAddress.indexOf(":") > 0) {
String[] webAddressSplit = webAddress.split(":");
host = webAddressSplit[0];
port = Integer.parseInt(webAddressSplit[1]);
}
URL jmxURL = new URL(scheme, host, port, "/jmx?qry=" + beanQuery);
LOG.debug("JMX URL: {}", jmxURL);
// Create a URL connection
URLConnection conn = connectionFactory.openConnection(
jmxURL, UserGroupInformation.isSecurityEnabled());
conn.setConnectTimeout(5 * 1000);
conn.setReadTimeout(5 * 1000);
InputStream in = conn.getInputStream();
InputStreamReader isr = new InputStreamReader(in, "UTF-8");
reader = new BufferedReader(isr);
StringBuilder sb = new StringBuilder();
String line = null;
while ((line = reader.readLine()) != null) {
sb.append(line);
}
String jmxOutput = sb.toString();
// Parse JSON
JSONObject json = new JSONObject(jmxOutput);
ret = json.getJSONArray("beans");
} catch (IOException e) {
LOG.error("Cannot read JMX bean {} from server {}",
beanQuery, webAddress, e);
} catch (JSONException e) {
// We shouldn't need more details if the JSON parsing fails.
LOG.error("Cannot parse JMX output for {} from server {}: {}",
beanQuery, webAddress, e.getMessage());
} catch (Exception e) {
LOG.error("Cannot parse JMX output for {} from server {}",
beanQuery, webAddress, e);
} finally {
if (reader != null) {
try {
reader.close();
} catch (IOException e) {
LOG.error("Problem closing {}", webAddress, e);
}
}
}
return ret;
} | 3.68 |
hadoop_ContainerUpdates_getDecreaseRequests | /**
* Returns Container Decrease Requests.
* @return Container Decrease Requests.
*/
public List<UpdateContainerRequest> getDecreaseRequests() {
return decreaseRequests;
} | 3.68 |
framework_RangeValidator_getMaxValue | /**
* Gets the maximum value of the range.
*
* @return the maximum value
*/
public T getMaxValue() {
return maxValue;
} | 3.68 |
hbase_MultipleColumnPrefixFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
FilterProtos.MultipleColumnPrefixFilter.Builder builder =
FilterProtos.MultipleColumnPrefixFilter.newBuilder();
for (byte[] element : sortedPrefixes) {
if (element != null) builder.addSortedPrefixes(UnsafeByteOperations.unsafeWrap(element));
}
return builder.build().toByteArray();
} | 3.68 |
hadoop_SimpleNamingService_getNewName | /**
* Generate a new checkpoint Name
* @return the checkpoint name
*/
public String getNewName(){
return "checkpoint_" + name;
} | 3.68 |
framework_AbstractComponent_findAncestor | /**
* Returns the closest ancestor with the given type.
* <p>
* To find the Window that contains the component, use {@code Window w =
* findAncestor(Window.class);}
* </p>
*
* @param <T>
* The type of the ancestor
* @param parentType
* The ancestor class we are looking for
* @return The first ancestor that can be assigned to the given class. Null
* if no ancestor with the correct type could be found.
*/
public <T extends HasComponents> T findAncestor(Class<T> parentType) {
HasComponents p = getParent();
while (p != null) {
if (parentType.isAssignableFrom(p.getClass())) {
return parentType.cast(p);
}
p = p.getParent();
}
return null;
} | 3.68 |
morf_H2MetaDataProvider_isIgnoredTable | /**
* @see org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider#isIgnoredTable(RealName)
*/
@Override
protected boolean isIgnoredTable(RealName tableName) {
// Ignore temporary tables
return tableName.getDbName().startsWith(H2Dialect.TEMPORARY_TABLE_PREFIX);
} | 3.68 |
hadoop_FSStarvedApps_take | /**
* Blocking call to fetch the next app to process. The returned app is
* tracked until the next call to this method. This tracking assumes a
* single reader.
*
* @return starved application to process
* @throws InterruptedException if interrupted while waiting
*/
FSAppAttempt take() throws InterruptedException {
// Reset appBeingProcessed before the blocking call
appBeingProcessed = null;
// Blocking call to fetch the next starved application
FSAppAttempt app = appsToProcess.take();
appBeingProcessed = app;
return app;
} | 3.68 |
hadoop_YarnRegistryViewForProviders_putService | /**
* Add a service under a path, optionally purging any history.
* @param username user
* @param serviceClass service class to use under ~user
* @param serviceName name of the service
* @param record service record
* @param deleteTreeFirst perform recursive delete of the path first.
* @return the path the service was created at
* @throws IOException
*/
public String putService(String username,
String serviceClass,
String serviceName,
ServiceRecord record,
boolean deleteTreeFirst) throws IOException {
String path = RegistryUtils.servicePath(
username, serviceClass, serviceName);
if (deleteTreeFirst) {
registryOperations.delete(path, true);
}
registryOperations.mknode(RegistryPathUtils.parentOf(path), true);
registryOperations.bind(path, record, BindFlags.OVERWRITE);
return path;
} | 3.68 |
hadoop_TimelineWriteResponse_setErrorCode | /**
* Set the error code to the given error code.
*
* @param code an error code.
*/
public void setErrorCode(int code) {
this.errorCode = code;
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_getNodeInternal | /**
* Find the node containing the given key.
*
* @param key the key.
* @param namespace the namespace.
* @return id of the node. NIL_NODE will be returned if key does not exist.
*/
private S getNodeInternal(K key, N namespace) {
MemorySegment keySegment = getKeySegment(key, namespace);
int keyLen = keySegment.size();
return getNode(keySegment, 0, keyLen);
} | 3.68 |
framework_VAbstractSplitPanel_getSecondWidget | /**
* Returns the widget in the second region, if any.
*
* @return the widget in the second region, or {@code null} if not set
*/
public Widget getSecondWidget() {
return secondChild;
} | 3.68 |
hbase_StealJobQueue_getStealFromQueue | /**
* Get a queue whose job might be stolen by the consumer of this original queue
* @return the queue whose job could be stolen
*/
public BlockingQueue<T> getStealFromQueue() {
return stealFromQueue;
} | 3.68 |
hadoop_YarnAuthorizationProvider_destroy | /**
* Destroy the {@link YarnAuthorizationProvider} instance.
* This method is called only in Tests.
*/
@VisibleForTesting
public static void destroy() {
synchronized (YarnAuthorizationProvider.class) {
if (authorizer != null) {
LOG.debug("{} is destroyed.", authorizer.getClass().getName());
authorizer = null;
}
}
} | 3.68 |
flink_WindowMapState_isEmpty | /**
* Returns true if this state contains no key-value mappings, otherwise false.
*
* @return True if this state contains no key-value mappings, otherwise false.
* @throws Exception Thrown if the system cannot access the state.
*/
public boolean isEmpty(W window) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.isEmpty();
} | 3.68 |
dubbo_DynamicConfiguration_getConfigItem | /**
* Get the config item which contains the content and stat info.
*
* @param key the config key
* @param group the config group
* @return the config item for the given key and group
*/
default ConfigItem getConfigItem(String key, String group) {
String content = getConfig(key, group);
return new ConfigItem(content, null);
} | 3.68 |
hbase_HBaseTestingUtility_shutdownMiniHBaseCluster | /**
* Shutdown HBase mini cluster. Does not shutdown ZK or DFS if running.
* @throws java.io.IOException in case command is unsuccessful
*/
public void shutdownMiniHBaseCluster() throws IOException {
cleanup();
if (this.hbaseCluster != null) {
this.hbaseCluster.shutdown();
// Wait till hbase is down before going on to shutdown zk.
this.hbaseCluster.waitUntilShutDown();
this.hbaseCluster = null;
}
if (zooKeeperWatcher != null) {
zooKeeperWatcher.close();
zooKeeperWatcher = null;
}
} | 3.68 |
hbase_ZKUtil_deleteChildrenRecursivelyMultiOrSequential | /**
* Delete all the children of the specified node but not the node itself. This will first traverse
* the znode tree for listing the children and then delete these znodes using multi-update api or
* sequential based on the specified configurations.
* <p>
* Sets no watches. Throws all exceptions besides dealing with deletion of children.
* <p>
* If the following is true:
* <ul>
* <li>runSequentialOnMultiFailure is true
* </ul>
* on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*), we
* retry the operations one-by-one (sequentially).
* @param zkw zk reference
* @param runSequentialOnMultiFailure if true, when we get a ZooKeeper exception that could be
* handled by a sequential call, retry the operations one-by-one (sequentially)
* @param pathRoots path of the parent node(s)
* @throws KeeperException.NotEmptyException if a node has children while deleting
* @throws KeeperException if an unexpected ZooKeeper exception occurs or an invalid path is
* specified
*/
public static void deleteChildrenRecursivelyMultiOrSequential(ZKWatcher zkw,
boolean runSequentialOnMultiFailure, String... pathRoots) throws KeeperException {
if (pathRoots == null || pathRoots.length <= 0) {
LOG.warn("Given path is not valid!");
return;
}
List<ZKUtilOp> ops = new ArrayList<>();
for (String eachRoot : pathRoots) {
List<String> children = listChildrenBFSNoWatch(zkw, eachRoot);
// Delete the leaves first and eventually get rid of the root
for (int i = children.size() - 1; i >= 0; --i) {
ops.add(ZKUtilOp.deleteNodeFailSilent(children.get(i)));
}
}
submitBatchedMultiOrSequential(zkw, runSequentialOnMultiFailure, ops);
} | 3.68 |
framework_AbstractBeanContainer_removeAllValueChangeListeners | /**
* Remove this container as a listener for all the properties in the given
* {@link Item}.
*
* @param item
* The {@link Item} that contains the properties
*/
private void removeAllValueChangeListeners(Item item) {
for (Object propertyId : item.getItemPropertyIds()) {
removeValueChangeListener(item, propertyId);
}
} | 3.68 |
framework_WrappedPortletSession_setAttribute | /**
* Binds an object to this session in the given scope, using the name
* specified. If an object of the same name in this scope is already bound
* to the session, that object is replaced.
*
* <p>
* If the value is <code>null</code>, this has the same effect as calling
* <code>removeAttribute()</code>.
*
*
* @param name
* the name to which the object is bound; this cannot be
* <code>null</code>.
* @param value
* the object to be bound
* @param scope
* session scope of this attribute
*
* @exception java.lang.IllegalStateException
* if this method is called on a session which has been
* invalidated
* @exception java.lang.IllegalArgumentException
* if name is <code>null</code> or scope is unknown to the
* container.
*
* @see PortletSession#setAttribute(String, Object, int)
* @see PortletSession#PORTLET_SCOPE
* @see PortletSession#APPLICATION_SCOPE
*
* @since 7.6
*/
public void setAttribute(String name, Object value, int scope) {
session.setAttribute(name, value, scope);
} | 3.68 |
hbase_User_isHBaseSecurityEnabled | /**
* Returns whether or not secure authentication is enabled for HBase. Note that HBase security
* requires HDFS security to provide any guarantees, so it is recommended that secure HBase should
* run on secure HDFS.
*/
public static boolean isHBaseSecurityEnabled(Configuration conf) {
return "kerberos".equalsIgnoreCase(conf.get(HBASE_SECURITY_CONF_KEY));
} | 3.68 |
flink_HivePartitionUtils_getAllPartitions | /**
* Returns all HiveTablePartitions of a hive table, returns single HiveTablePartition if the
* hive table is not partitioned.
*/
public static List<HiveTablePartition> getAllPartitions(
JobConf jobConf,
String hiveVersion,
ObjectPath tablePath,
List<String> partitionColNames,
List<Map<String, String>> remainingPartitions) {
List<HiveTablePartition> allHivePartitions = new ArrayList<>();
try (HiveMetastoreClientWrapper client =
HiveMetastoreClientFactory.create(HiveConfUtils.create(jobConf), hiveVersion)) {
String dbName = tablePath.getDatabaseName();
String tableName = tablePath.getObjectName();
Table hiveTable = client.getTable(dbName, tableName);
Properties tableProps =
HiveReflectionUtils.getTableMetadata(
HiveShimLoader.loadHiveShim(hiveVersion), hiveTable);
if (partitionColNames != null && partitionColNames.size() > 0) {
List<Partition> partitions = new ArrayList<>();
if (remainingPartitions != null) {
List<String> partitionNames =
getPartitionNames(
remainingPartitions,
partitionColNames,
JobConfUtils.getDefaultPartitionName(jobConf));
partitions.addAll(
client.getPartitionsByNames(dbName, tableName, partitionNames));
} else {
partitions.addAll(client.listPartitions(dbName, tableName, (short) -1));
}
for (Partition partition : partitions) {
HiveTablePartition hiveTablePartition =
toHiveTablePartition(partitionColNames, tableProps, partition);
allHivePartitions.add(hiveTablePartition);
}
} else {
allHivePartitions.add(new HiveTablePartition(hiveTable.getSd(), tableProps));
}
} catch (TException e) {
throw new FlinkHiveException("Failed to collect all partitions from hive metaStore", e);
}
return allHivePartitions;
} | 3.68 |
rocketmq-connect_RebalanceImpl_updateProcessConfigsInRebalance | /**
* Start all the connectors and tasks allocated to current process.
*
* @param allocateResult
*/
private void updateProcessConfigsInRebalance(ConnAndTaskConfigs allocateResult) {
try {
worker.startConnectors(allocateResult.getConnectorConfigs(), connectController);
} catch (Throwable e) {
log.error("RebalanceImpl#updateProcessConfigsInRebalance start connector failed", e);
}
try {
worker.startTasks(allocateResult.getTaskConfigs());
} catch (Throwable e) {
log.error("RebalanceImpl#updateProcessConfigsInRebalance start task failed", e);
}
} | 3.68 |
flink_SerializedCheckpointData_toDeque | /**
* De-serializes an array of SerializedCheckpointData back into an ArrayDeque of element
* checkpoints.
*
* @param data The data to be deserialized.
* @param serializer The serializer used to deserialize the data.
* @param <T> The type of the elements.
* @return An ArrayDeque of element checkpoints.
* @throws IOException Thrown, if the serialization fails.
*/
public static <T> ArrayDeque<Tuple2<Long, Set<T>>> toDeque(
SerializedCheckpointData[] data, TypeSerializer<T> serializer) throws IOException {
ArrayDeque<Tuple2<Long, Set<T>>> deque = new ArrayDeque<>(data.length);
DataInputDeserializer deser = null;
for (SerializedCheckpointData checkpoint : data) {
byte[] serializedData = checkpoint.getSerializedData();
if (deser == null) {
deser = new DataInputDeserializer(serializedData, 0, serializedData.length);
} else {
deser.setBuffer(serializedData);
}
final Set<T> ids = CollectionUtil.newHashSetWithExpectedSize(checkpoint.getNumIds());
final int numIds = checkpoint.getNumIds();
for (int i = 0; i < numIds; i++) {
ids.add(serializer.deserialize(deser));
}
deque.addLast(new Tuple2<Long, Set<T>>(checkpoint.checkpointId, ids));
}
return deque;
} | 3.68 |
framework_LegacyApplication_setTheme | /**
* Sets the application's theme.
* <p>
* The default theme for {@link LegacyApplication} is reindeer, unlike for
* {@link UI} the default theme is valo.
* <p>
* Note that this theme can be overridden for a specific UI with
* {@link VaadinSession#getThemeForUI(UI)}. Setting theme to be
* <code>null</code> selects the default theme. For the available theme
* names, see the contents of the VAADIN/themes directory.
* </p>
*
* @param theme
* the new theme for this application.
*/
public void setTheme(String theme) {
this.theme = theme;
} | 3.68 |
hudi_HoodieCommitMetadata_fetchTotalPartitionsWritten | // Here the functions are named "fetch" instead of "get", to avoid the JSON conversion.
public long fetchTotalPartitionsWritten() {
return partitionToWriteStats.size();
} | 3.68 |
hbase_WALActionsListener_postAppend | /**
* For notification post append to the writer. Used by metrics system at least. TODO: Combine this
* with above.
* @param entryLen approx length of cells in this append.
* @param elapsedTimeMillis elapsed time in milliseconds.
* @param logKey A WAL key
* @param logEdit A WAL edit containing list of cells.
* @throws IOException if any network or I/O error occurred
*/
default void postAppend(final long entryLen, final long elapsedTimeMillis, final WALKey logKey,
final WALEdit logEdit) throws IOException {
} | 3.68 |
flink_Transformation_declareManagedMemoryUseCaseAtSlotScope | /**
* Declares that this transformation contains certain slot scope managed memory use case.
*
* @param managedMemoryUseCase The use case that this transformation declares needing managed
* memory for.
*/
public void declareManagedMemoryUseCaseAtSlotScope(ManagedMemoryUseCase managedMemoryUseCase) {
Preconditions.checkNotNull(managedMemoryUseCase);
Preconditions.checkArgument(managedMemoryUseCase.scope == ManagedMemoryUseCase.Scope.SLOT);
managedMemorySlotScopeUseCases.add(managedMemoryUseCase);
} | 3.68 |
flink_DeclineCheckpoint_getSerializedCheckpointException | /**
* Gets the reason why the checkpoint was declined.
*
* @return The reason why the checkpoint was declined
*/
@Nonnull
public SerializedCheckpointException getSerializedCheckpointException() {
return serializedCheckpointException;
} | 3.68 |
hbase_CompactionConfiguration_getMaxCompactSize | /** Returns upper bound on file size to be included in minor compactions */
public long getMaxCompactSize() {
return maxCompactSize;
} | 3.68 |
flink_YarnApplicationFileUploader_registerMultipleLocalResources | /**
* Recursively uploads (and registers) any (user and system) files in <tt>shipFiles</tt> except
* for files matching "<tt>flink-dist*.jar</tt>" which should be uploaded separately. If it is
* already a remote file, the uploading will be skipped.
*
* @param shipFiles local or remote files to register as Yarn local resources
* @param localResourcesDirectory the directory the localResources are uploaded to
* @param resourceType type of the resource, which can be one of FILE, PATTERN, or ARCHIVE
* @return list of class paths with the proper resource keys from the registration
*/
List<String> registerMultipleLocalResources(
final Collection<Path> shipFiles,
final String localResourcesDirectory,
final LocalResourceType resourceType)
throws IOException {
final List<Path> localPaths = new ArrayList<>();
final List<Path> relativePaths = new ArrayList<>();
for (Path shipFile : shipFiles) {
if (Utils.isRemotePath(shipFile.toString())) {
if (fileSystem.isDirectory(shipFile)) {
final URI parentURI = shipFile.getParent().toUri();
final RemoteIterator<LocatedFileStatus> iterable =
fileSystem.listFiles(shipFile, true);
while (iterable.hasNext()) {
final Path current = iterable.next().getPath();
localPaths.add(current);
relativePaths.add(
new Path(
localResourcesDirectory,
parentURI.relativize(current.toUri()).getPath()));
}
continue;
}
} else {
final File file = new File(shipFile.toUri().getPath());
if (file.isDirectory()) {
final java.nio.file.Path shipPath = file.toPath().toRealPath();
final java.nio.file.Path parentPath = shipPath.getParent();
Collection<java.nio.file.Path> paths =
FileUtils.listFilesInDirectory(shipPath, path -> true);
for (java.nio.file.Path javaPath : paths) {
localPaths.add(new Path(javaPath.toUri()));
relativePaths.add(
new Path(
localResourcesDirectory,
parentPath.relativize(javaPath).toString()));
}
continue;
}
}
localPaths.add(shipFile);
relativePaths.add(new Path(localResourcesDirectory, shipFile.getName()));
}
final Set<String> archives = new HashSet<>();
final Set<String> resources = new HashSet<>();
for (int i = 0; i < localPaths.size(); i++) {
final Path localPath = localPaths.get(i);
final Path relativePath = relativePaths.get(i);
if (!isFlinkDistJar(relativePath.getName())) {
final String key = relativePath.toString();
final YarnLocalResourceDescriptor resourceDescriptor =
registerSingleLocalResource(
key,
localPath,
relativePath.getParent().toString(),
resourceType,
true,
true);
if (!resourceDescriptor.alreadyRegisteredAsLocalResource()) {
if (key.endsWith("jar")) {
archives.add(relativePath.toString());
} else {
resources.add(relativePath.getParent().toString());
}
}
}
}
// construct classpath, we always want resource directories to go first, we also sort
// both resources and archives in order to make classpath deterministic
final ArrayList<String> classPaths = new ArrayList<>();
resources.stream().sorted().forEach(classPaths::add);
archives.stream().sorted().forEach(classPaths::add);
return classPaths;
} | 3.68 |
hbase_RollingStatCalculator_fillWithZeros | /** Returns an array of given size initialized with zeros */
private long[] fillWithZeros(int size) {
long[] zeros = new long[size];
for (int i = 0; i < size; i++) {
zeros[i] = 0L;
}
return zeros;
} | 3.68 |
framework_HierarchicalDataProvider_size | /**
* Get the number of immediate child data items for the parent item returned
* by a given query.
*
* @param query
* given query to request the count for
* @return the count of child data items for the data item
* {@link HierarchicalQuery#getParent()}
*
* @throws IllegalArgumentException
* if the query is not of type HierarchicalQuery
*/
@Override
public default int size(Query<T, F> query) {
if (query instanceof HierarchicalQuery<?, ?>) {
return getChildCount((HierarchicalQuery<T, F>) query);
}
throw new IllegalArgumentException(
"Hierarchical data provider doesn't support non-hierarchical queries");
} | 3.68 |
framework_HierarchyRenderer_getInnerRenderer | /**
* Returns the wrapped renderer.
*
* @return Wrapped renderer.
*/
@SuppressWarnings("rawtypes")
public Renderer getInnerRenderer() {
return innerRenderer;
} | 3.68 |
hbase_TableRegionModel_getStartKey | /** Returns the start key */
@XmlAttribute
public byte[] getStartKey() {
return startKey;
} | 3.68 |
framework_AbstractOrderedLayout_setDefaultComponentAlignment | /*
* (non-Javadoc)
*
* @see
* com.vaadin.ui.Layout.AlignmentHandler#setDefaultComponentAlignment(com
* .vaadin.ui.Alignment)
*/
@Override
public void setDefaultComponentAlignment(Alignment defaultAlignment) {
defaultComponentAlignment = defaultAlignment;
} | 3.68 |
hmily_HmilyResourceManager_get | /**
* Get hmily resource.
*
* @param resourceId the resource id
* @return the hmily resource
*/
public static HmilyTacResource get(final String resourceId) {
return DATASOURCE_CACHE.get(resourceId);
} | 3.68 |
hbase_PrivateCellUtil_deepClone | /**
* Deep clones the given cell if the cell supports deep cloning
* @param cell the cell to be cloned
* @return the cloned cell
*/
public static Cell deepClone(Cell cell) throws CloneNotSupportedException {
if (cell instanceof ExtendedCell) {
return ((ExtendedCell) cell).deepClone();
}
throw new CloneNotSupportedException();
} | 3.68 |
graphhopper_PrepareRoutingSubnetworks_setMinNetworkSize | /**
* All components of the graph with less than 2*{@link #minNetworkSize} directed edges (edge keys) will be marked
* as subnetworks. The biggest component will never be marked as subnetwork, even when it is below this size.
*/
public PrepareRoutingSubnetworks setMinNetworkSize(int minNetworkSize) {
this.minNetworkSize = minNetworkSize;
return this;
} | 3.68 |
hadoop_Configured_setConf | // inherit javadoc
@Override
public void setConf(Configuration conf) {
this.conf = conf;
} | 3.68 |
hadoop_BaseRecord_init | /**
* Initialize the object.
*/
public void init() {
// Call this after the object has been constructed
initDefaultTimes();
} | 3.68 |