name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
dubbo_JCacheFactory_createCache | /**
* Takes a url as a method argument and returns a new instance of a cache store implemented by JCache.
* @param url url of the method
* @return JCache instance of cache
*/
@Override
protected Cache createCache(URL url) {
return new JCache(url);
} | 3.68 |
framework_Escalator_scrollToColumn | /**
* Scrolls the body horizontally so that the column at the given index is
* visible and there is at least {@code padding} pixels in the direction of
* the given scroll destination.
*
* @param columnIndex
* the index of the column to scroll to
* @param destination
* where the column should be aligned visually after scrolling
* @param padding
* the number of pixels to place between the scrolled-to column and
* the viewport edge.
* @throws IndexOutOfBoundsException
* if {@code columnIndex} is not a valid index for an existing
* column
* @throws IllegalArgumentException
* if {@code destination} is {@link ScrollDestination#MIDDLE}
* and padding is nonzero; or if the indicated column is frozen;
* or if {@code destination == null}
*/
public void scrollToColumn(final int columnIndex,
final ScrollDestination destination, final int padding)
throws IndexOutOfBoundsException, IllegalArgumentException {
validateScrollDestination(destination, padding);
verifyValidColumnIndex(columnIndex);
if (columnIndex < columnConfiguration.frozenColumns) {
throw new IllegalArgumentException(
"The given column index " + columnIndex + " is frozen.");
}
scroller.scrollToColumn(columnIndex, destination, padding);
} | 3.68 |
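A brief usage sketch for `scrollToColumn` above; the `escalator` instance, column index, and padding value are hypothetical, and `ScrollDestination` is assumed to be the enum referenced in the Javadoc.

```java
// Hypothetical call: bring column 12 into view, aligned to the start of the
// viewport, leaving 10 px of padding. Note the documented restrictions:
// MIDDLE requires zero padding, and frozen columns cannot be scrolled to.
escalator.scrollToColumn(12, ScrollDestination.START, 10);
```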
hbase_TableRecordReader_getCurrentKey | /**
* Returns the current key.
* @return The current key.
* @throws InterruptedException When the job is aborted.
* @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey()
*/
@Override
public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException {
return this.recordReaderImpl.getCurrentKey();
} | 3.68 |
flink_ConfigurationUtils_getSystemResourceMetricsProbingInterval | /**
* @return extracted {@link MetricOptions#SYSTEM_RESOURCE_METRICS_PROBING_INTERVAL} or {@code
* Optional.empty()} if {@link MetricOptions#SYSTEM_RESOURCE_METRICS} are disabled.
*/
public static Optional<Time> getSystemResourceMetricsProbingInterval(
Configuration configuration) {
if (!configuration.getBoolean(SYSTEM_RESOURCE_METRICS)) {
return Optional.empty();
} else {
return Optional.of(
Time.milliseconds(
configuration.getLong(SYSTEM_RESOURCE_METRICS_PROBING_INTERVAL)));
}
} | 3.68 |
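A minimal sketch of how the helper above might be called, assuming Flink's `Configuration` and `MetricOptions` classes are on the classpath; the printed message is illustrative only.

```java
Configuration conf = new Configuration();
// System resource metrics are disabled by default; enable them so the helper
// returns a non-empty Optional.
conf.setBoolean(MetricOptions.SYSTEM_RESOURCE_METRICS, true);
ConfigurationUtils.getSystemResourceMetricsProbingInterval(conf)
        .ifPresent(interval ->
                System.out.println("Probing every " + interval.toMilliseconds() + " ms"));
```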
flink_FlinkPreparingTableBase_getCollationList | /**
* Returns a description of the physical ordering (or orderings) of the rows returned from this
* table.
*
* @see org.apache.calcite.rel.metadata.RelMetadataQuery#collations(RelNode)
*/
public List<RelCollation> getCollationList() {
return ImmutableList.of();
} | 3.68 |
hadoop_FileIoProvider_mkdirsWithExistsCheck | /**
* Create the target directory using {@link File#mkdirs()} only if
* it doesn't exist already.
*
* @param volume target volume. null if unavailable.
* @param dir directory to be created.
* @throws IOException if the directory could not be created
*/
public void mkdirsWithExistsCheck(
@Nullable FsVolumeSpi volume, File dir) throws IOException {
final long begin = profilingEventHook.beforeMetadataOp(volume, MKDIRS);
boolean succeeded = false;
try {
faultInjectorEventHook.beforeMetadataOp(volume, MKDIRS);
succeeded = dir.isDirectory() || dir.mkdirs();
profilingEventHook.afterMetadataOp(volume, MKDIRS, begin);
} catch(Exception e) {
onFailure(volume, begin);
throw e;
}
if (!succeeded) {
throw new IOException("Mkdirs failed to create " + dir);
}
} | 3.68 |
hudi_Pair_getKey | /**
* <p>
* Gets the key from this pair.
* </p>
*
* <p>
* This method implements the {@code Map.Entry} interface returning the left element as the key.
* </p>
*
* @return the left element as the key, may be null
*/
@Override
public final L getKey() {
return getLeft();
} | 3.68 |
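A small sketch of the `Map.Entry` view described above; it assumes the usual `Pair.of(left, right)` factory is available on this Pair implementation.

```java
// Hypothetical pair: the left element doubles as the Map.Entry key.
Pair<String, Integer> pair = Pair.of("recordKey", 42);
String key = pair.getKey();      // same as pair.getLeft()
Integer value = pair.getValue(); // the right element
```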
hadoop_S3LogParser_q | /**
* Quoted entry using the {@link #QUOTED} pattern.
* @param name name of the element (for code clarity only)
* @return the pattern for the regexp
*/
private static String q(String name) {
return e(name, QUOTED);
} | 3.68 |
framework_VDebugWindow_addSection | /**
* Adds the given {@link Section} as a tab in the {@link VDebugWindow} UI.
* {@link Section#getTabButton()} is called to obtain a button which is used as
* the tab.
*
* @param section
*/
public void addSection(final Section section) {
Button b = section.getTabButton();
b.addClickHandler(event -> {
activateSection(section);
writeStoredState();
});
b.setStylePrimaryName(STYLENAME_TAB);
tabs.add(b);
sections.add(section);
if (activeSection == null) {
activateSection(section);
}
} | 3.68 |
framework_TabSheet_readTabFromDesign | /**
* Reads the given tab element from the design.
*
* @since 7.4
*
* @param tabElement
* the element to be read
* @param designContext
* the design context
*/
private void readTabFromDesign(Element tabElement,
DesignContext designContext) {
Attributes attr = tabElement.attributes();
if (tabElement.children().size() != 1) {
throw new DesignException(
"A tab must have exactly one child element");
}
// create the component that is in tab content
Element content = tabElement.child(0);
Component child = designContext.readDesign(content);
Tab tab = this.addTab(child);
if (attr.hasKey("visible")) {
tab.setVisible(DesignAttributeHandler.readAttribute("visible", attr,
Boolean.class));
}
if (attr.hasKey("closable")) {
tab.setClosable(DesignAttributeHandler.readAttribute("closable",
attr, Boolean.class));
}
if (attr.hasKey("caption")) {
tab.setCaption(DesignAttributeHandler.readAttribute("caption", attr,
String.class));
}
if (attr.hasKey("enabled")) {
tab.setEnabled(DesignAttributeHandler.readAttribute("enabled", attr,
Boolean.class));
}
if (attr.hasKey("icon")) {
tab.setIcon(DesignAttributeHandler.readAttribute("icon", attr,
Resource.class));
}
if (attr.hasKey("icon-alt")) {
tab.setIconAlternateText(DesignAttributeHandler
.readAttribute("icon-alt", attr, String.class));
}
if (attr.hasKey("description")) {
tab.setDescription(DesignAttributeHandler
.readAttribute("description", attr, String.class));
}
if (attr.hasKey("style-name")) {
tab.setStyleName(DesignAttributeHandler.readAttribute("style-name",
attr, String.class));
}
if (attr.hasKey("id")) {
tab.setId(DesignAttributeHandler.readAttribute("id", attr,
String.class));
}
if (attr.hasKey("selected")) {
boolean selected = DesignAttributeHandler.readAttribute("selected",
attr, Boolean.class);
if (selected) {
this.setSelectedTab(tab.getComponent(), false);
}
}
} | 3.68 |
hbase_HRegionFileSystem_openRegionFromFileSystem | /**
* Open Region from file-system.
* @param conf the {@link Configuration} to use
* @param fs {@link FileSystem} from which to add the region
* @param tableDir {@link Path} to where the table is being stored
* @param regionInfo {@link RegionInfo} for region to be added
* @param readOnly True if you don't want to edit the region data
* @throws IOException if the region creation fails due to a FileSystem exception.
*/
public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
final FileSystem fs, final Path tableDir, final RegionInfo regionInfo, boolean readOnly)
throws IOException {
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
Path regionDir = regionFs.getRegionDir();
if (!fs.exists(regionDir)) {
LOG.warn("Trying to open a region that do not exists on disk: " + regionDir);
throw new IOException("The specified region do not exists on disk: " + regionDir);
}
if (!readOnly) {
// Cleanup temporary directories
regionFs.cleanupTempDir();
// If it doesn't exist, write the HRI to a file, in case we need to recover hbase:meta
// Only create HRI if we are the default replica
if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
regionFs.checkRegionInfoOnFilesystem();
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
}
}
}
return regionFs;
} | 3.68 |
hbase_FutureUtils_rethrow | /**
* If we could propagate the given {@code error} directly, we will fill the stack trace with the
* current thread's stack trace so it is easier to trace where the exception is thrown. If not, we
* will just create a new IOException and then throw it.
*/
public static IOException rethrow(Throwable error) throws IOException {
if (error instanceof IOException) {
setStackTrace(error);
throw (IOException) error;
} else if (error instanceof RuntimeException) {
setStackTrace(error);
throw (RuntimeException) error;
} else if (error instanceof Error) {
setStackTrace(error);
throw (Error) error;
} else {
throw new IOException(error);
}
} | 3.68 |
querydsl_SQLExpressions_covarSamp | /**
* COVAR_SAMP returns the sample covariance of a set of number pairs.
*
* @param expr1 first arg
* @param expr2 second arg
* @return covar_samp(expr1, expr2)
*/
public static WindowOver<Double> covarSamp(Expression<? extends Number> expr1, Expression<? extends Number> expr2) {
return new WindowOver<Double>(Double.class, SQLOps.COVARSAMP, expr1, expr2);
} | 3.68 |
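A sketch of using the window function above in a query; the `employee` metamodel, its columns, and the `query` instance are hypothetical.

```java
// Sample covariance of salary and bonus, computed per department.
query.select(SQLExpressions.covarSamp(employee.salary, employee.bonus)
        .over()
        .partitionBy(employee.departmentId))
     .from(employee)
     .fetch();
```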
framework_PasswordField_readDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractField#readDesign(org.jsoup.nodes.Element ,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void readDesign(Element design, DesignContext designContext) {
super.readDesign(design, designContext);
Attributes attr = design.attributes();
if (attr.hasKey("value")) {
setValue(DesignAttributeHandler.readAttribute("value", attr,
String.class), false, true);
}
} | 3.68 |
hbase_RegionCoprocessorHost_postCompactSelection | /**
* Called after the {@link HStoreFile}s to be compacted have been selected from the available
* candidates.
* @param store The store where compaction is being requested
* @param selected The store files selected to compact
* @param tracker used to track the life cycle of a compaction
* @param request the compaction request
* @param user the user
*/
public void postCompactSelection(final HStore store, final List<HStoreFile> selected,
final CompactionLifeCycleTracker tracker, final CompactionRequest request, final User user)
throws IOException {
if (coprocEnvironments.isEmpty()) {
return;
}
execOperation(new RegionObserverOperationWithoutResult(user) {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postCompactSelection(this, store, selected, tracker, request);
}
});
} | 3.68 |
druid_MySqlStatementParser_parseCase | /**
* parse case statement
*
* @return MySqlCaseStatement
*/
public MySqlCaseStatement parseCase() {
MySqlCaseStatement stmt = new MySqlCaseStatement();
accept(Token.CASE);
if (lexer.token() == Token.WHEN)// grammar 1
{
while (lexer.token() == Token.WHEN) {
MySqlWhenStatement when = new MySqlWhenStatement();
accept(Token.WHEN);
// when expr
when.setCondition(exprParser.expr());
accept(Token.THEN);
// when block
this.parseStatementList(when.getStatements(), -1, when);
stmt.addWhenStatement(when);
}
if (lexer.token() == Token.ELSE) {
// parse else block
SQLIfStatement.Else elseStmt = new SQLIfStatement.Else();
this.parseStatementList(elseStmt.getStatements(), -1, elseStmt);
stmt.setElseItem(elseStmt);
}
} else {
// case expr
stmt.setCondition(exprParser.expr());
while (lexer.token() == Token.WHEN) {
accept(Token.WHEN);
MySqlWhenStatement when = new MySqlWhenStatement();
// when expr
when.setCondition(exprParser.expr());
accept(Token.THEN);
// when block
this.parseStatementList(when.getStatements(), -1, when);
stmt.addWhenStatement(when);
}
if (lexer.token() == Token.ELSE) {
accept(Token.ELSE);
// else block
SQLIfStatement.Else elseStmt = new SQLIfStatement.Else();
this.parseStatementList(elseStmt.getStatements(), -1, elseStmt);
stmt.setElseItem(elseStmt);
}
}
accept(Token.END);
accept(Token.CASE);
accept(Token.SEMI);
return stmt;
} | 3.68 |
hudi_BaseHoodieTableServiceClient_cluster | /**
* Ensures the clustering instant is in the expected state and performs clustering for the plan stored in metadata.
*
* @param clusteringInstant Clustering instant time
* @param shouldComplete Whether the clustering commit should be completed once clustering has run
* @return Write metadata for the clustering operation
*/
public HoodieWriteMetadata<O> cluster(String clusteringInstant, boolean shouldComplete) {
HoodieTable<?, I, ?, T> table = createTable(config, context.getHadoopConf().get());
HoodieTimeline pendingClusteringTimeline = table.getActiveTimeline().filterPendingReplaceTimeline();
HoodieInstant inflightInstant = HoodieTimeline.getReplaceCommitInflightInstant(clusteringInstant);
if (pendingClusteringTimeline.containsInstant(inflightInstant)) {
table.rollbackInflightClustering(inflightInstant, commitToRollback -> getPendingRollbackInfo(table.getMetaClient(), commitToRollback, false));
table.getMetaClient().reloadActiveTimeline();
}
clusteringTimer = metrics.getClusteringCtx();
LOG.info("Starting clustering at " + clusteringInstant);
HoodieWriteMetadata<T> writeMetadata = table.cluster(context, clusteringInstant);
HoodieWriteMetadata<O> clusteringMetadata = convertToOutputMetadata(writeMetadata);
// Validation has to be done after cloning. if not, it could result in referencing the write status twice which means clustering could get executed twice.
validateClusteringCommit(clusteringMetadata, clusteringInstant, table);
// Publish file creation metrics for clustering.
if (config.isMetricsOn()) {
clusteringMetadata.getWriteStats()
.ifPresent(hoodieWriteStats -> hoodieWriteStats.stream()
.filter(hoodieWriteStat -> hoodieWriteStat.getRuntimeStats() != null)
.map(hoodieWriteStat -> hoodieWriteStat.getRuntimeStats().getTotalCreateTime())
.forEach(metrics::updateClusteringFileCreationMetrics));
}
// TODO : Where is shouldComplete used ?
if (shouldComplete && clusteringMetadata.getCommitMetadata().isPresent()) {
completeClustering((HoodieReplaceCommitMetadata) clusteringMetadata.getCommitMetadata().get(), table, clusteringInstant, Option.ofNullable(convertToWriteStatus(writeMetadata)));
}
return clusteringMetadata;
} | 3.68 |
hbase_HBaseTestingUtility_generateColumnDescriptors | /**
* Create a set of column descriptors covering the combinations of supported compression
* algorithms, data block encodings and bloom filter types.
* @param prefix family names prefix
* @return the list of column descriptors
*/
public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
long familyId = 0;
for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
for (BloomType bloomType : BloomType.values()) {
String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
columnFamilyDescriptorBuilder.setCompressionType(compressionType);
columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
familyId++;
}
}
}
return columnFamilyDescriptors;
} | 3.68 |
graphhopper_Distributions_logExponentialDistribution | /**
* Use this function instead of Math.log(exponentialDistribution(beta, x)) to avoid an
* arithmetic underflow for very small probabilities.
*
* @param beta =1/lambda with lambda being the standard exponential distribution rate parameter
*/
static double logExponentialDistribution(double beta, double x) {
return log(1.0 / beta) - (x / beta);
} | 3.68 |
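A small numeric sketch of why the log form is used: for moderate `x` it equals the log of the density `(1/beta) * exp(-x/beta)`, and for large `x` it stays finite where the direct computation underflows.

```java
double beta = 2.0;
double x = 5.0;
double direct = Math.log((1.0 / beta) * Math.exp(-x / beta)); // approx -3.193
double stable = Math.log(1.0 / beta) - (x / beta);            // same value
// For x = 5_000, Math.exp(-x / beta) underflows to 0.0 and the direct form
// becomes -Infinity, while the stable form is still log(0.5) - 2500.
```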
hadoop_IOStatisticsBinding_emptyStatisticsStore | /**
* Get the shared instance of the immutable empty statistics
* store.
* @return an empty statistics object.
*/
public static IOStatisticsStore emptyStatisticsStore() {
return EmptyIOStatisticsStore.getInstance();
} | 3.68 |
hbase_PersistentIOEngine_calculateChecksum | /**
* Uses a message digest algorithm to calculate a checksum; the default algorithm is MD5.
* @return the checksum as a byte array
* @throws IOException if something goes wrong, e.g. a file does not exist
* @throws NoSuchAlgorithmException if there is no such algorithm
*/
protected byte[] calculateChecksum(String algorithm) {
try {
StringBuilder sb = new StringBuilder();
for (String filePath : filePaths) {
File file = new File(filePath);
sb.append(filePath);
sb.append(getFileSize(filePath));
sb.append(file.lastModified());
}
MessageDigest messageDigest = MessageDigest.getInstance(algorithm);
messageDigest.update(Bytes.toBytes(sb.toString()));
return messageDigest.digest();
} catch (IOException ioex) {
LOG.error("Calculating checksum failed, because of ", ioex);
return new byte[0];
} catch (NoSuchAlgorithmException e) {
LOG.error("No such algorithm : " + algorithm + "!");
return new byte[0];
}
} | 3.68 |
flink_NetUtils_socketAddressToUrlString | /**
* Encodes an IP address and port to be included in a URL. In particular, this method makes sure
* that IPv6 addresses have the proper formatting to be included in URLs.
*
* @param address The socket address with the IP address and port.
* @return The proper URL string encoded IP address and port.
*/
public static String socketAddressToUrlString(InetSocketAddress address) {
if (address.isUnresolved()) {
throw new IllegalArgumentException(
"Address cannot be resolved: " + address.getHostString());
}
return ipAddressAndPortToUrlString(address.getAddress(), address.getPort());
} | 3.68 |
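A usage sketch for the method above; the exact output format is determined by `ipAddressAndPortToUrlString`, so the values in the comments are only indicative.

```java
// Resolved addresses only; unresolved ones throw IllegalArgumentException.
InetSocketAddress v4 = new InetSocketAddress(InetAddress.getLoopbackAddress(), 8081);
String v4Url = NetUtils.socketAddressToUrlString(v4); // e.g. "127.0.0.1:8081"

// getByName throws the checked UnknownHostException, so run this inside a
// method that declares it.
InetSocketAddress v6 = new InetSocketAddress(InetAddress.getByName("::1"), 8081);
String v6Url = NetUtils.socketAddressToUrlString(v6); // IPv6 gets bracketed, e.g. "[...]:8081"
```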
hbase_HRegion_registerService | /**
* Registers a new protocol buffer {@link Service} subclass as a coprocessor endpoint to be
* available for handling {@link #execService(RpcController, CoprocessorServiceCall)} calls.
* <p/>
* Only a single instance may be registered per region for a given {@link Service} subclass (the
* instances are keyed on {@link ServiceDescriptor#getFullName()}). After the first registration,
* subsequent calls with the same service name will fail with a return value of {@code false}.
* @param instance the {@code Service} subclass instance to expose as a coprocessor endpoint
* @return {@code true} if the registration was successful, {@code false} otherwise
*/
public boolean registerService(Service instance) {
// No stacking of instances is allowed for a single service name
ServiceDescriptor serviceDesc = instance.getDescriptorForType();
String serviceName = CoprocessorRpcUtils.getServiceName(serviceDesc);
if (coprocessorServiceHandlers.containsKey(serviceName)) {
LOG.error("Coprocessor service {} already registered, rejecting request from {} in region {}",
serviceName, instance, this);
return false;
}
coprocessorServiceHandlers.put(serviceName, instance);
if (LOG.isDebugEnabled()) {
LOG.debug("Registered coprocessor service: region="
+ Bytes.toStringBinary(getRegionInfo().getRegionName()) + " service=" + serviceName);
}
return true;
} | 3.68 |
flink_SqlNodeConvertUtils_validateAlterView | /**
* Validates that the view to alter exists and is valid, and returns the {@link CatalogView} to alter.
*/
static CatalogView validateAlterView(SqlAlterView alterView, ConvertContext context) {
UnresolvedIdentifier unresolvedIdentifier =
UnresolvedIdentifier.of(alterView.fullViewName());
ObjectIdentifier viewIdentifier =
context.getCatalogManager().qualifyIdentifier(unresolvedIdentifier);
Optional<ContextResolvedTable> optionalCatalogTable =
context.getCatalogManager().getTable(viewIdentifier);
// check the view exist and is not a temporary view
if (!optionalCatalogTable.isPresent() || optionalCatalogTable.get().isTemporary()) {
throw new ValidationException(
String.format("View %s doesn't exist or is a temporary view.", viewIdentifier));
}
// check the view is exactly a view
CatalogBaseTable baseTable = optionalCatalogTable.get().getResolvedTable();
if (baseTable instanceof CatalogTable) {
throw new ValidationException("ALTER VIEW for a table is not allowed");
}
return (CatalogView) baseTable;
} | 3.68 |
hbase_ProcedureExecutor_isRunning | // ==========================================================================
// Accessors
// ==========================================================================
public boolean isRunning() {
return running.get();
} | 3.68 |
framework_ContainerHierarchicalWrapper_getItemIds | /*
* Gets the IDs of all Items stored in the Container. Don't add a JavaDoc
* comment here; we use the default documentation from the implemented
* interface.
*/
@Override
public Collection<?> getItemIds() {
return container.getItemIds();
} | 3.68 |
framework_PropertysetItem_hashCode | /*
* (non-Javadoc)
*
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return (list == null ? 0 : list.hashCode())
^ (map == null ? 0 : map.hashCode())
^ ((propertySetChangeListeners == null
|| propertySetChangeListeners.isEmpty()) ? 0
: propertySetChangeListeners.hashCode());
} | 3.68 |
flink_DeltaIterationBase_getSolutionSet | /**
* Gets the contract that represents the solution set for the step function.
*
* @return The solution set for the step function.
*/
public Operator getSolutionSet() {
return this.solutionSetPlaceholder;
} | 3.68 |
hadoop_AMRMProxyService_recover | /**
* Recover from NM state store. Called after serviceInit before serviceStart.
*
* @throws IOException if recover fails
*/
public void recover() throws IOException {
LOG.info("Recovering AMRMProxyService.");
RecoveredAMRMProxyState state =
this.nmContext.getNMStateStore().loadAMRMProxyState();
this.secretManager.recover(state);
LOG.info("Recovering {} running applications for AMRMProxy.",
state.getAppContexts().size());
for (Map.Entry<ApplicationAttemptId, Map<String, byte[]>> entry : state
.getAppContexts().entrySet()) {
ApplicationAttemptId attemptId = entry.getKey();
LOG.info("Recovering app attempt {}.", attemptId);
long startTime = clock.getTime();
// Try recover for the running application attempt
try {
String user = null;
Token<AMRMTokenIdentifier> amrmToken = null;
for (Map.Entry<String, byte[]> contextEntry : entry.getValue()
.entrySet()) {
if (contextEntry.getKey().equals(NMSS_USER_KEY)) {
user = new String(contextEntry.getValue(), StandardCharsets.UTF_8);
} else if (contextEntry.getKey().equals(NMSS_AMRMTOKEN_KEY)) {
amrmToken = new Token<>();
amrmToken.decodeFromUrlString(
new String(contextEntry.getValue(), StandardCharsets.UTF_8));
// Clear the service field, as if RM just issued the token
amrmToken.setService(new Text());
}
}
if (amrmToken == null) {
throw new IOException("No amrmToken found for app attempt " + attemptId);
}
if (user == null) {
throw new IOException("No user found for app attempt " + attemptId);
}
// Regenerate the local AMRMToken for the AM
Token<AMRMTokenIdentifier> localToken =
this.secretManager.createAndGetAMRMToken(attemptId);
// Retrieve the AM container credentials from NM context
Credentials amCred = null;
for (Container container : this.nmContext.getContainers().values()) {
LOG.debug("From NM Context container {}.", container.getContainerId());
if (container.getContainerId().getApplicationAttemptId().equals(
attemptId) && container.getContainerTokenIdentifier() != null) {
LOG.debug("Container type {}.",
container.getContainerTokenIdentifier().getContainerType());
if (container.getContainerTokenIdentifier()
.getContainerType() == ContainerType.APPLICATION_MASTER) {
LOG.info("AM container {} found in context, has credentials: {}.",
container.getContainerId(),
(container.getCredentials() != null));
amCred = container.getCredentials();
}
}
}
if (amCred == null) {
LOG.error("No credentials found for AM container of {}. "
+ "Yarn registry access might not work.", attemptId);
}
// Create the interceptor pipeline for the AM
initializePipeline(attemptId, user, amrmToken, localToken,
entry.getValue(), true, amCred);
long endTime = clock.getTime();
this.metrics.succeededRecoverRequests(endTime - startTime);
} catch (Throwable e) {
LOG.error("Exception when recovering {}, removing it from NMStateStore and move on.",
attemptId, e);
this.metrics.incrFailedAppRecoveryCount();
this.nmContext.getNMStateStore().removeAMRMProxyAppContext(attemptId);
}
}
} | 3.68 |
open-banking-gateway_AuthSessionHandler_createNewAuthSessionAndEnhanceResult | /**
* Creates a new authorization session associated with the request.
* @param request Request to associate the session with.
* @param sessionKey Authorization session encryption key.
* @param context Service context for the request
* @param result Protocol response that required the session to be opened
* @param <O> Outcome class
* @return New authorization session
*/
@NotNull
@SneakyThrows
@Transactional
public <O> AuthSession createNewAuthSessionAndEnhanceResult(
FacadeServiceableRequest request,
SecretKeyWithIv sessionKey,
ServiceContext context,
FacadeResultRedirectable<O, ?> result
) {
return fillAuthSessionData(request, context, sessionKey, result);
} | 3.68 |
flink_RocksDBStateDownloader_transferAllStateDataToDirectory | /**
* Transfer all state data to the target directory, as specified in the download requests.
*
* @param downloadRequests the list of downloads.
* @throws Exception If anything about the download goes wrong.
*/
public void transferAllStateDataToDirectory(
Collection<StateHandleDownloadSpec> downloadRequests,
CloseableRegistry closeableRegistry)
throws Exception {
// We use this closer for fine-grained shutdown of all parallel downloading.
CloseableRegistry internalCloser = new CloseableRegistry();
// Make sure we also react to external close signals.
closeableRegistry.registerCloseable(internalCloser);
try {
List<CompletableFuture<Void>> futures =
transferAllStateDataToDirectoryAsync(downloadRequests, internalCloser)
.collect(Collectors.toList());
// Wait until either all futures completed successfully or one failed exceptionally.
FutureUtils.completeAll(futures).get();
} catch (Exception e) {
downloadRequests.stream()
.map(StateHandleDownloadSpec::getDownloadDestination)
.map(Path::toFile)
.forEach(FileUtils::deleteDirectoryQuietly);
// Error reporting
Throwable throwable = ExceptionUtils.stripExecutionException(e);
throwable = ExceptionUtils.stripException(throwable, RuntimeException.class);
if (throwable instanceof IOException) {
throw (IOException) throwable;
} else {
throw new FlinkRuntimeException("Failed to download data for state handles.", e);
}
} finally {
// Unregister and close the internal closer.
if (closeableRegistry.unregisterCloseable(internalCloser)) {
IOUtils.closeQuietly(internalCloser);
}
}
} | 3.68 |
hbase_MetricsConnection_getServerStats | /** serverStats metric */
public ConcurrentHashMap<ServerName, ConcurrentMap<byte[], RegionStats>> getServerStats() {
return serverStats;
} | 3.68 |
hbase_ServerNonceManager_endOperation | /**
* Ends the operation started by startOperation.
* @param group Nonce group.
* @param nonce Nonce.
* @param success Whether the operation has succeeded.
*/
public void endOperation(long group, long nonce, boolean success) {
if (nonce == HConstants.NO_NONCE) return;
NonceKey nk = new NonceKey(group, nonce);
OperationContext newResult = nonces.get(nk);
assert newResult != null;
synchronized (newResult) {
assert newResult.getState() == OperationContext.WAIT;
// If we failed, other retries can proceed.
newResult.setState(success ? OperationContext.DONT_PROCEED : OperationContext.PROCEED);
if (success) {
newResult.reportActivity(); // Set time to use for cleanup.
} else {
OperationContext val = nonces.remove(nk);
assert val == newResult;
}
if (newResult.hasWait()) {
LOG.debug("Conflict with running op ended: " + nk + ", " + newResult);
newResult.notifyAll();
}
}
} | 3.68 |
hadoop_ComponentContainers_containers | /**
* Sets the containers.
* @param compContainers containers of the component.
*/
public ComponentContainers containers(List<Container> compContainers) {
this.containers = compContainers;
return this;
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_generateUrlAssignmentIndirectly | /**
* Gets a parameter of type <code>URL</code> indirectly from the method parameters:
* <p>
* tests whether any parameter has a getter method that returns <code>URL</code>.
* <p>
* If none is found, throws IllegalStateException.
*/
private String generateUrlAssignmentIndirectly(Method method) {
Class<?>[] pts = method.getParameterTypes();
Map<String, Integer> getterReturnUrl = new HashMap<>();
// find URL getter method
for (int i = 0; i < pts.length; ++i) {
for (Method m : pts[i].getMethods()) {
String name = m.getName();
if ((name.startsWith("get") || name.length() > 3)
&& Modifier.isPublic(m.getModifiers())
&& !Modifier.isStatic(m.getModifiers())
&& m.getParameterTypes().length == 0
&& m.getReturnType() == URL.class) {
getterReturnUrl.put(name, i);
}
}
}
if (getterReturnUrl.size() <= 0) {
// getter method not found, throw
throw new IllegalStateException("Failed to create adaptive class for interface " + type.getName()
+ ": not found url parameter or url attribute in parameters of method " + method.getName());
}
Integer index = getterReturnUrl.get("getUrl");
if (index != null) {
return generateGetUrlNullCheck(index, pts[index], "getUrl");
} else {
Map.Entry<String, Integer> entry =
getterReturnUrl.entrySet().iterator().next();
return generateGetUrlNullCheck(entry.getValue(), pts[entry.getValue()], entry.getKey());
}
} | 3.68 |
hbase_DependentColumnFilter_parseFrom | /**
* Parse a serialized representation of {@link DependentColumnFilter}
* @param pbBytes A pb serialized {@link DependentColumnFilter} instance
* @return An instance of {@link DependentColumnFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static DependentColumnFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
FilterProtos.DependentColumnFilter proto;
try {
proto = FilterProtos.DependentColumnFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
final CompareOperator valueCompareOp =
CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
ByteArrayComparable valueComparator = null;
try {
if (proto.getCompareFilter().hasComparator()) {
valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
}
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new DependentColumnFilter(
proto.hasColumnFamily() ? proto.getColumnFamily().toByteArray() : null,
proto.hasColumnQualifier() ? proto.getColumnQualifier().toByteArray() : null,
proto.getDropDependentColumn(), valueCompareOp, valueComparator);
} | 3.68 |
hudi_Triple_equals | /**
* <p>
* Compares this triple to another based on the three elements.
* </p>
*
* @param obj the object to compare to, null returns false
* @return true if the elements of the triple are equal
*/
@SuppressWarnings("deprecation") // ObjectUtils.equals(Object, Object) has been deprecated in 3.2
@Override
public boolean equals(final Object obj) {
if (obj == this) {
return true;
}
if (obj instanceof Triple<?, ?, ?>) {
final Triple<?, ?, ?> other = (Triple<?, ?, ?>) obj;
return getLeft().equals(other.getLeft()) && getMiddle().equals(other.getMiddle())
&& getRight().equals(other.getRight());
}
return false;
} | 3.68 |
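A sketch of the equality contract described above, assuming the usual `Triple.of(left, middle, right)` factory. Note that, as written, the comparison calls `equals` directly on the left, middle, and right elements, so null elements would need extra care despite the "may be null" wording in related Javadoc.

```java
Triple<String, Integer, Double> a = Triple.of("id", 1, 0.5);
Triple<String, Integer, Double> b = Triple.of("id", 1, 0.5);
boolean same = a.equals(b);                             // true: all three elements match
boolean different = a.equals(Triple.of("id", 2, 0.5));  // false: middle differs
```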
flink_JobGraph_getVertices | /**
* Returns an Iterable to iterate all vertices registered with the job graph.
*
* @return an Iterable to iterate all vertices registered with the job graph
*/
public Iterable<JobVertex> getVertices() {
return this.taskVertices.values();
} | 3.68 |
hadoop_ByteArray_buffer | /**
* @return the underlying buffer.
*/
@Override
public byte[] buffer() {
return buffer;
} | 3.68 |
hbase_SingleColumnValueExcludeFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof SingleColumnValueExcludeFilter)) {
return false;
}
return super.areSerializedFieldsEqual(o);
} | 3.68 |
hadoop_FedBalanceContext_build | /**
* Build the FedBalanceContext.
*
* @return the FedBalanceContext obj.
*/
public FedBalanceContext build() {
FedBalanceContext context = new FedBalanceContext();
context.src = this.src;
context.dst = this.dst;
context.mount = this.mount;
context.conf = this.conf;
context.forceCloseOpenFiles = this.forceCloseOpenFiles;
context.useMountReadOnly = this.useMountReadOnly;
context.mapNum = this.mapNum;
context.bandwidthLimit = this.bandwidthLimit;
context.trashOpt = this.trashOpt;
context.delayDuration = this.delayDuration;
context.diffThreshold = this.diffThreshold;
return context;
} | 3.68 |
hbase_KeyValue_compareIgnoringPrefix | /**
* Overridden
* @param commonPrefix location of expected common prefix
* @param left the left kv serialized byte[] to be compared with
* @param loffset the offset in the left byte[]
* @param llength the length in the left byte[]
* @param right the right kv serialized byte[] to be compared with
* @param roffset the offset in the byte[]
* @param rlength the length in the right byte[]
* @return 0 if equal, <0 if left smaller, >0 if right smaller
*/
@Override // SamePrefixComparator
public int compareIgnoringPrefix(int commonPrefix, byte[] left, int loffset, int llength,
byte[] right, int roffset, int rlength) {
// Compare row
short lrowlength = Bytes.toShort(left, loffset);
short rrowlength;
int comparisonResult = 0;
if (commonPrefix < ROW_LENGTH_SIZE) {
// almost nothing in common
rrowlength = Bytes.toShort(right, roffset);
comparisonResult = compareRows(left, loffset + ROW_LENGTH_SIZE, lrowlength, right,
roffset + ROW_LENGTH_SIZE, rrowlength);
} else { // the row length is the same
rrowlength = lrowlength;
if (commonPrefix < ROW_LENGTH_SIZE + rrowlength) {
// The rows are not the same. Exclude the common prefix and compare
// the rest of the two rows.
int common = commonPrefix - ROW_LENGTH_SIZE;
comparisonResult = compareRows(left, loffset + common + ROW_LENGTH_SIZE,
lrowlength - common, right, roffset + common + ROW_LENGTH_SIZE, rrowlength - common);
}
}
if (comparisonResult != 0) {
return comparisonResult;
}
assert lrowlength == rrowlength;
return compareWithoutRow(commonPrefix, left, loffset, llength, right, roffset, rlength,
lrowlength);
} | 3.68 |
hudi_CleanActionExecutor_clean | /**
* Performs cleaning of partition paths according to the cleaning policy and returns the clean stats per partition.
* Handles skew across the partitions to clean by making individual files to clean the unit of task distribution.
*
* @throws IllegalArgumentException if unknown cleaning policy is provided
*/
List<HoodieCleanStat> clean(HoodieEngineContext context, HoodieCleanerPlan cleanerPlan) {
int cleanerParallelism = Math.min(
cleanerPlan.getFilePathsToBeDeletedPerPartition().values().stream().mapToInt(List::size).sum(),
config.getCleanerParallelism());
LOG.info("Using cleanerParallelism: " + cleanerParallelism);
context.setJobStatus(this.getClass().getSimpleName(), "Perform cleaning of table: " + config.getTableName());
Stream<Pair<String, CleanFileInfo>> filesToBeDeletedPerPartition =
cleanerPlan.getFilePathsToBeDeletedPerPartition().entrySet().stream()
.flatMap(x -> x.getValue().stream().map(y -> new ImmutablePair<>(x.getKey(),
new CleanFileInfo(y.getFilePath(), y.getIsBootstrapBaseFile()))));
Stream<ImmutablePair<String, PartitionCleanStat>> partitionCleanStats =
context.mapPartitionsToPairAndReduceByKey(filesToBeDeletedPerPartition,
iterator -> deleteFilesFunc(iterator, table), PartitionCleanStat::merge, cleanerParallelism);
Map<String, PartitionCleanStat> partitionCleanStatsMap = partitionCleanStats
.collect(Collectors.toMap(Pair::getKey, Pair::getValue));
List<String> partitionsToBeDeleted = table.getMetaClient().getTableConfig().isTablePartitioned() && cleanerPlan.getPartitionsToBeDeleted() != null
? cleanerPlan.getPartitionsToBeDeleted()
: new ArrayList<>();
partitionsToBeDeleted.forEach(entry -> {
try {
if (!isNullOrEmpty(entry)) {
deleteFileAndGetResult(table.getMetaClient().getFs(), table.getMetaClient().getBasePath() + "/" + entry);
}
} catch (IOException e) {
LOG.warn("Partition deletion failed " + entry);
}
});
// Return PartitionCleanStat for each partition passed.
return cleanerPlan.getFilePathsToBeDeletedPerPartition().keySet().stream().map(partitionPath -> {
PartitionCleanStat partitionCleanStat = partitionCleanStatsMap.containsKey(partitionPath)
? partitionCleanStatsMap.get(partitionPath)
: new PartitionCleanStat(partitionPath);
HoodieActionInstant actionInstant = cleanerPlan.getEarliestInstantToRetain();
return HoodieCleanStat.newBuilder().withPolicy(config.getCleanerPolicy()).withPartitionPath(partitionPath)
.withEarliestCommitRetained(Option.ofNullable(
actionInstant != null
? new HoodieInstant(HoodieInstant.State.valueOf(actionInstant.getState()),
actionInstant.getAction(), actionInstant.getTimestamp())
: null))
.withLastCompletedCommitTimestamp(cleanerPlan.getLastCompletedCommitTimestamp())
.withDeletePathPattern(partitionCleanStat.deletePathPatterns())
.withSuccessfulDeletes(partitionCleanStat.successDeleteFiles())
.withFailedDeletes(partitionCleanStat.failedDeleteFiles())
.withDeleteBootstrapBasePathPatterns(partitionCleanStat.getDeleteBootstrapBasePathPatterns())
.withSuccessfulDeleteBootstrapBaseFiles(partitionCleanStat.getSuccessfulDeleteBootstrapBaseFiles())
.withFailedDeleteBootstrapBaseFiles(partitionCleanStat.getFailedDeleteBootstrapBaseFiles())
.isPartitionDeleted(partitionsToBeDeleted.contains(partitionPath))
.build();
}).collect(Collectors.toList());
} | 3.68 |
hadoop_ServiceLauncher_getServiceName | /**
* Get the service name via {@link Service#getName()}.
*
* If the service is not instantiated, the classname is returned instead.
* @return the service name
*/
public String getServiceName() {
Service s = service;
String name = null;
if (s != null) {
try {
name = s.getName();
} catch (Exception ignored) {
// ignored
}
}
if (name != null) {
return "service " + name;
} else {
return "service " + serviceName;
}
} | 3.68 |
querydsl_MathExpressions_round | /**
* Round to s decimal places
*
* @param num numeric expression
* @param s decimal places
* @return round(num, s)
*/
public static <A extends Number & Comparable<?>> NumberExpression<A> round(Expression<A> num, int s) {
return Expressions.numberOperation(num.getType(), MathOps.ROUND2, num, ConstantImpl.create(s));
} | 3.68 |
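A usage sketch for `round` above; the `item` metamodel, its `price` column, and the `query` instance are hypothetical.

```java
// Emits round(item.price, 2) in the generated SQL.
NumberExpression<BigDecimal> rounded = MathExpressions.round(item.price, 2);
query.select(rounded).from(item).fetch();
```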
hadoop_IdentityTransformer_transformAclEntriesForSetRequest | /**
* Perform Identity transformation when calling setAcl(),removeAclEntries() and modifyAclEntries()
* If the AclEntry type is a user or group, and its name is one of the following:
* 1.short name; 2.$superuser; 3.Fully qualified name; 4. principal id.
* <pre>
* Short name could be transformed to:
* - A service principal id or $superuser, if short name belongs a daemon service
* stated in substitution list AND "fs.azure.identity.transformer.service.principal.id"
* is set with $superuser or a principal id.
* - A fully qualified name, if the AclEntry type is User AND if "fs.azure.identity.transformer.domain.name"
* is set in configuration. This is to make the behavior consistent with HDI.
*
* $superuser, fully qualified name and principal id should not be transformed.
* </pre>
* @param aclEntries list of AclEntry
* */
public void transformAclEntriesForSetRequest(final List<AclEntry> aclEntries) {
if (skipUserIdentityReplacement) {
return;
}
for (int i = 0; i < aclEntries.size(); i++) {
AclEntry aclEntry = aclEntries.get(i);
String name = aclEntry.getName();
String transformedName = name;
if (name == null || name.isEmpty() || aclEntry.getType().equals(AclEntryType.OTHER) || aclEntry.getType().equals(AclEntryType.MASK)) {
continue;
}
// case 1: when the user or group name to be set is stated in substitution list.
if (isInSubstitutionList(name)) {
transformedName = servicePrincipalId;
} else if (aclEntry.getType().equals(AclEntryType.USER) // case 2: when the owner is a short name
&& shouldUseFullyQualifiedUserName(name)) { // of the user principal name (UPN).
// Notice: for group type ACL entry, if name is shortName.
// It won't be converted to Full Name. This is
// to make the behavior consistent with HDI.
transformedName = getFullyQualifiedName(name);
}
// Avoid unnecessary new AclEntry allocation
if (transformedName.equals(name)) {
continue;
}
AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
aclEntryBuilder.setType(aclEntry.getType());
aclEntryBuilder.setName(transformedName);
aclEntryBuilder.setScope(aclEntry.getScope());
aclEntryBuilder.setPermission(aclEntry.getPermission());
// Replace the original AclEntry
aclEntries.set(i, aclEntryBuilder.build());
}
} | 3.68 |
pulsar_AbstractMetrics_createMetricsByDimension | /**
* Creates a dimension key for replication metrics.
*
* @param namespace
* @param fromClusterName
* @param toClusterName
* @return
*/
protected Metrics createMetricsByDimension(String namespace, String fromClusterName, String toClusterName) {
Map<String, String> dimensionMap = new HashMap<>();
dimensionMap.put("namespace", namespace);
dimensionMap.put("from_cluster", fromClusterName);
dimensionMap.put("to_cluster", toClusterName);
return createMetrics(dimensionMap);
} | 3.68 |
zxing_FinderPatternFinder_foundPatternDiagonal | /**
* @param stateCount count of black/white/black/white/black pixels just read
* @return true iff the proportions of the counts is close enough to the 1/1/3/1/1 ratios
* used by finder patterns to be considered a match
*/
protected static boolean foundPatternDiagonal(int[] stateCount) {
int totalModuleSize = 0;
for (int i = 0; i < 5; i++) {
int count = stateCount[i];
if (count == 0) {
return false;
}
totalModuleSize += count;
}
if (totalModuleSize < 7) {
return false;
}
float moduleSize = totalModuleSize / 7.0f;
float maxVariance = moduleSize / 1.333f;
// Allow less than 75% variance from 1-1-3-1-1 proportions
return
Math.abs(moduleSize - stateCount[0]) < maxVariance &&
Math.abs(moduleSize - stateCount[1]) < maxVariance &&
Math.abs(3.0f * moduleSize - stateCount[2]) < 3 * maxVariance &&
Math.abs(moduleSize - stateCount[3]) < maxVariance &&
Math.abs(moduleSize - stateCount[4]) < maxVariance;
} | 3.68 |
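A small sketch of the ratio check with concrete pixel counts; since the method is protected static, a real caller would live in the same package or a subclass.

```java
// 2-2-6-2-2 matches the 1:1:3:1:1 ratio (module size 2, every segment within
// the allowed variance), so this returns true.
boolean match = FinderPatternFinder.foundPatternDiagonal(new int[] {2, 2, 6, 2, 2});

// 6-2-2-2-2 is far too heavy in the first segment and returns false.
boolean noMatch = FinderPatternFinder.foundPatternDiagonal(new int[] {6, 2, 2, 2, 2});
```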
querydsl_SQLExpressions_datediff | /**
* Get a datediff(unit, start, end) expression
*
* @param unit date part
* @param start start
* @param end end
* @return difference in units
*/
public static <D extends Comparable> NumberExpression<Integer> datediff(DatePart unit,
DateTimeExpression<D> start, D end) {
return Expressions.numberOperation(Integer.class, DATE_DIFF_OPS.get(unit), start, ConstantImpl.create(end));
} | 3.68 |
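A usage sketch for `datediff` above; the `order` metamodel, its `createdAt` column, and the end timestamp are hypothetical, and `DatePart.day` is assumed to be the relevant unit constant.

```java
// Difference in days between each order's creation time and "now".
NumberExpression<Integer> ageInDays = SQLExpressions.datediff(
        DatePart.day, order.createdAt, new Timestamp(System.currentTimeMillis()));
query.select(ageInDays).from(order).fetch();
```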
flink_TableConfig_getConfiguration | /**
* Gives direct access to the underlying application-specific key-value map for advanced
* configuration.
*/
public Configuration getConfiguration() {
return configuration;
} | 3.68 |
hadoop_IdentifierResolver_setOutputReaderClass | /**
* Sets the {@link OutputReader} class.
*/
protected void setOutputReaderClass(Class<? extends OutputReader>
outputReaderClass) {
this.outputReaderClass = outputReaderClass;
} | 3.68 |
flink_ExecutionConfig_getRegisteredTypesWithKryoSerializers | /** Returns the registered types with Kryo Serializers. */
public LinkedHashMap<Class<?>, SerializableSerializer<?>>
getRegisteredTypesWithKryoSerializers() {
return registeredTypesWithKryoSerializers;
} | 3.68 |
framework_GridLayoutElement_getColumnCount | /**
* Gets the total number of columns in the layout.
*
* @return the number of columns in the layout
* @since 8.0.6
*/
public long getColumnCount() {
Long res = (Long) getCommandExecutor()
.executeScript("return arguments[0].getColumnCount()", this);
if (res == null) {
throw new IllegalStateException("getColumnCount returned null");
}
return res.longValue();
} | 3.68 |
pulsar_AdminResource_isRedirectException | /**
* Checks whether the given exception is a redirect exception.
*
* @param ex The throwable.
* @return Whether is redirect exception
*/
protected static boolean isRedirectException(Throwable ex) {
Throwable realCause = FutureUtil.unwrapCompletionException(ex);
return realCause instanceof WebApplicationException
&& ((WebApplicationException) realCause).getResponse().getStatus()
== Status.TEMPORARY_REDIRECT.getStatusCode();
} | 3.68 |
flink_PekkoInvocationHandler_tell | /**
* Sends the message to the RPC endpoint.
*
* @param message to send to the RPC endpoint.
*/
protected void tell(Object message) {
rpcEndpoint.tell(message, ActorRef.noSender());
} | 3.68 |
hadoop_SchedulingResponse_isSuccess | /**
* Returns true if Scheduler was able to accept and commit this request.
* @return isSuccessful.
*/
public boolean isSuccess() {
return this.isSuccess;
} | 3.68 |
hadoop_DistributedCache_getTimestamp | /**
* Returns mtime of a given cache file on hdfs. Internal to MapReduce.
* @param conf configuration
* @param cache cache file
* @return mtime of a given cache file on hdfs
* @throws IOException
*/
@Deprecated
public static long getTimestamp(Configuration conf, URI cache)
throws IOException {
return getFileStatus(conf, cache).getModificationTime();
} | 3.68 |
flink_ServiceType_buildUpExternalRestService | /**
* Build up the external rest service template, according to the jobManager parameters.
*
* @param kubernetesJobManagerParameters the parameters of jobManager.
* @return the external rest service
*/
public Service buildUpExternalRestService(
KubernetesJobManagerParameters kubernetesJobManagerParameters) {
final String serviceName =
ExternalServiceDecorator.getExternalServiceName(
kubernetesJobManagerParameters.getClusterId());
return new ServiceBuilder()
.withApiVersion(Constants.API_VERSION)
.withNewMetadata()
.withName(serviceName)
.withLabels(kubernetesJobManagerParameters.getCommonLabels())
.withAnnotations(kubernetesJobManagerParameters.getRestServiceAnnotations())
.endMetadata()
.withNewSpec()
.withType(
kubernetesJobManagerParameters
.getRestServiceExposedType()
.serviceType()
.getType())
.withSelector(kubernetesJobManagerParameters.getSelectors())
.addNewPort()
.withName(Constants.REST_PORT_NAME)
.withPort(kubernetesJobManagerParameters.getRestPort())
.withNewTargetPort(kubernetesJobManagerParameters.getRestBindPort())
.endPort()
.endSpec()
.build();
} | 3.68 |
framework_Form_bindPropertyToField | /**
* Binds an item property to a field. The default behavior is to bind
* property straight to Field. If Property.Viewer type property (e.g.
* PropertyFormatter) is already set for field, the property is bound to
* that Property.Viewer.
*
* @param propertyId
* @param property
* @param field
* @since 6.7.3
*/
protected void bindPropertyToField(final Object propertyId,
final Property property, final Field field) {
// Check if the field already has a Property.Viewer set as its data source.
// In that case we expect the developer has e.g. a PropertyFormatter they
// wish to use, and we assign the property to that Viewer instead.
boolean hasFilterProperty = field.getPropertyDataSource() != null
&& field.getPropertyDataSource() instanceof Property.Viewer;
if (hasFilterProperty) {
((Property.Viewer) field.getPropertyDataSource())
.setPropertyDataSource(property);
} else {
field.setPropertyDataSource(property);
}
} | 3.68 |
pulsar_FunctionMetaDataManager_acquireLeadership | /**
* Called by the leader service when this worker becomes the leader.
* We first get an exclusive producer on the metadata topic. Next we drain the tailer
* to ensure that we have caught up to the metadata topic. After which we close the tailer.
* Note that this method cannot be synchronized because the tailer might still be processing messages
*/
public void acquireLeadership(Producer<byte[]> exclusiveProducer) {
log.info("FunctionMetaDataManager becoming leader by creating exclusive producer");
if (exclusiveLeaderProducer != null) {
log.error("FunctionMetaData Manager entered invalid state");
errorNotifier.triggerError(new IllegalStateException());
}
this.exclusiveLeaderProducer = exclusiveProducer;
FunctionMetaDataTopicTailer tailer = this.functionMetaDataTopicTailer;
this.functionMetaDataTopicTailer = null;
// Now that we have created the exclusive producer, wait for reader to get over
if (tailer != null) {
try {
tailer.stopWhenNoMoreMessages().get();
} catch (Exception e) {
log.error("Error while waiting for metadata tailer thread to finish", e);
errorNotifier.triggerError(e);
}
tailer.close();
}
log.info("FunctionMetaDataManager done becoming leader");
} | 3.68 |
framework_VaadinUriResolver_resolveVaadinUri | /**
* Translates a Vaadin URI to a URL that can be loaded by the browser. The
* following URI schemes are supported:
* <ul>
* <li><code>theme://</code> - resolves to the URL of the currently active
* theme.</li>
* <li><code>published://</code> - resolves to resources on the classpath
* published by {@link com.vaadin.annotations.JavaScript @JavaScript} or
* {@link com.vaadin.annotations.StyleSheet @StyleSheet} annotations on
* connectors.</li>
* <li><code>app://</code> - resolves to a URL that will be routed to the
* currently registered {@link com.vaadin.server.RequestHandler
* RequestHandler} instances.</li>
* <li><code>vaadin://</code> - resolves to the location of static resouces
* in the VAADIN directory</li>
* <li><code>frontend://</code> - resolves to the location of frontend
* (Bower and similar) resources, which might vary depending on the used
* browser</li>
* </ul>
* Any other URI protocols, such as <code>http://</code> or
* <code>https://</code> are passed through this method unmodified.
*
* @since 7.4
* @param vaadinUri
* the uri to resolve
* @return the resolved uri
*/
public String resolveVaadinUri(String vaadinUri) {
if (vaadinUri == null) {
return null;
}
if (vaadinUri
.startsWith(ApplicationConstants.FRONTEND_PROTOCOL_PREFIX)) {
final String frontendUrl = getFrontendUrl();
vaadinUri = frontendUrl + vaadinUri.substring(
ApplicationConstants.FRONTEND_PROTOCOL_PREFIX.length());
}
if (vaadinUri.startsWith(ApplicationConstants.THEME_PROTOCOL_PREFIX)) {
final String themeUri = getThemeUri();
vaadinUri = themeUri + vaadinUri.substring(7);
}
if (vaadinUri
.startsWith(ApplicationConstants.PUBLISHED_PROTOCOL_PREFIX)) {
// getAppUri *should* always end with /
// substring *should* always start with / (published:///foo.bar
// without published://)
vaadinUri = ApplicationConstants.APP_PROTOCOL_PREFIX
+ ApplicationConstants.PUBLISHED_FILE_PATH
+ vaadinUri.substring(
ApplicationConstants.PUBLISHED_PROTOCOL_PREFIX
.length());
// Let translation of app:// urls take care of the rest
}
if (vaadinUri.startsWith(ApplicationConstants.APP_PROTOCOL_PREFIX)) {
String relativeUrl = vaadinUri.substring(
ApplicationConstants.APP_PROTOCOL_PREFIX.length());
String serviceUrl = getServiceUrl();
String serviceUrlParameterName = getServiceUrlParameterName();
if (serviceUrlParameterName != null) {
// Should put path in v-resourcePath parameter and append query
// params to base portlet url
String[] parts = relativeUrl.split("\\?", 2);
String path = parts[0];
// If there's a "?" followed by something, append it as a query
// string to the base URL
if (parts.length > 1) {
String appUrlParams = parts[1];
serviceUrl = SharedUtil.addGetParameters(serviceUrl,
appUrlParams);
}
if (!path.startsWith("/")) {
path = '/' + path;
}
String pathParam = serviceUrlParameterName + "="
+ encodeQueryStringParameterValue(path);
serviceUrl = SharedUtil.addGetParameters(serviceUrl, pathParam);
vaadinUri = serviceUrl;
} else {
vaadinUri = serviceUrl + relativeUrl;
}
}
if (vaadinUri.startsWith(ApplicationConstants.VAADIN_PROTOCOL_PREFIX)) {
final String vaadinDirUri = getVaadinDirUrl();
String relativeUrl = vaadinUri.substring(
ApplicationConstants.VAADIN_PROTOCOL_PREFIX.length());
vaadinUri = vaadinDirUri + relativeUrl;
}
if (vaadinUri
.startsWith(ApplicationConstants.CONTEXT_PROTOCOL_PREFIX)) {
final String contextRoot = getContextRootUrl();
String relativeUrl = vaadinUri.substring(
ApplicationConstants.CONTEXT_PROTOCOL_PREFIX.length());
vaadinUri = contextRoot + relativeUrl;
}
return vaadinUri;
} | 3.68 |
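A brief sketch of resolving scheme-prefixed URIs with the method above; the `resolver` instance is hypothetical, and the resolved values depend on `getThemeUri()`, `getServiceUrl()`, and the other hooks.

```java
// theme:// is replaced by the active theme's base URI.
String themed = resolver.resolveVaadinUri("theme://images/logo.png");

// Plain http(s) URLs pass through unchanged.
String plain = resolver.resolveVaadinUri("https://example.com/script.js");
```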
pulsar_ResourceUnitRanking_getEstimatedMessageRate | /**
* Get the estimated message rate.
*/
public double getEstimatedMessageRate() {
return this.estimatedMessageRate;
} | 3.68 |
hadoop_JavaCommandLineBuilder_setJVMOpts | /**
* Set JVM opts.
* @param jvmOpts JVM opts
*/
public void setJVMOpts(String jvmOpts) {
if (ServiceUtils.isSet(jvmOpts)) {
add(jvmOpts);
}
} | 3.68 |
flink_LogicalScopeProvider_castFrom | /**
* Casts the given metric group to a {@link LogicalScopeProvider}, if it implements the
* interface.
*
* @param metricGroup metric group to cast
* @return cast metric group
* @throws IllegalStateException if the metric group did not implement the LogicalScopeProvider
* interface
*/
static LogicalScopeProvider castFrom(MetricGroup metricGroup) throws IllegalStateException {
if (metricGroup instanceof LogicalScopeProvider) {
return (LogicalScopeProvider) metricGroup;
} else {
throw new IllegalStateException(
"The given metric group does not implement the LogicalScopeProvider interface.");
}
} | 3.68 |
flink_GSChecksumWriteChannel_write | /**
* Writes bytes to the underlying channel and updates checksum.
*
* @param content The content to write
* @param start The start position
* @param length The number of bytes to write
* @return The number of bytes written
* @throws IOException On underlying failure
*/
public int write(byte[] content, int start, int length) throws IOException {
LOGGER.trace("Writing {} bytes to blob {}", length, blobIdentifier);
Preconditions.checkNotNull(content);
Preconditions.checkArgument(start >= 0);
Preconditions.checkArgument(length >= 0);
hasher.putBytes(content, start, length);
return writeChannel.write(content, start, length);
} | 3.68 |
hadoop_SampleQuantiles_insertBatch | /**
* Merges items from buffer into the samples array in one pass.
* This is more efficient than doing an insert on every item.
*/
private void insertBatch() {
if (bufferCount == 0) {
return;
}
Arrays.sort(buffer, 0, bufferCount);
// Base case: no samples
int start = 0;
if (samples.size() == 0) {
SampleItem newItem = new SampleItem(buffer[0], 1, 0);
samples.add(newItem);
start++;
}
ListIterator<SampleItem> it = samples.listIterator();
SampleItem item = it.next();
for (int i = start; i < bufferCount; i++) {
long v = buffer[i];
while (it.nextIndex() < samples.size() && item.value < v) {
item = it.next();
}
// If we found that bigger item, back up so we insert ourselves before it
if (item.value > v) {
it.previous();
}
// We use different indexes for the edge comparisons, because of the above
// if statement that adjusts the iterator
int delta;
if (it.previousIndex() == 0 || it.nextIndex() == samples.size()) {
delta = 0;
} else {
delta = ((int) Math.floor(allowableError(it.nextIndex()))) - 1;
}
SampleItem newItem = new SampleItem(v, 1, delta);
it.add(newItem);
item = newItem;
}
bufferCount = 0;
} | 3.68 |
framework_ImmediateUpload_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
// by default is in immediate mode (since 8.0)
Upload immediateUpload = new Upload();
immediateUpload.setId("immediateupload");
immediateUpload.setAcceptMimeTypes(TEST_MIME_TYPE);
addComponent(immediateUpload);
Upload upload = new Upload();
upload.setId("upload");
upload.setImmediateMode(false);
addComponent(upload);
} | 3.68 |
zxing_AddressBookParsedResult_getBirthday | /**
* @return birthday formatted as yyyyMMdd (e.g. 19780917)
*/
public String getBirthday() {
return birthday;
} | 3.68 |
flink_CompositeType_createComparator | /**
* Generic implementation of the comparator creation. Composite types are supplying the
* infrastructure to create the actual comparators
*
* @return The comparator
*/
@PublicEvolving
public TypeComparator<T> createComparator(
int[] logicalKeyFields,
boolean[] orders,
int logicalFieldOffset,
ExecutionConfig config) {
TypeComparatorBuilder<T> builder = createTypeComparatorBuilder();
builder.initializeTypeComparatorBuilder(logicalKeyFields.length);
for (int logicalKeyFieldIndex = 0;
logicalKeyFieldIndex < logicalKeyFields.length;
logicalKeyFieldIndex++) {
int logicalKeyField = logicalKeyFields[logicalKeyFieldIndex];
int logicalField = logicalFieldOffset; // this is the global/logical field number
boolean comparatorAdded = false;
for (int localFieldId = 0;
localFieldId < this.getArity()
&& logicalField <= logicalKeyField
&& !comparatorAdded;
localFieldId++) {
TypeInformation<?> localFieldType = this.getTypeAt(localFieldId);
if (localFieldType instanceof AtomicType && logicalField == logicalKeyField) {
// we found an atomic key --> create comparator
builder.addComparatorField(
localFieldId,
((AtomicType<?>) localFieldType)
.createComparator(orders[logicalKeyFieldIndex], config));
comparatorAdded = true;
}
// must be composite type and check that the logicalKeyField is within the bounds
// of the composite type's logical fields
else if (localFieldType instanceof CompositeType
&& logicalField <= logicalKeyField
&& logicalKeyField
<= logicalField + (localFieldType.getTotalFields() - 1)) {
// we found a compositeType that is containing the logicalKeyField we are
// looking for --> create comparator
builder.addComparatorField(
localFieldId,
((CompositeType<?>) localFieldType)
.createComparator(
new int[] {logicalKeyField},
new boolean[] {orders[logicalKeyFieldIndex]},
logicalField,
config));
comparatorAdded = true;
}
if (localFieldType instanceof CompositeType) {
// we need to subtract 1 because we are not accounting for the local field (not
// accessible for the user)
logicalField += localFieldType.getTotalFields() - 1;
}
logicalField++;
}
if (!comparatorAdded) {
throw new IllegalArgumentException(
"Could not add a comparator for the logical"
+ "key field index "
+ logicalKeyFieldIndex
+ ".");
}
}
return builder.createTypeComparator(config);
} | 3.68 |
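A hedged usage sketch for the method above: with a concrete composite type such as Flink's TupleTypeInfo, a comparator can be requested over selected logical key fields. The key index and ordering flag below are illustrative choices, not values mandated by the API.

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.typeutils.TypeComparator;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.TupleTypeInfo;

public class ComparatorSketch {
  public static void main(String[] args) {
    TupleTypeInfo<Tuple2<String, Integer>> typeInfo =
        TupleTypeInfo.getBasicTupleTypeInfo(String.class, Integer.class);

    // Build a comparator on logical field 1 (the Integer field), ascending order.
    TypeComparator<Tuple2<String, Integer>> comparator =
        typeInfo.createComparator(new int[] {1}, new boolean[] {true}, 0, new ExecutionConfig());

    int cmp = comparator.compare(Tuple2.of("a", 1), Tuple2.of("b", 2));
    System.out.println(cmp < 0); // true: 1 sorts before 2 on the key field
  }
}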
hbase_HRegion_close | /**
* Close down this HRegion. Flush the cache unless abort parameter is true, Shut down each HStore,
* don't service any more calls. This method could take some time to execute, so don't call it
* from a time-sensitive thread.
* @param abort true if server is aborting (only during testing)
   * @param ignoreStatus true if the status should be ignored (won't be shown on the task list)
* @param isGracefulStop true if region is being closed during graceful stop and the blocks in the
* BucketCache should not be evicted.
* @return Vector of all the storage files that the HRegion's component HStores make use of. It's
* a list of StoreFile objects. Can be null if we are not to close at this time or we are
* already closed.
* @throws IOException e
* @throws DroppedSnapshotException Thrown when replay of wal is required because a Snapshot was
* not properly persisted. The region is put in closing mode, and
* the caller MUST abort after this.
*/
public Map<byte[], List<HStoreFile>> close(boolean abort, boolean ignoreStatus,
boolean isGracefulStop) throws IOException {
// Only allow one thread to close at a time. Serialize them so dual
// threads attempting to close will run up against each other.
MonitoredTask status = TaskMonitor.get().createStatus(
"Closing region " + this.getRegionInfo().getEncodedName() + (abort ? " due to abort" : ""),
ignoreStatus, true);
status.setStatus("Waiting for close lock");
try {
synchronized (closeLock) {
if (isGracefulStop && rsServices != null) {
rsServices.getBlockCache().ifPresent(blockCache -> {
if (blockCache instanceof CombinedBlockCache) {
BlockCache l2 = ((CombinedBlockCache) blockCache).getSecondLevelCache();
if (l2 instanceof BucketCache) {
if (((BucketCache) l2).isCachePersistenceEnabled()) {
LOG.info(
"Closing region {} during a graceful stop, and cache persistence is on, "
+ "so setting evict on close to false. ",
this.getRegionInfo().getRegionNameAsString());
this.getStores().forEach(s -> s.getCacheConfig().setEvictOnClose(false));
}
}
}
});
}
return doClose(abort, status);
}
} finally {
if (LOG.isDebugEnabled()) {
LOG.debug("Region close journal for {}:\n{}", this.getRegionInfo().getEncodedName(),
status.prettyPrintJournal());
}
status.cleanup();
}
} | 3.68 |
morf_SqlUtils_asString | /**
* Returns a SQL DSL expression to return the field CASTed to
* a string of the specified length
*
* @param length length of the string cast
* @return {@link Cast} as string of given length
*/
public Cast asString(int length) {
return asType(DataType.STRING, length);
} | 3.68 |
hadoop_SinglePendingCommit_getLength | /**
* Destination file size.
* @return size of destination object
*/
public long getLength() {
return length;
} | 3.68 |
dubbo_NacosRegistry_getServiceNames | /**
* Get the service names from the specified {@link URL url}
*
* @param url {@link URL}
* @param listener {@link NotifyListener}
* @return non-null
*/
private Set<String> getServiceNames(URL url, NacosAggregateListener listener) {
if (isAdminProtocol(url)) {
scheduleServiceNamesLookup(url, listener);
return getServiceNamesForOps(url);
} else {
return getServiceNames0(url);
}
} | 3.68 |
hibernate-validator_SizeValidatorForArraysOfFloat_isValid | /**
* Checks the number of entries in an array.
*
* @param array The array to validate.
* @param constraintValidatorContext context in which the constraint is evaluated.
*
* @return Returns {@code true} if the array is {@code null} or the number of entries in
* {@code array} is between the specified {@code min} and {@code max} values (inclusive),
* {@code false} otherwise.
*/
@Override
public boolean isValid(float[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return true;
}
return array.length >= min && array.length <= max;
} | 3.68 |
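This validator backs the standard @Size constraint when it is applied to a float[]. A hedged usage sketch follows; the Measurement class is invented for illustration, and depending on the Hibernate Validator version the constraint packages are javax.validation or jakarta.validation (javax is assumed here).

import javax.validation.Validation;
import javax.validation.Validator;
import javax.validation.constraints.Size;

public class Measurement {
  // Between 1 and 3 readings (inclusive); a null array is considered valid by @Size.
  @Size(min = 1, max = 3)
  private final float[] readings;

  public Measurement(float[] readings) {
    this.readings = readings;
  }

  public static void main(String[] args) {
    Validator validator = Validation.buildDefaultValidatorFactory().getValidator();
    System.out.println(validator.validate(new Measurement(new float[] {1.0f})).isEmpty()); // true
    System.out.println(validator.validate(new Measurement(new float[0])).isEmpty());       // false
  }
}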
flink_SegmentsUtil_byteIndex | /**
* Given a bit index, return the byte index containing it.
*
* @param bitIndex the bit index.
* @return the byte index.
*/
private static int byteIndex(int bitIndex) {
return bitIndex >>> ADDRESS_BITS_PER_WORD;
} | 3.68 |
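ADDRESS_BITS_PER_WORD is presumably 3 here (a byte holds 2^3 bits), so the unsigned shift is equivalent to dividing the bit index by 8. A tiny sketch with the assumed constant spelled out:

public class BitToByteIndex {
  // Assumed value: 2^3 = 8 bits per byte, matching the shift in SegmentsUtil.byteIndex.
  private static final int ADDRESS_BITS_PER_WORD = 3;

  static int byteIndex(int bitIndex) {
    return bitIndex >>> ADDRESS_BITS_PER_WORD; // same as bitIndex / 8 for non-negative input
  }

  public static void main(String[] args) {
    System.out.println(byteIndex(0));  // 0
    System.out.println(byteIndex(7));  // 0
    System.out.println(byteIndex(8));  // 1
    System.out.println(byteIndex(13)); // 1
  }
}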
druid_ZookeeperNodeRegister_register | /**
* Register a Node which has a Properties as the payload.
* <pre>
* CAUTION: only one node can be registered,
* if you want to register another one,
* call deregister first
* </pre>
*
     * @param nodeId  the id under which this node registers itself
     * @param payload The information used to generate the payload Properties
     * @return true if the node was registered successfully; false if the registration was skipped
*/
public boolean register(String nodeId, List<ZookeeperNodeInfo> payload) {
if (payload == null || payload.isEmpty()) {
return false;
}
lock.lock();
try {
createPathIfNotExisted();
if (member != null) {
LOG.warn("GroupMember has already registered. Please deregister first.");
return false;
}
String payloadString = getPropertiesString(payload);
member = new GroupMember(client, path, nodeId, payloadString.getBytes());
member.start();
LOG.info("Register Node[" + nodeId + "] in path[" + path + "].");
return true;
} finally {
lock.unlock();
}
} | 3.68 |
hadoop_TemporaryAWSCredentialsProvider_createCredentials | /**
* The credentials here must include a session token, else this operation
* will raise an exception.
* @param config the configuration
* @return temporary credentials.
* @throws IOException on any failure to load the credentials.
* @throws NoAuthWithAWSException validation failure
* @throws NoAwsCredentialsException the credentials are actually empty.
*/
@Override
protected AwsCredentials createCredentials(Configuration config)
throws IOException {
MarshalledCredentials creds = MarshalledCredentialBinding.fromFileSystem(
getUri(), config);
MarshalledCredentials.CredentialTypeRequired sessionOnly
= MarshalledCredentials.CredentialTypeRequired.SessionOnly;
// treat only having non-session creds as empty.
if (!creds.isValid(sessionOnly)) {
throw new NoAwsCredentialsException(COMPONENT);
}
return MarshalledCredentialBinding.toAWSCredentials(creds,
sessionOnly, COMPONENT);
} | 3.68 |
morf_DatabaseMetaDataProvider_loadTable | /**
* Loads a table.
*
* @param tableName Name of the table.
* @return The table metadata.
*/
protected Table loadTable(AName tableName) {
final RealName realTableName = tableNames.get().get(tableName);
if (realTableName == null) {
throw new IllegalArgumentException("Table [" + tableName + "] not found.");
}
final Map<AName, Integer> primaryKey = loadTablePrimaryKey(realTableName);
final Supplier<List<Column>> columns = Suppliers.memoize(() -> loadTableColumns(realTableName, primaryKey));
final Supplier<List<Index>> indexes = Suppliers.memoize(() -> loadTableIndexes(realTableName));
return new Table() {
@Override
public String getName() {
return realTableName.getRealName();
}
@Override
public List<Column> columns() {
return columns.get();
}
@Override
public List<Index> indexes() {
return indexes.get();
}
@Override
public boolean isTemporary() {
return false;
}
};
} | 3.68 |
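The notable detail above is that columns and indexes are wrapped in Guava's Suppliers.memoize, so each metadata query runs at most once and only when first requested. A minimal sketch of that lazy-loading pattern (loadColumns is a placeholder, not the morf API):

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import java.util.Arrays;
import java.util.List;

public class LazyMetadata {
  private static List<String> loadColumns(String table) {
    System.out.println("expensive metadata query for " + table);
    return Arrays.asList("id", "name");
  }

  public static void main(String[] args) {
    Supplier<List<String>> columns = Suppliers.memoize(() -> loadColumns("person"));
    // No query has run yet; the first get() triggers it, the second reuses the cached result.
    System.out.println(columns.get());
    System.out.println(columns.get());
  }
}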
framework_Slot_getSpacingResizeListener | /**
* Returns the spacing element resize listener for this slot if one has been
* set.
*
* @return the listener or {@code null} if not set
*/
public ElementResizeListener getSpacingResizeListener() {
return spacingResizeListener;
} | 3.68 |
framework_DDEventHandleStrategy_restoreDragImage | /**
* Restores drag image after temporary update by
* {@link #updateDragImage(NativePreviewEvent, DDManagerMediator)}.
*
* @param originalImageDisplay
* original "display" CSS style property of drag image element
* @param mediator
* VDragAndDropManager data accessor
* @param event
* GWT event for active DnD operation
*/
public void restoreDragImage(String originalImageDisplay,
DDManagerMediator mediator, NativePreviewEvent event) {
VDragAndDropManager manager = mediator.getManager();
if (manager.getDragElement() != null) {
manager.getDragElement().getStyle().setProperty("display",
originalImageDisplay);
}
} | 3.68 |
flink_ResolvedCatalogBaseTable_getSchema | /**
* @deprecated This method returns the deprecated {@link TableSchema} class. The old class was a
* hybrid of resolved and unresolved schema information. It has been replaced by the new
* {@link ResolvedSchema} which is resolved by the framework and accessible via {@link
* #getResolvedSchema()}.
*/
@Deprecated
default TableSchema getSchema() {
return TableSchema.fromResolvedSchema(getResolvedSchema());
} | 3.68 |
hbase_SplitTableRegionProcedure_rollbackState | /**
* To rollback {@link SplitTableRegionProcedure}, an AssignProcedure is asynchronously submitted
* for parent region to be split (rollback doesn't wait on the completion of the AssignProcedure)
* . This can be improved by changing rollback() to support sub-procedures. See HBASE-19851 for
* details.
*/
@Override
protected void rollbackState(final MasterProcedureEnv env, final SplitTableRegionState state)
throws IOException, InterruptedException {
LOG.trace("{} rollback state={}", this, state);
try {
switch (state) {
case SPLIT_TABLE_REGION_POST_OPERATION:
case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
case SPLIT_TABLE_REGION_UPDATE_META:
// PONR
throw new UnsupportedOperationException(this + " unhandled state=" + state);
case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
break;
case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
case SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE:
deleteDaughterRegions(env);
break;
case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS:
openParentRegion(env);
break;
case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
// If it rolls back with state SPLIT_TABLE_REGION_CLOSE_PARENT_REGION, no need to call
// openParentRegion(), otherwise, it will result in OpenRegionProcedure for an
// already open region.
break;
case SPLIT_TABLE_REGION_PRE_OPERATION:
postRollBackSplitRegion(env);
break;
case SPLIT_TABLE_REGION_PREPARE:
break; // nothing to do
default:
throw new UnsupportedOperationException(this + " unhandled state=" + state);
}
} catch (IOException e) {
// This will be retried. Unless there is a bug in the code,
// this should be just a "temporary error" (e.g. network down)
LOG.warn("pid=" + getProcId() + " failed rollback attempt step " + state
+ " for splitting the region " + getParentRegion().getEncodedName() + " in table "
+ getTableName(), e);
throw e;
}
} | 3.68 |
dubbo_AbstractServiceDiscovery_doUpdate | /**
* Update Service Instance. Unregister and then register by default.
     * Can be overridden if the registry supports updating instances directly.
* <br/>
     * NOTICE: Remember to update {@link AbstractServiceDiscovery#serviceInstance}'s reference if it changes,
     * and to report metadata via {@link AbstractServiceDiscovery#reportMetadata(MetadataInfo)}
*
* @param oldServiceInstance origin service instance
* @param newServiceInstance new service instance
*/
protected void doUpdate(ServiceInstance oldServiceInstance, ServiceInstance newServiceInstance) {
this.doUnregister(oldServiceInstance);
this.serviceInstance = newServiceInstance;
if (!EMPTY_REVISION.equals(getExportedServicesRevision(newServiceInstance))) {
reportMetadata(newServiceInstance.getServiceMetadata());
this.doRegister(newServiceInstance);
}
} | 3.68 |
hadoop_MutableInverseQuantiles_getQuantiles | /**
* Returns the array of Inverse Quantiles declared in MutableInverseQuantiles.
*
* @return array of Inverse Quantiles
*/
public synchronized Quantile[] getQuantiles() {
return INVERSE_QUANTILES;
} | 3.68 |
hibernate-validator_NotEmptyValidatorForArraysOfBoolean_isValid | /**
* Checks the array is not {@code null} and not empty.
*
* @param array the array to validate
* @param constraintValidatorContext context in which the constraint is evaluated
* @return returns {@code true} if the array is not {@code null} and the array is not empty
*/
@Override
public boolean isValid(boolean[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return false;
}
return array.length > 0;
} | 3.68 |
hbase_CommonFSUtils_getNamespaceDir | /**
* Returns the {@link org.apache.hadoop.fs.Path} object representing the namespace directory under
* path rootdir
* @param rootdir qualified path of HBase root directory
* @param namespace namespace name
   * @return {@link org.apache.hadoop.fs.Path} for the namespace directory
*/
public static Path getNamespaceDir(Path rootdir, final String namespace) {
return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR, new Path(namespace)));
} | 3.68 |
rocketmq-connect_WorkerSinkTask_execute | /**
* execute poll and send record
*/
@Override
protected void execute() {
while (isRunning()) {
try {
long startTimeStamp = System.currentTimeMillis();
log.info("START pullMessageFromQueues, time started : {}", startTimeStamp);
if (messageQueues.size() == 0) {
log.info("messageQueuesOffsetMap is null, : {}", startTimeStamp);
stopPullMsgLatch.await(PULL_MSG_ERROR_BACKOFF_MS, TimeUnit.MILLISECONDS);
continue;
}
if (shouldPause()) {
// pause
pauseAll();
onPause();
try {
// wait unpause
if (awaitUnpause()) {
// check paused for retry
if (!pausedForRetry) {
resumeAll();
onResume();
}
}
continue;
} catch (InterruptedException e) {
                        // interrupted while waiting to unpause; loop back and re-check the running flag
}
}
iteration();
} catch (RetriableException e) {
log.error(" Sink task {}, pull message RetriableException, Error {} ", this, e.getMessage(), e);
} catch (InterruptedException interruptedException) {
//NO-op
} catch (Throwable e) {
log.error(" Sink task {}, pull message Throwable, Error {} ", this, e.getMessage(), e);
throw e;
}
}
} | 3.68 |
pulsar_FileUtils_deleteFilesInDirectory | /**
* Deletes all files (not directories) in the given directory (recursive)
* that match the given filename filter. If any file cannot be deleted then
* this is printed at warn to the given logger.
*
* @param directory to delete contents of
* @param filter if null then no filter is used
* @param logger to notify
* @param recurse will look for contents of sub directories.
* @param deleteEmptyDirectories default is false; if true will delete
* directories found that are empty
* @throws IOException if abstract pathname does not denote a directory, or
* if an I/O error occurs
*/
public static void deleteFilesInDirectory(
final File directory, final FilenameFilter filter, final Logger logger,
final boolean recurse, final boolean deleteEmptyDirectories) throws IOException {
// ensure the specified directory is actually a directory and that it exists
if (null != directory && directory.isDirectory()) {
final File ingestFiles[] = directory.listFiles();
if (ingestFiles == null) {
// null if abstract pathname does not denote a directory, or if an I/O error occurs
throw new IOException("Unable to list directory content in: " + directory.getAbsolutePath());
}
for (File ingestFile : ingestFiles) {
boolean process = (filter == null) ? true : filter.accept(directory, ingestFile.getName());
if (ingestFile.isFile() && process) {
FileUtils.deleteFile(ingestFile, logger, 3);
}
if (ingestFile.isDirectory() && recurse) {
FileUtils.deleteFilesInDirectory(ingestFile, filter, logger, recurse, deleteEmptyDirectories);
String[] ingestFileList = ingestFile.list();
if (deleteEmptyDirectories && ingestFileList != null && ingestFileList.length == 0) {
FileUtils.deleteFile(ingestFile, logger, 3);
}
}
}
}
} | 3.68 |
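A hedged usage sketch for the helper above: delete every ".tmp" file under a directory tree and prune sub-directories that end up empty. The directory path is invented, the Logger parameter is assumed to be the SLF4J Logger, and the import for the FileUtils class shown above is omitted because its package is not visible in this snippet.

import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// import for FileUtils (the class documented above) omitted; assumed to be on the classpath

public class CleanupExample {
  private static final Logger LOG = LoggerFactory.getLogger(CleanupExample.class);

  public static void main(String[] args) throws IOException {
    FilenameFilter tmpFiles = (dir, name) -> name.endsWith(".tmp");
    // Recurse into sub-directories and delete any that end up empty.
    FileUtils.deleteFilesInDirectory(new File("/var/tmp/ingest"), tmpFiles, LOG, true, true);
  }
}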
flink_OperationExecutorFactory_getDecimalDigits | /**
* The number of fractional digits for this type. Null is returned for data types where this is
* not applicable.
*/
private static @Nullable Integer getDecimalDigits(LogicalType columnType) {
switch (columnType.getTypeRoot()) {
case BOOLEAN:
case TINYINT:
case SMALLINT:
case INTEGER:
case BIGINT:
return 0;
case FLOAT:
return 7;
case DOUBLE:
return 15;
case DECIMAL:
return ((DecimalType) columnType).getScale();
case TIMESTAMP_WITHOUT_TIME_ZONE:
return 9;
default:
return null;
}
} | 3.68 |
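For the DECIMAL branch, the value reported is the declared scale of the column type. A small hedged sketch using Flink's DecimalType directly:

import org.apache.flink.table.types.logical.DecimalType;

public class DecimalDigitsExample {
  public static void main(String[] args) {
    DecimalType type = new DecimalType(10, 2); // precision 10, scale 2
    // getDecimalDigits would report 2 for a column of this type.
    System.out.println(type.getScale());
  }
}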
hadoop_ApplicationMaster_addAsLocalResourceFromEnv | /**
* Add the given resource into the map of resources, using information from
* the supplied environment variables.
*
* @param resource The resource to add.
* @param localResources Map of local resources to insert into.
* @param env Map of environment variables.
*/
public void addAsLocalResourceFromEnv(DynoResource resource,
Map<String, LocalResource> localResources, Map<String, String> env) {
LOG.debug("Adding resource to localResources: " + resource);
String resourcePath = resource.getResourcePath();
if (resourcePath == null) {
// Default to using the file name in the path
resourcePath = resource.getPath(env).getName();
}
localResources.put(resourcePath,
LocalResource.newInstance(URL.fromPath(resource.getPath(env)),
resource.getType(), LocalResourceVisibility.APPLICATION,
resource.getLength(env), resource.getTimestamp(env)));
} | 3.68 |
framework_Window_addCloseListener | /**
* Adds a CloseListener to the window.
*
* For a window the CloseListener is fired when the user closes it (clicks
* on the close button).
*
* For a browser level window the CloseListener is fired when the browser
* level window is closed. Note that closing a browser level window does not
* mean it will be destroyed. Also note that Opera does not send events like
* all other browsers and therefore the close listener might not be called
* if Opera is used.
*
* <p>
* Since Vaadin 6.5, removing windows using {@link #removeWindow(Window)}
* does fire the CloseListener.
* </p>
*
* @param listener
* the CloseListener to add, not null
* @since 8.0
*/
public Registration addCloseListener(CloseListener listener) {
return addListener(CloseEvent.class, listener, WINDOW_CLOSE_METHOD);
} | 3.68 |
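A hedged usage sketch in the Vaadin 8 style: the close listener can be a lambda, and the returned Registration handle allows it to be removed later. The window title and notification text are illustrative.

import com.vaadin.shared.Registration;
import com.vaadin.ui.Notification;
import com.vaadin.ui.UI;
import com.vaadin.ui.Window;

public class CloseListenerExample {
  public static void open(UI ui) {
    Window window = new Window("Details");
    Registration registration = window.addCloseListener(
        event -> Notification.show("Window was closed"));
    ui.addWindow(window);
    // Later, if the listener is no longer needed:
    // registration.remove();
  }
}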
flink_FromClasspathEntryClassInformationProvider_getJarFile | /**
* Always returns an empty {@code Optional} because this implementation relies on the JAR
* archive being available on either the user or the system classpath.
*
* @return An empty {@code Optional}.
*/
@Override
public Optional<File> getJarFile() {
return Optional.empty();
} | 3.68 |
pulsar_ReplicatedSubscriptionSnapshotCache_advancedMarkDeletePosition | /**
     * Signal that the mark-delete position on the subscription has been advanced. If there is a snapshot that
     * corresponds to this position, it will be returned; otherwise null is returned.
*/
public synchronized ReplicatedSubscriptionsSnapshot advancedMarkDeletePosition(PositionImpl pos) {
ReplicatedSubscriptionsSnapshot snapshot = null;
while (!snapshots.isEmpty()) {
PositionImpl first = snapshots.firstKey();
if (first.compareTo(pos) > 0) {
                // Snapshot is associated with a higher position, so it cannot be used yet
break;
} else {
                // This snapshot is potentially good. Continue the search to see if there is a higher snapshot we
                // can use
snapshot = snapshots.pollFirstEntry().getValue();
}
}
if (log.isDebugEnabled()) {
if (snapshot != null) {
log.debug("[{}] Advanced mark-delete position to {} -- found snapshot {} at {}:{}", subscription, pos,
snapshot.getSnapshotId(),
snapshot.getLocalMessageId().getLedgerId(),
snapshot.getLocalMessageId().getEntryId());
} else {
log.debug("[{}] Advanced mark-delete position to {} -- snapshot not found", subscription, pos);
}
}
return snapshot;
} | 3.68 |
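The loop above repeatedly polls the first entry of a sorted map until it passes the new mark-delete position, keeping the last snapshot at or below it. A self-contained sketch of the same pattern with positions simplified to longs (names illustrative, not the Pulsar API):

import java.util.TreeMap;

public class SnapshotCursor {
  private final TreeMap<Long, String> snapshots = new TreeMap<>();

  void addSnapshot(long position, String snapshotId) {
    snapshots.put(position, snapshotId);
  }

  // Returns the latest snapshot at or before pos, discarding everything consumed on the way.
  String advanceTo(long pos) {
    String result = null;
    while (!snapshots.isEmpty() && snapshots.firstKey() <= pos) {
      result = snapshots.pollFirstEntry().getValue();
    }
    return result;
  }

  public static void main(String[] args) {
    SnapshotCursor cursor = new SnapshotCursor();
    cursor.addSnapshot(10, "snap-a");
    cursor.addSnapshot(20, "snap-b");
    cursor.addSnapshot(30, "snap-c");
    System.out.println(cursor.advanceTo(25)); // snap-b (snap-a is discarded, snap-c is kept)
    System.out.println(cursor.advanceTo(5));  // null (nothing at or before 5 remains)
  }
}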
framework_LayoutManager_getInnerHeightDouble | /**
* Gets the inner height (excluding margins, paddings and borders) of the
* given element, provided that it has been measured. These elements are
* guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* -1 is returned if the element has not been measured. If 0 is returned, it
* might indicate that the element is not attached to the DOM.
*
* @since 7.5.1
* @param element
* the element to get the measured size for
* @return the measured inner height (excluding margins, paddings and
* borders) of the element in pixels.
*/
public final double getInnerHeightDouble(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getInnerHeight();
} | 3.68 |
hadoop_ManifestCommitterSupport_getAppAttemptId | /**
* Get the Application Attempt Id for this job
* by looking for {@link MRJobConfig#APPLICATION_ATTEMPT_ID}
* in the configuration, falling back to 0 if unset.
* For spark it will always be 0, for MR it will be set in the AM
* to the {@code ApplicationAttemptId} the AM is launched with.
* @param conf job configuration.
* @return the Application Attempt Id for the job.
*/
public static int getAppAttemptId(Configuration conf) {
return conf.getInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
INITIAL_APP_ATTEMPT_ID);
} | 3.68 |
morf_SelectStatement_except | /**
   * Perform an EXCEPT set operation with another {@code selectStatement},
* eliminating any rows from the top select statement which exist in the bottom
* select statement.
* <p>
* If an except operation is performed then all participating select statements
* require the same selected column list.
* </p>
*
   * @param selectStatement the select statement whose rows will be excluded from the current
   *          select statement.
* @return a new select statement with the change applied.
*/
public SelectStatement except(SelectStatement selectStatement) {
return copyOnWriteOrMutate(
(SelectStatementBuilder b) -> b.except(selectStatement),
() -> setOperators.add(new ExceptSetOperator(this, selectStatement))
);
} | 3.68 |
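A hedged sketch of how this set operator might be combined with the rest of the morf SQL DSL; the table and field names are invented, and the static helpers and their package are assumed to come from SqlUtils.

import static org.alfasoftware.morf.sql.SqlUtils.field;
import static org.alfasoftware.morf.sql.SqlUtils.select;
import static org.alfasoftware.morf.sql.SqlUtils.tableRef;

import org.alfasoftware.morf.sql.SelectStatement;

public class ExceptExample {
  // Rows present in AllOrders but absent from CancelledOrders (both select the same column list).
  public static SelectStatement activeOrderIds() {
    return select(field("orderId"))
        .from(tableRef("AllOrders"))
        .except(select(field("orderId")).from(tableRef("CancelledOrders")));
  }
}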
dubbo_Converter_getSourceType | /**
* Get the source type
*
* @return non-null
*/
default Class<S> getSourceType() {
return findActualTypeArgument(getClass(), Converter.class, 0);
} | 3.68 |
flink_HadoopOutputFormatBase_open | /**
     * Creates the temporary output file for the Hadoop RecordWriter.
*
* @param taskNumber The number of the parallel instance.
* @param numTasks The number of parallel tasks.
* @throws java.io.IOException
*/
@Override
public void open(int taskNumber, int numTasks) throws IOException {
// enforce sequential open() calls
synchronized (OPEN_MUTEX) {
if (Integer.toString(taskNumber + 1).length() > 6) {
throw new IOException("Task id too large.");
}
TaskAttemptID taskAttemptID =
TaskAttemptID.forName(
"attempt__0000_r_"
+ String.format(
"%"
+ (6
- Integer.toString(
taskNumber + 1)
.length())
+ "s",
" ")
.replace(" ", "0")
+ Integer.toString(taskNumber + 1)
+ "_0");
this.jobConf.set("mapred.task.id", taskAttemptID.toString());
this.jobConf.setInt("mapred.task.partition", taskNumber + 1);
// for hadoop 2.2
this.jobConf.set("mapreduce.task.attempt.id", taskAttemptID.toString());
this.jobConf.setInt("mapreduce.task.partition", taskNumber + 1);
this.context = new TaskAttemptContextImpl(this.jobConf, taskAttemptID);
this.outputCommitter = this.jobConf.getOutputCommitter();
JobContext jobContext = new JobContextImpl(this.jobConf, new JobID());
this.outputCommitter.setupJob(jobContext);
this.recordWriter =
this.mapredOutputFormat.getRecordWriter(
null,
this.jobConf,
Integer.toString(taskNumber + 1),
new HadoopDummyProgressable());
}
} | 3.68 |
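The nested String.format/replace above only zero-pads the 1-based task number to six digits inside a synthetic attempt id. A hedged sketch producing the same string for task numbers below one million with a single format call (the attempt-id layout is copied from the snippet, not from a Hadoop constant):

public class TaskAttemptIdPadding {
  static String attemptId(int taskNumber) {
    // Zero-pad the 1-based task number to six digits, e.g. taskNumber 6 -> "attempt__0000_r_000007_0".
    return String.format("attempt__0000_r_%06d_0", taskNumber + 1);
  }

  public static void main(String[] args) {
    System.out.println(attemptId(6)); // attempt__0000_r_000007_0
    System.out.println(attemptId(0)); // attempt__0000_r_000001_0
  }
}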
framework_VaadinService_addSessionInitListener | /**
* Adds a listener that gets notified when a new Vaadin service session is
* initialized for this service.
* <p>
* Because of the way different service instances share the same session,
* the listener is not necessarily notified immediately when the session is
* created but only when the first request for that session is handled by
* this service.
*
* @see #removeSessionInitListener(SessionInitListener)
* @see SessionInitListener
*
* @param listener
* the Vaadin service session initialization listener
* @return a registration object for removing the listener
* @since 8.0
*/
public Registration addSessionInitListener(SessionInitListener listener) {
sessionInitListeners.add(listener);
return () -> sessionInitListeners.remove(listener);
} | 3.68 |
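The removal handle returned above is simply a lambda over the underlying listener collection. A minimal, framework-free sketch of that registration pattern (names are illustrative; Vaadin's real Registration interface lives in com.vaadin.shared):

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class ListenerRegistry<L> {
  // Removal handle, mirroring the role of Vaadin's Registration.
  public interface Registration {
    void remove();
  }

  private final List<L> listeners = new CopyOnWriteArrayList<>();

  public Registration addListener(L listener) {
    listeners.add(listener);
    return () -> listeners.remove(listener);
  }

  public static void main(String[] args) {
    ListenerRegistry<Runnable> registry = new ListenerRegistry<>();
    Registration registration = registry.addListener(() -> System.out.println("session init"));
    registry.listeners.forEach(Runnable::run);     // fires once
    registration.remove();                         // listener is gone afterwards
    System.out.println(registry.listeners.size()); // 0
  }
}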
framework_DragSourceExtension_addDragStartListener | /**
* Attaches dragstart listener for the current drag source.
* {@link DragStartListener#dragStart(DragStartEvent)} is called when
* dragstart event happens on the client side.
*
* @param listener
* Listener to handle dragstart event.
* @return Handle to be used to remove this listener.
*/
public Registration addDragStartListener(DragStartListener<T> listener) {
return addListener(DragSourceState.EVENT_DRAGSTART,
DragStartEvent.class, listener,
DragStartListener.DRAGSTART_METHOD);
} | 3.68 |