name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hadoop_RenameOperation_renameFileToDest | /**
* The source is a file: rename it to the destination, which
* will be under the current destination path if that is a directory.
* @return the path of the object created.
* @throws IOException failure
*/
protected Path renameFileToDest() throws IOException {
final StoreContext storeContext = getStoreContext();
// the source is a file.
Path copyDestinationPath = destPath;
String copyDestinationKey = destKey;
S3ObjectAttributes sourceAttributes =
callbacks.createObjectAttributes(sourceStatus);
S3AReadOpContext readContext = callbacks.createReadContext(sourceStatus);
if (destStatus != null && destStatus.isDirectory()) {
// destination is a directory: build the final destination underneath
String newDestKey = maybeAddTrailingSlash(destKey);
String filename = sourceKey.substring(
storeContext.pathToKey(sourcePath.getParent()).length() + 1);
newDestKey = newDestKey + filename;
copyDestinationKey = newDestKey;
copyDestinationPath = storeContext.keyToPath(newDestKey);
}
// destination either does not exist or is a file to overwrite.
LOG.debug("rename: renaming file {} to {}", sourcePath,
copyDestinationPath);
copySource(
sourceKey,
sourceAttributes,
readContext,
copyDestinationPath,
copyDestinationKey);
bytesCopied.addAndGet(sourceStatus.getLen());
// delete the source
callbacks.deleteObjectAtPath(sourcePath, sourceKey, true);
return copyDestinationPath;
} | 3.68 |
flink_BlockingBackChannelBroker_instance | /** Retrieve singleton instance. */
public static Broker<BlockingBackChannel> instance() {
return INSTANCE;
} | 3.68 |
shardingsphere-elasticjob_JobAnnotationBuilder_generateJobConfiguration | /**
* Generate job configuration from @ElasticJobConfiguration.
*
* @param type the job class annotated with @ElasticJobConfiguration
* @return job configuration
*/
public static JobConfiguration generateJobConfiguration(final Class<?> type) {
ElasticJobConfiguration annotation = type.getAnnotation(ElasticJobConfiguration.class);
Preconditions.checkArgument(null != annotation, "@ElasticJobConfiguration not found by class '%s'.", type);
Preconditions.checkArgument(!Strings.isNullOrEmpty(annotation.jobName()), "@ElasticJobConfiguration jobName could not be empty by class '%s'.", type);
JobConfiguration.Builder jobConfigurationBuilder = JobConfiguration.newBuilder(annotation.jobName(), annotation.shardingTotalCount())
.shardingItemParameters(annotation.shardingItemParameters())
.cron(Strings.isNullOrEmpty(annotation.cron()) ? null : annotation.cron())
.timeZone(Strings.isNullOrEmpty(annotation.timeZone()) ? null : annotation.timeZone())
.jobParameter(annotation.jobParameter())
.monitorExecution(annotation.monitorExecution())
.failover(annotation.failover())
.misfire(annotation.misfire())
.maxTimeDiffSeconds(annotation.maxTimeDiffSeconds())
.reconcileIntervalMinutes(annotation.reconcileIntervalMinutes())
.jobShardingStrategyType(Strings.isNullOrEmpty(annotation.jobShardingStrategyType()) ? null : annotation.jobShardingStrategyType())
.jobExecutorThreadPoolSizeProviderType(Strings.isNullOrEmpty(annotation.jobExecutorThreadPoolSizeProviderType()) ? null : annotation.jobExecutorThreadPoolSizeProviderType())
.jobErrorHandlerType(Strings.isNullOrEmpty(annotation.jobErrorHandlerType()) ? null : annotation.jobErrorHandlerType())
.jobListenerTypes(annotation.jobListenerTypes())
.description(annotation.description())
.disabled(annotation.disabled())
.overwrite(annotation.overwrite());
for (Class<? extends JobExtraConfigurationFactory> clazz : annotation.extraConfigurations()) {
try {
Optional<JobExtraConfiguration> jobExtraConfig = clazz.newInstance().getJobExtraConfiguration();
jobExtraConfig.ifPresent(jobConfigurationBuilder::addExtraConfigurations);
} catch (IllegalAccessException | InstantiationException exception) {
throw (JobConfigurationException) new JobConfigurationException("new JobExtraConfigurationFactory instance by class '%s' failure", clazz).initCause(exception);
}
}
for (ElasticJobProp prop : annotation.props()) {
jobConfigurationBuilder.setProperty(prop.key(), prop.value());
}
return jobConfigurationBuilder.build();
} | 3.68 |
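As a usage sketch (the job class, its values, and the SimpleJob/ShardingContext types are assumptions drawn from the ElasticJob API, not from the snippet above; only annotation attributes exercised by the builder are shown):

```java
// Hypothetical job class for illustration; SimpleJob and ShardingContext are assumed ElasticJob API types.
@ElasticJobConfiguration(jobName = "myDemoJob", shardingTotalCount = 3, cron = "0/30 * * * * ?")
public class MyDemoJob implements SimpleJob {

    @Override
    public void execute(final ShardingContext shardingContext) {
        // process the shard identified by shardingContext.getShardingItem()
    }
}

// Derive the JobConfiguration from the annotated class:
JobConfiguration jobConfig = JobAnnotationBuilder.generateJobConfiguration(MyDemoJob.class);
```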
hbase_SyncTable_main | /**
* Main entry point.
*/
public static void main(String[] args) throws Exception {
int ret = ToolRunner.run(new SyncTable(HBaseConfiguration.create()), args);
System.exit(ret);
} | 3.68 |
morf_SqlServerDialect_getSqlForDateToYyyymmddHHmmss | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDateToYyyymmddHHmmss(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForDateToYyyymmddHHmmss(Function function) {
return String.format("REPLACE(REPLACE(REPLACE(CONVERT(VARCHAR(19),%s, 120),'-',''), ':', ''), ' ', '')", getSqlFrom(function.getArguments().get(0)));
} | 3.68 |
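For illustration, assuming getSqlFrom renders the function's single argument as a hypothetical column named createdDate, this method produces the fragment `REPLACE(REPLACE(REPLACE(CONVERT(VARCHAR(19),createdDate, 120),'-',''), ':', ''), ' ', '')`, i.e. the ODBC canonical timestamp (yyyy-mm-dd hh:mi:ss) with its separators stripped to yield yyyyMMddHHmmss.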
querydsl_MathExpressions_cosh | /**
* Create a {@code cosh(num)} expression
*
* <p>Returns the hyperbolic cosine of num radians.</p>
*
* @param num numeric expression
* @return cosh(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> cosh(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.COSH, num);
} | 3.68 |
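A minimal usage sketch (the path name "angle" is hypothetical; Expressions/NumberPath come from com.querydsl.core.types.dsl, while the MathExpressions import depends on the Querydsl module in use):

```java
// Hypothetical numeric path; in a real query this would usually be a property of a generated Q-type.
NumberPath<Double> angle = Expressions.numberPath(Double.class, "angle");
NumberExpression<Double> hyperbolicCosine = MathExpressions.cosh(angle);
// hyperbolicCosine can now be used as a projection or inside a predicate.
```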
hbase_AvlUtil_seekTo | /**
* Resets the iterator and seeks to the specified key.
* @param root the current root of the tree
* @param key the key for the node we are trying to find
* @param keyComparator the comparator to use to match node and key
*/
public void seekTo(final TNode root, final Object key,
final AvlKeyComparator<TNode> keyComparator) {
current = null;
height = 0;
TNode node = root;
while (node != null) {
if (keyComparator.compareKey(node, key) >= 0) {
if (node.avlLeft != null) {
stack[height++] = node;
node = (TNode) node.avlLeft;
} else {
current = node;
return;
}
} else {
if (node.avlRight != null) {
stack[height++] = node;
node = (TNode) node.avlRight;
} else {
if (height > 0) {
TNode parent = (TNode) stack[--height];
while (node == parent.avlRight) {
if (height == 0) {
current = null;
return;
}
node = parent;
parent = (TNode) stack[--height];
}
current = parent;
return;
}
current = null;
return;
}
}
}
} | 3.68 |
framework_WindowElement_close | /**
* Clicks the close button of the window.
*/
public void close() {
getCloseButton().click();
} | 3.68 |
hbase_HFileBlockIndex_writeIndexBlocks | /**
* Writes the root level and intermediate levels of the block index into the output stream,
* generating the tree from bottom up. Assumes that the leaf level has been inline-written to
* the disk if there is enough data for more than one leaf block. We iterate by breaking the
* current level of the block index, starting with the index of all leaf-level blocks, into
* chunks small enough to be written to disk, and generate its parent level, until we end up
* with a level small enough to become the root level. If the leaf level is not large enough,
* there is no inline block index anymore, so we only write that level of block index to disk as
* the root level.
* @param out FSDataOutputStream
* @return position at which we entered the root-level index.
*/
public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
if (curInlineChunk != null && curInlineChunk.getNumEntries() != 0) {
throw new IOException("Trying to write a multi-level block index, " + "but are "
+ curInlineChunk.getNumEntries() + " entries in the " + "last inline chunk.");
}
// We need to get mid-key metadata before we create intermediate
// indexes and overwrite the root chunk.
byte[] midKeyMetadata = numLevels > 1 ? rootChunk.getMidKeyMetadata() : null;
if (curInlineChunk != null) {
while (
rootChunk.getRootSize() > maxChunkSize
// HBASE-16288: if firstKey is larger than maxChunkSize we will loop indefinitely
&& rootChunk.getNumEntries() > minIndexNumEntries
// Sanity check. We will not hit this (minIndexNumEntries ^ 16) blocks can be addressed
&& numLevels < 16
) {
rootChunk = writeIntermediateLevel(out, rootChunk);
numLevels += 1;
}
}
// write the root level
long rootLevelIndexPos = out.getPos();
{
DataOutput blockStream = blockWriter.startWriting(BlockType.ROOT_INDEX);
indexBlockEncoder.encode(rootChunk, true, blockStream);
if (midKeyMetadata != null) blockStream.write(midKeyMetadata);
blockWriter.writeHeaderAndData(out);
if (cacheConf != null) {
cacheConf.getBlockCache().ifPresent(cache -> {
HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
cache.cacheBlock(new BlockCacheKey(nameForCaching, rootLevelIndexPos, true,
blockForCaching.getBlockType()), blockForCaching);
});
}
}
// Add root index block size
totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader();
totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader();
if (LOG.isTraceEnabled()) {
LOG.trace("Wrote a " + numLevels + "-level index with root level at pos "
+ rootLevelIndexPos + ", " + rootChunk.getNumEntries() + " root-level entries, "
+ totalNumEntries + " total entries, "
+ StringUtils.humanReadableInt(this.totalBlockOnDiskSize) + " on-disk size, "
+ StringUtils.humanReadableInt(totalBlockUncompressedSize) + " total uncompressed size.");
}
return rootLevelIndexPos;
} | 3.68 |
graphhopper_VectorTile_getFeaturesCount | /**
* <pre>
* The actual features in this tile.
* </pre>
*
* <code>repeated .vector_tile.Tile.Feature features = 2;</code>
*/
public int getFeaturesCount() {
if (featuresBuilder_ == null) {
return features_.size();
} else {
return featuresBuilder_.getCount();
}
} | 3.68 |
hadoop_ProcessIdFileReader_getProcessId | /**
* Get the process id from specified file path.
* Parses each line to find a valid number
* and returns the first one found.
* @return the process id read from the specified path, or null if none was found
* @throws IOException if the path is null or the pid file cannot be read
*/
public static String getProcessId(Path path) throws IOException {
if (path == null) {
throw new IOException("Trying to access process id from a null path");
}
LOG.debug("Accessing pid from pid file {}", path);
String processId = null;
BufferedReader bufReader = null;
try {
File file = new File(path.toString());
if (file.exists()) {
FileInputStream fis = new FileInputStream(file);
bufReader = new BufferedReader(new InputStreamReader(fis, "UTF-8"));
while (true) {
String line = bufReader.readLine();
if (line == null) {
break;
}
String temp = line.trim();
if (!temp.isEmpty()) {
if (Shell.WINDOWS) {
// On Windows, pid is expected to be a container ID, so find first
// line that parses successfully as a container ID.
try {
ContainerId.fromString(temp);
processId = temp;
break;
} catch (Exception e) {
// do nothing
}
}
else {
// Otherwise, find first line containing a numeric pid.
try {
long pid = Long.parseLong(temp);
if (pid > 0) {
processId = temp;
break;
}
} catch (Exception e) {
// do nothing
}
}
}
}
}
} finally {
if (bufReader != null) {
bufReader.close();
}
}
LOG.debug("Got pid {} from path {}",
(processId != null ? processId : "null"), path);
return processId;
} | 3.68 |
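A usage sketch (the pid file location is hypothetical):

```java
// Hypothetical pid file path; on Windows the file is expected to contain a container ID instead of a numeric pid.
Path pidFile = new Path("/var/run/myapp/container.pid");
String pid = ProcessIdFileReader.getProcessId(pidFile);
if (pid == null) {
    // no valid pid or container ID was found in the file
}
```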
flink_TaskStateManagerImpl_notifyCheckpointAborted | /** Tracking when some local state can be disposed. */
@Override
public void notifyCheckpointAborted(long checkpointId) {
localStateStore.abortCheckpoint(checkpointId);
} | 3.68 |
druid_Lexer_lexError | /**
* Report a lexical error. This implementation simply marks the current token as ERROR; the key and arguments are not used here.
*/
protected void lexError(String key, Object... args) {
token = ERROR;
} | 3.68 |
rocketmq-connect_RocketMQScheduledReporter_reportHistograms | /**
* Report histogram metrics.
*
* @param histograms histogram values keyed by metric name
*/
private void reportHistograms(SortedMap<MetricName, Double> histograms) {
histograms.forEach((name, value) -> {
send(name, value);
});
} | 3.68 |
morf_XmlDataSetProducer_tableExists | /**
* @see org.alfasoftware.morf.metadata.Schema#tableExists(java.lang.String)
*/
@Override
public boolean tableExists(String name) {
return xmlStreamProvider.tableExists(name);
} | 3.68 |
hadoop_MountTableRefresherService_refresh | /**
* Refresh mount table cache of this router as well as all other routers.
*
* @throws StateStoreUnavailableException if the state store is not available.
*/
public void refresh() throws StateStoreUnavailableException {
RouterStore routerStore = router.getRouterStateManager();
try {
routerStore.loadCache(true);
} catch (IOException e) {
LOG.warn("RouterStore load cache failed,", e);
}
List<RouterState> cachedRecords = routerStore.getCachedRecords();
List<MountTableRefresherThread> refreshThreads = new ArrayList<>();
for (RouterState routerState : cachedRecords) {
String adminAddress = routerState.getAdminAddress();
if (adminAddress == null || adminAddress.length() == 0) {
// this router has not enabled router admin.
continue;
}
// No use of calling refresh on router which is not running state
if (routerState.getStatus() != RouterServiceState.RUNNING) {
LOG.info(
"Router {} is not running. Mount table cache will not refresh.",
routerState.getAddress());
// remove if RouterClient is cached.
removeFromCache(adminAddress);
} else if (isLocalAdmin(adminAddress)) {
/*
* Local router's cache update does not require RPC call, so no need for
* RouterClient
*/
refreshThreads.add(getLocalRefresher(adminAddress));
} else {
try {
RouterClient client = routerClientsCache.get(adminAddress);
refreshThreads.add(new MountTableRefresherThread(
client.getMountTableManager(), adminAddress));
} catch (ExecutionException execExcep) {
// Can not connect, seems router is stopped now.
LOG.warn(ROUTER_CONNECT_ERROR_MSG, adminAddress, execExcep);
}
}
}
if (!refreshThreads.isEmpty()) {
invokeRefresh(refreshThreads);
}
} | 3.68 |
framework_PushRequestHandler_createPushHandler | /**
* Creates a push handler for this request handler.
* <p>
* Create your own request handler and override this method if you want to
* customize the {@link PushHandler}, e.g. to dynamically decide the suspend
* timeout.
*
* @since 7.6
* @param service
* the vaadin service
* @return the push handler to use for this service
*/
protected PushHandler createPushHandler(VaadinServletService service) {
return new PushHandler(service);
} | 3.68 |
framework_HierarchyMapper_doExpand | /**
* Expands the given item if it is collapsed and has children, and returns
* whether this method expanded the item.
*
* @param item
* the item to expand
* @return {@code true} if this method expanded the item, {@code false}
* otherwise
*/
private boolean doExpand(T item) {
boolean expanded = false;
if (!isExpanded(item) && hasChildren(item)) {
expandedItemIds.add(getDataProvider().getId(item));
expanded = true;
}
return expanded;
} | 3.68 |
AreaShop_CancellableRegionEvent_isCancelled | /**
* Check if the event has been cancelled.
* @return true if the event has been cancelled, otherwise false
*/
public boolean isCancelled() {
return cancelled;
} | 3.68 |
hadoop_OBSFileSystem_getMaxEntriesToDelete | /**
* Return maximum number of entries in one multi-object delete call.
*
* @return the maximum number of entries in one multi-object delete call
*/
int getMaxEntriesToDelete() {
return maxEntriesToDelete;
} | 3.68 |
hadoop_SnapshotDiffReportListing_getLastPath | /**
* @return {@link #lastPath}
*/
public byte[] getLastPath() {
return lastPath;
} | 3.68 |
hadoop_AwsStatisticsCollector_publish | /**
* This is the callback from the AWS SDK where metrics
* can be collected.
* @param metricCollection metrics collection
*/
@Override
public void publish(MetricCollection metricCollection) {
// MetricCollections are nested, so we need to traverse through their
// "children" to collect the desired metrics. E.g.:
//
// ApiCall
// βββββββββββββββββββββββββββββββββββββββββββ
// β MarshallingDuration=PT0.002808333S β
// β RetryCount=0 β
// β ApiCallSuccessful=true β
// β OperationName=DeleteObject β
// β ApiCallDuration=PT0.079801458S β
// β CredentialsFetchDuration=PT0.000007083S β
// β ServiceId=S3 β
// βββββββββββββββββββββββββββββββββββββββββββ
// ApiCallAttempt
// βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
// β SigningDuration=PT0.000319375S β
// β ServiceCallDuration=PT0.078908584S β
// β AwsExtendedRequestId=Kmvb2Sz8NuDgIFJPKzLLBhuHgQGmpAjVYBMrSHDvy= β
// β HttpStatusCode=204 β
// β BackoffDelayDuration=PT0S β
// β AwsRequestId=KR0XZCSX β
// βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
// HttpClient
// βββββββββββββββββββββββββββββββββββ
// β AvailableConcurrency=1 β
// β LeasedConcurrency=0 β
// β ConcurrencyAcquireDuration=PT0S β
// β PendingConcurrencyAcquires=0 β
// β MaxConcurrency=96 β
// β HttpClientName=Apache β
// βββββββββββββββββββββββββββββββββββ
final long[] throttling = {0};
recurseThroughChildren(metricCollection)
.collect(Collectors.toList())
.forEach(m -> {
counter(m, CoreMetric.RETRY_COUNT, retries -> {
collector.updateAwsRetryCount(retries);
collector.updateAwsRequestCount(retries + 1);
});
counter(m, HttpMetric.HTTP_STATUS_CODE, statusCode -> {
if (statusCode == HttpStatusCode.THROTTLING) {
throttling[0] += 1;
}
});
timing(m, CoreMetric.API_CALL_DURATION,
collector::noteAwsClientExecuteTime);
timing(m, CoreMetric.SERVICE_CALL_DURATION,
collector::noteAwsRequestTime);
timing(m, CoreMetric.MARSHALLING_DURATION,
collector::noteRequestMarshallTime);
timing(m, CoreMetric.SIGNING_DURATION,
collector::noteRequestSigningTime);
timing(m, CoreMetric.UNMARSHALLING_DURATION,
collector::noteResponseProcessingTime);
});
collector.updateAwsThrottleExceptionsCount(throttling[0]);
} | 3.68 |
hbase_Scan_setAllowPartialResults | /**
* Set whether the caller wants to see partial results when the server returns
* fewer cells than expected. This is helpful when scanning a huge row, to prevent OOM at the client. By
* default this value is false and the complete results will be assembled client side before being
* delivered to the caller.
* @see Result#mayHaveMoreCellsInRow()
* @see #setBatch(int)
*/
public Scan setAllowPartialResults(final boolean allowPartialResults) {
this.allowPartialResults = allowPartialResults;
return this;
} | 3.68 |
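A usage sketch (the table handle and the batch size are hypothetical):

```java
// Scan a table with very wide rows without assembling whole rows on the client.
Scan scan = new Scan();
scan.setBatch(100);                 // return at most 100 cells per Result
scan.setAllowPartialResults(true);  // deliver partial Results as they arrive

try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result result : scanner) {
        boolean partial = result.mayHaveMoreCellsInRow(); // true while the row is still being streamed
        // process the cells of this (possibly partial) Result
    }
}
```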
pulsar_Schema_AVRO | /**
* Create an Avro schema instance with a schema definition.
*
* @param schemaDefinition the definition of the schema
* @return a Schema instance
*/
static <T> Schema<T> AVRO(SchemaDefinition<T> schemaDefinition) {
return DefaultImplementation.getDefaultImplementation().newAvroSchema(schemaDefinition);
} | 3.68 |
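A usage sketch (User is a hypothetical POJO):

```java
// Hypothetical POJO whose fields define the Avro schema.
Schema<User> schema = Schema.AVRO(
        SchemaDefinition.<User>builder()
                .withPojo(User.class)
                .build());
// The schema is then passed when creating a producer or consumer, e.g.
// client.newProducer(schema).topic("my-topic").create();
```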
hbase_FirstKeyOnlyFilter_hasFoundKV | /** Returns true if first KV has been found. */
protected boolean hasFoundKV() {
return this.foundKV;
} | 3.68 |
framework_UIConnector_loadTheme | /**
* Loads the given theme and replaces the given link element with the new
* theme link element.
*
* @param newTheme
* The name of the new theme
* @param newThemeUrl
* The url of the new theme
* @param tagToReplace
* The link element to replace. If null, then the new link
* element is added at the end.
*/
private void loadTheme(final String newTheme, final String newThemeUrl,
final LinkElement tagToReplace) {
LinkElement newThemeLinkElement = Document.get().createLinkElement();
newThemeLinkElement.setRel("stylesheet");
newThemeLinkElement.setType("text/css");
newThemeLinkElement.setHref(newThemeUrl);
ResourceLoader.addOnloadHandler(newThemeLinkElement,
new ResourceLoadListener() {
@Override
public void onLoad(ResourceLoadEvent event) {
getLogger().info("Loading of " + newTheme + " from "
+ newThemeUrl + " completed");
if (tagToReplace != null) {
tagToReplace.getParentElement()
.removeChild(tagToReplace);
}
activateTheme(newTheme);
}
@Override
public void onError(ResourceLoadEvent event) {
getLogger().warning("Could not load theme from "
+ getThemeUrl(newTheme));
}
}, null);
if (tagToReplace != null) {
getHead().insertBefore(newThemeLinkElement, tagToReplace);
} else {
getHead().appendChild(newThemeLinkElement);
}
} | 3.68 |
framework_TabSheet_setTabPosition | /**
* Sets the position of the tab.
*
* @param tab
* The tab
* @param position
* The new position of the tab
*/
public void setTabPosition(Tab tab, int position) {
int oldPosition = getTabPosition(tab);
components.remove(oldPosition);
components.add(position, tab.getComponent());
getState().tabs.remove(oldPosition);
getState().tabs.add(position, ((TabSheetTabImpl) tab).getTabState());
} | 3.68 |
framework_VPopupTimeCalendar_makeDate | /**
* Creates a date based on the provided date values map.
*
* @param dateValues
* a map with date values to convert into a date
* @return the date based on the dateValues map
*/
@SuppressWarnings("deprecation")
public static Date makeDate(Map<DateTimeResolution, Integer> dateValues) {
if (dateValues.get(YEAR) == null) {
return null;
}
Date date = new Date(2000 - 1900, 0, 1);
Integer year = dateValues.get(YEAR);
if (year != null) {
date.setYear(year - 1900);
}
Integer month = dateValues.get(MONTH);
if (month != null) {
date.setMonth(month - 1);
}
Integer day = dateValues.get(DAY);
if (day != null) {
date.setDate(day);
}
Integer hour = dateValues.get(HOUR);
if (hour != null) {
date.setHours(hour);
}
Integer minute = dateValues.get(MINUTE);
if (minute != null) {
date.setMinutes(minute);
}
Integer second = dateValues.get(SECOND);
if (second != null) {
date.setSeconds(second);
}
return date;
} | 3.68 |
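A usage sketch building the resolution/value map (the values are illustrative):

```java
// Illustrative values; resolutions that are absent simply keep the defaults set by makeDate.
Map<DateTimeResolution, Integer> values = new EnumMap<>(DateTimeResolution.class);
values.put(DateTimeResolution.YEAR, 2021);
values.put(DateTimeResolution.MONTH, 6);   // June; makeDate subtracts 1 for Date's zero-based months
values.put(DateTimeResolution.DAY, 15);
values.put(DateTimeResolution.HOUR, 13);
values.put(DateTimeResolution.MINUTE, 30);

Date date = VPopupTimeCalendar.makeDate(values); // 2021-06-15 13:30:00
```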
framework_FileDropTarget_getParent | /**
* Returns the component this extension is attached to.
*
* @return Extended component.
*/
@Override
public T getParent() {
return super.getParent();
} | 3.68 |
framework_VComboBox_getItemOffsetHeight | /*
* Gets the height of one menu item.
*/
int getItemOffsetHeight() {
List<MenuItem> items = getItems();
return items != null && !items.isEmpty()
? items.get(0).getOffsetHeight()
: 0;
} | 3.68 |
flink_Task_transitionState | /**
* Try to transition the execution state from the current state to the new state.
*
* @param currentState of the execution
* @param newState of the execution
* @param cause of the transition change or null
* @return true if the transition was successful, otherwise false
*/
private boolean transitionState(
ExecutionState currentState, ExecutionState newState, Throwable cause) {
if (STATE_UPDATER.compareAndSet(this, currentState, newState)) {
if (cause == null) {
LOG.info(
"{} ({}) switched from {} to {}.",
taskNameWithSubtask,
executionId,
currentState,
newState);
} else if (ExceptionUtils.findThrowable(cause, CancelTaskException.class).isPresent()) {
if (LOG.isDebugEnabled()) {
LOG.debug(
"{} ({}) switched from {} to {} due to CancelTaskException:",
taskNameWithSubtask,
executionId,
currentState,
newState,
cause);
} else {
LOG.info(
"{} ({}) switched from {} to {} due to CancelTaskException.",
taskNameWithSubtask,
executionId,
currentState,
newState);
}
} else {
// proper failure of the task. record the exception as the root
// cause
failureCause = cause;
LOG.warn(
"{} ({}) switched from {} to {} with failure cause:",
taskNameWithSubtask,
executionId,
currentState,
newState,
cause);
}
return true;
} else {
return false;
}
} | 3.68 |
zxing_PDF417_getNumberOfPadCodewords | /**
* Calculates the number of pad codewords as described in 4.9.2 of ISO/IEC 15438:2001(E).
*
* @param m the number of source codewords prior to the addition of the Symbol Length
* Descriptor and any pad codewords
* @param k the number of error correction codewords
* @param c the number of columns in the symbol in the data region (excluding start, stop and
* row indicator codewords)
* @param r the number of rows in the symbol
* @return the number of pad codewords
*/
private static int getNumberOfPadCodewords(int m, int k, int c, int r) {
int n = c * r - k;
return n > m + 1 ? n - m - 1 : 0;
} | 3.68 |
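A short worked example of the formula: with m = 20 source codewords, k = 8 error correction codewords, c = 5 columns and r = 6 rows, the symbol holds n = 5 * 6 - 8 = 22 data codewords, so the method returns 22 - 20 - 1 = 1 pad codeword (one slot being reserved for the Symbol Length Descriptor). When n <= m + 1 there is no room left to pad and the method returns 0.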
flink_JobVertexBackPressureHandler_getBackPressureLevel | /**
* Returns the back pressure level for the given back pressure ratio.
*
* @param backPressureRatio Ratio of back pressures samples to total number of samples.
* @return Back pressure level ('ok', 'low', or 'high')
*/
private static JobVertexBackPressureInfo.VertexBackPressureLevel getBackPressureLevel(
double backPressureRatio) {
if (backPressureRatio <= 0.10) {
return JobVertexBackPressureInfo.VertexBackPressureLevel.OK;
} else if (backPressureRatio <= 0.5) {
return JobVertexBackPressureInfo.VertexBackPressureLevel.LOW;
} else {
return JobVertexBackPressureInfo.VertexBackPressureLevel.HIGH;
}
} | 3.68 |
flink_TwoPhaseCommitSinkFunction_recoverAndCommitInternal | /**
* This method must be the only place to call {@link #recoverAndCommit(Object)} to ensure that
* the configuration parameters {@link #transactionTimeout} and {@link
* #ignoreFailuresAfterTransactionTimeout} are respected.
*/
private void recoverAndCommitInternal(TransactionHolder<TXN> transactionHolder) {
try {
logWarningIfTimeoutAlmostReached(transactionHolder);
recoverAndCommit(transactionHolder.handle);
} catch (final Exception e) {
final long elapsedTime = clock.millis() - transactionHolder.transactionStartTime;
if (ignoreFailuresAfterTransactionTimeout && elapsedTime > transactionTimeout) {
LOG.error(
"Error while committing transaction {}. "
+ "Transaction has been open for longer than the transaction timeout ({})."
+ "Commit will not be attempted again. Data loss might have occurred.",
transactionHolder.handle,
transactionTimeout,
e);
} else {
throw e;
}
}
} | 3.68 |
hadoop_ListResultEntrySchema_name | /**
* Get the name value.
*
* @return the name value
*/
public String name() {
return name;
} | 3.68 |
framework_SQLContainer_commit | /**
* Commits all the changes, additions and removals made to the items of this
* container.
*
* @throws UnsupportedOperationException
* @throws SQLException
*/
public void commit() throws UnsupportedOperationException, SQLException {
try {
getLogger().log(Level.FINER,
"Commiting changes through delegate...");
queryDelegate.beginTransaction();
/* Perform buffered deletions */
for (RowItem item : removedItems.values()) {
try {
if (!queryDelegate.removeRow(item)) {
throw new SQLException(
"Removal failed for row with ID: "
+ item.getId());
}
} catch (IllegalArgumentException e) {
throw new SQLException(
"Removal failed for row with ID: " + item.getId(),
e);
}
}
/* Perform buffered modifications */
for (RowItem item : modifiedItems) {
if (!removedItems.containsKey(item.getId())) {
if (queryDelegate.storeRow(item) > 0) {
/*
* Also reset the modified state in the item in case it
* is reused e.g. in a form.
*/
item.commit();
} else {
queryDelegate.rollback();
refresh();
throw new ConcurrentModificationException(
"Item with the ID '" + item.getId()
+ "' has been externally modified.");
}
}
}
/* Perform buffered additions */
for (RowItem item : addedItems) {
queryDelegate.storeRow(item);
}
queryDelegate.commit();
removedItems.clear();
addedItems.clear();
modifiedItems.clear();
refresh();
if (notificationsEnabled) {
CacheFlushNotifier.notifyOfCacheFlush(this);
}
} catch (SQLException e) {
queryDelegate.rollback();
throw e;
} catch (OptimisticLockException e) {
queryDelegate.rollback();
throw e;
}
} | 3.68 |
flink_BlobServer_moveTempFileToStore | /**
* Moves the temporary <tt>incomingFile</tt> to its permanent location where it is available for
* use.
*
* @param incomingFile temporary file created during transfer
* @param jobId ID of the job this blob belongs to or <tt>null</tt> if job-unrelated
* @param digest BLOB content digest, i.e. hash
* @param blobType whether this file is a permanent or transient BLOB
* @return unique BLOB key that identifies the BLOB on the server
* @throws IOException thrown if an I/O error occurs while moving the file or uploading it to
* the HA store
*/
BlobKey moveTempFileToStore(
File incomingFile, @Nullable JobID jobId, byte[] digest, BlobKey.BlobType blobType)
throws IOException {
int retries = 10;
int attempt = 0;
while (true) {
// add unique component independent of the BLOB content
BlobKey blobKey = BlobKey.createKey(blobType, digest);
File storageFile = BlobUtils.getStorageLocation(storageDir.deref(), jobId, blobKey);
// try again until the key is unique (put the existence check into the lock!)
readWriteLock.writeLock().lock();
try {
if (!storageFile.exists()) {
BlobUtils.moveTempFileToStore(
incomingFile,
jobId,
blobKey,
storageFile,
LOG,
blobKey instanceof PermanentBlobKey ? blobStore : null);
// add TTL for transient BLOBs:
if (blobKey instanceof TransientBlobKey) {
// must be inside read or write lock to add a TTL
blobExpiryTimes.put(
Tuple2.of(jobId, (TransientBlobKey) blobKey),
System.currentTimeMillis() + cleanupInterval);
}
return blobKey;
}
} finally {
readWriteLock.writeLock().unlock();
}
++attempt;
if (attempt >= retries) {
String message =
"Failed to find a unique key for BLOB of job "
+ jobId
+ " (last tried "
+ storageFile.getAbsolutePath()
+ ".";
LOG.error(message + " No retries left.");
throw new IOException(message);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Trying to find a unique key for BLOB of job {} (retry {}, last tried {})",
jobId,
attempt,
storageFile.getAbsolutePath());
}
}
}
} | 3.68 |
rocketmq-connect_RocketMQScheduledReporter_reportCounters | /**
* Report counter metrics.
*
* @param counters counter values keyed by metric name
*/
private void reportCounters(SortedMap<MetricName, Long> counters) {
counters.forEach((name, value) -> {
send(name, Double.parseDouble(value.toString()));
});
} | 3.68 |
hudi_HoodieRealtimeRecordReaderUtils_avroToArrayWritable | /**
* Convert the projected read from delta record into an array writable.
*/
public static Writable avroToArrayWritable(Object value, Schema schema) {
return avroToArrayWritable(value, schema, false);
} | 3.68 |
hudi_CompactionUtils_getEarliestInstantToRetainForCompaction | /**
* Gets the earliest instant to retain for MOR compaction.
* If there is no completed compaction,
* num delta commits >= "hoodie.compact.inline.max.delta.commits"
* If there is a completed compaction,
* num delta commits after latest completed compaction >= "hoodie.compact.inline.max.delta.commits"
*
* @param activeTimeline Active timeline of a table.
* @param maxDeltaCommits Maximum number of delta commits that trigger the compaction plan,
* i.e., "hoodie.compact.inline.max.delta.commits".
* @return the earliest instant to keep for MOR compaction.
*/
public static Option<HoodieInstant> getEarliestInstantToRetainForCompaction(
HoodieActiveTimeline activeTimeline, int maxDeltaCommits) {
Option<Pair<HoodieTimeline, HoodieInstant>> deltaCommitsInfoOption =
CompactionUtils.getDeltaCommitsSinceLatestCompaction(activeTimeline);
if (deltaCommitsInfoOption.isPresent()) {
Pair<HoodieTimeline, HoodieInstant> deltaCommitsInfo = deltaCommitsInfoOption.get();
HoodieTimeline deltaCommitTimeline = deltaCommitsInfo.getLeft();
int numDeltaCommits = deltaCommitTimeline.countInstants();
if (numDeltaCommits < maxDeltaCommits) {
return Option.of(deltaCommitsInfo.getRight());
} else {
// delta commits with the last one to keep
List<HoodieInstant> instants = deltaCommitTimeline.getInstantsAsStream()
.limit(numDeltaCommits - maxDeltaCommits + 1).collect(Collectors.toList());
return Option.of(instants.get(instants.size() - 1));
}
}
return Option.empty();
} | 3.68 |
hadoop_BatchedRequests_getIteratorType | /**
* Get Iterator type.
* @return Iterator type.
*/
public IteratorType getIteratorType() {
return iteratorType;
} | 3.68 |
framework_ColorPickerSelect_createAllColors | /**
* Creates all the colors.
*
* @param rows
* the number of rows in the grid
* @param columns
* the number of columns in the grid
*
* @return the color[][] grid
*/
private Color[][] createAllColors(int rows, int columns) {
Color[][] colors = new Color[rows][columns];
for (int row = 0; row < rows; row++) {
for (int col = 0; col < columns; col++) {
if (row < rows - 1) {
// Create the color grid by varying the saturation and value
// Calculate new hue value
float hue = (float) col / (float) columns;
float saturation = 1f;
float value = 1f;
// For the upper half use value=1 and variable
// saturation
if (row < rows / 2) {
saturation = (row + 1f) / (rows / 2f);
} else {
value = 1f - (row - rows / 2f) / (rows / 2f);
}
colors[row][col] = new Color(
Color.HSVtoRGB(hue, saturation, value));
} else {
// The last row should have the black&white gradient
float hue = 0f;
float saturation = 0f;
float value = 1f - (float) col / (float) columns;
colors[row][col] = new Color(
Color.HSVtoRGB(hue, saturation, value));
}
}
}
return colors;
} | 3.68 |
pulsar_ConsumerConfiguration_setSubscriptionType | /**
* Select the subscription type to be used when subscribing to the topic.
* <p>
* Default is {@link SubscriptionType#Exclusive}
*
* @param subscriptionType
* the subscription type value
*/
public ConsumerConfiguration setSubscriptionType(SubscriptionType subscriptionType) {
Objects.requireNonNull(subscriptionType);
conf.setSubscriptionType(subscriptionType);
return this;
} | 3.68 |
hudi_CleanPlanner_getLastCompletedCommitTimestamp | /**
* Returns the last completed commit timestamp before clean.
*/
public String getLastCompletedCommitTimestamp() {
if (commitTimeline.lastInstant().isPresent()) {
return commitTimeline.lastInstant().get().getTimestamp();
} else {
return "";
}
} | 3.68 |
hadoop_ManifestSuccessData_dumpDiagnostics | /**
* Dump the diagnostics (if any) to a string.
* @param prefix prefix before every entry
* @param middle string between key and value
* @param suffix suffix to each entry
* @return the dumped string
*/
public String dumpDiagnostics(String prefix, String middle, String suffix) {
return joinMap(diagnostics, prefix, middle, suffix);
} | 3.68 |
hudi_BaseHoodieClient_close | /**
* Releases any resources used by the client.
*/
@Override
public void close() {
stopEmbeddedServerView(true);
this.context.setJobStatus("", "");
this.heartbeatClient.close();
this.txnManager.close();
} | 3.68 |
graphhopper_DijkstraOneToMany_getMemoryUsageAsString | /**
* List currently used memory in MB (approximately)
*/
public String getMemoryUsageAsString() {
long len = weights.length;
return ((8L + 4L + 4L) * len
+ changedNodes.getCapacity() * 4L
+ heap.getCapacity() * (4L + 4L)) / Helper.MB
+ "MB";
} | 3.68 |
hadoop_LoggingAuditor_prepareActiveContext | /**
* Get/Prepare the active context for a span.
* @return the common audit context.
*/
private CommonAuditContext prepareActiveContext() {
return currentAuditContext();
} | 3.68 |
framework_UIDL_getIntVariable | /**
* Gets the value of the named variable.
*
* @param name
* the name of the variable
* @return the value of the variable
*/
public int getIntVariable(String name) {
return var().getInt(name);
} | 3.68 |
hmily_SpringCloudHmilyOrderApplication_main | /**
* main.
*
* @param args args
*/
public static void main(final String[] args) {
SpringApplication.run(SpringCloudHmilyOrderApplication.class, args);
} | 3.68 |
hadoop_GetContentSummaryOperation_probePathStatusOrNull | /**
* Get the status of a path, downgrading FNFE to null result.
* @param p path to probe.
* @param probes probes to exec
* @return the status or null
* @throws IOException failure other than FileNotFound
*/
private S3AFileStatus probePathStatusOrNull(final Path p,
final Set<StatusProbeEnum> probes) throws IOException {
try {
return callbacks.probePathStatus(p, probes);
} catch (FileNotFoundException fnfe) {
return null;
}
} | 3.68 |
hbase_AsyncTableRegionLocator_getEndKeys | /**
* Gets the ending row key for every region in the currently open table.
* <p>
* This is mainly useful for the MapReduce integration.
* @return Array of region ending row keys
*/
default CompletableFuture<List<byte[]>> getEndKeys() {
return getStartEndKeys().thenApply(
startEndKeys -> startEndKeys.stream().map(Pair::getSecond).collect(Collectors.toList()));
} | 3.68 |
hadoop_AHSLogsPage_preHead | /*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSView#
* preHead(org.apache.hadoop .yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override
protected void preHead(Page.HTML<__> html) {
String logEntity = $(ENTITY_STRING);
if (logEntity == null || logEntity.isEmpty()) {
logEntity = $(CONTAINER_ID);
}
if (logEntity == null || logEntity.isEmpty()) {
logEntity = "UNKNOWN";
}
commonPreHead(html);
} | 3.68 |
hbase_CommonFSUtils_checkShortCircuitReadBufferSize | /**
* Check if short circuit read buffer size is set and if not, set it to hbase value.
* @param conf must not be null
*/
public static void checkShortCircuitReadBufferSize(final Configuration conf) {
final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
final int notSet = -1;
// DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY is only defined in h2
final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
int size = conf.getInt(dfsKey, notSet);
// If a size is set, return -- we will use it.
if (size != notSet) {
return;
}
// But short circuit buffer size is normally not set. Put in place the hbase wanted size.
int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
} | 3.68 |
hudi_EmbeddedTimelineService_stopForBasePath | /**
* Stops the embedded timeline service for the given base path. If a timeline service is managing multiple tables, it will only be shutdown once all tables have been stopped.
* @param basePath base path of the table to stop the service for
*/
public void stopForBasePath(String basePath) {
synchronized (SERVICE_LOCK) {
basePaths.remove(basePath);
if (basePaths.isEmpty()) {
RUNNING_SERVICES.remove(timelineServiceIdentifier);
}
}
if (this.server != null) {
this.server.unregisterBasePath(basePath);
}
// continue rest of shutdown outside of the synchronized block to avoid excess blocking
if (basePaths.isEmpty() && null != server) {
LOG.info("Closing Timeline server");
this.server.close();
METRICS_REGISTRY.set(NUM_EMBEDDED_TIMELINE_SERVERS, NUM_SERVERS_RUNNING.decrementAndGet());
this.server = null;
this.viewManager = null;
LOG.info("Closed Timeline server");
}
} | 3.68 |
dubbo_CodecSupport_getNullBytesOf | /**
* Get the serialized byte[] of a null object for the given Serialization from the cache,
* generating and caching it first if it is not present.
*
* @param s the Serialization instance
* @return the serialized result of a null object
*/
public static byte[] getNullBytesOf(Serialization s) {
return ConcurrentHashMapUtils.computeIfAbsent(ID_NULLBYTES_MAP, s.getContentTypeId(), k -> {
// Pre-generated Null object bytes
ByteArrayOutputStream baos = new ByteArrayOutputStream();
byte[] nullBytes = new byte[0];
try {
ObjectOutput out = s.serialize(null, baos);
out.writeObject(null);
out.flushBuffer();
nullBytes = baos.toByteArray();
baos.close();
} catch (Exception e) {
logger.warn(
TRANSPORT_FAILED_SERIALIZATION,
"",
"",
"Serialization extension " + s.getClass().getName()
+ " not support serializing null object, return an empty bytes instead.");
}
return nullBytes;
});
} | 3.68 |
hbase_HBaseServerException_setServerOverloaded | /**
* Necessary for parsing RemoteException on client side
* @param serverOverloaded True if server was overloaded when exception was thrown
*/
public void setServerOverloaded(boolean serverOverloaded) {
this.serverOverloaded = serverOverloaded;
} | 3.68 |
framework_Html5File_setStreamVariable | /**
* Sets the {@link StreamVariable} into which the file contents will be
* written. Usage of StreamVariable is similar to {@link Upload} component.
* <p>
* If the {@link StreamVariable} is not set in the {@link DropHandler} the
* file contents will not be sent to server.
* <p>
* <em>Note!</em> Receiving file contents is an experimental feature that depends
* on HTML5 APIs. It is supported only by modern web browsers like Firefox
* 3.6 and above and recent webkit based browsers (Safari 5, Chrome 6) at
* this time.
*
* @param streamVariable
* the callback that returns stream where the implementation
* writes the file contents as it arrives.
*/
public void setStreamVariable(StreamVariable streamVariable) {
this.streamVariable = streamVariable;
} | 3.68 |
framework_DropTargetExtensionConnector_setDropEffect | /**
* Set the drop effect for the dragenter / dragover event, if one has been
* set from server side.
* <p>
* From Moz Foundation: "You can modify the dropEffect property during the
* dragenter or dragover events, if for example, a particular drop target
* only supports certain operations. You can modify the dropEffect property
* to override the user effect, and enforce a specific drop operation to
* occur. Note that this effect must be one listed within the effectAllowed
* property. Otherwise, it will be set to an alternate value that is
* allowed."
*
* @param event
* the dragenter or dragover event.
*/
private void setDropEffect(NativeEvent event) {
if (getState().dropEffect != null) {
DataTransfer.DropEffect dropEffect = DataTransfer.DropEffect
// the valueOf() needs to have equal string and name()
// doesn't return in all upper case
.valueOf(getState().dropEffect.name()
.toUpperCase(Locale.ROOT));
event.getDataTransfer().setDropEffect(dropEffect);
}
} | 3.68 |
flink_TumbleWithSize_on | /**
* Specifies the time attribute on which rows are grouped.
*
* <p>For streaming tables you can specify grouping by a event-time or processing-time
* attribute.
*
* <p>For batch tables you can specify grouping on a timestamp or long attribute.
*
* @param timeField time attribute for streaming and batch tables
* @return a tumbling window on event-time
*/
public TumbleWithSizeOnTime on(Expression timeField) {
return new TumbleWithSizeOnTime(timeField, size);
} | 3.68 |
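A Table API usage sketch (the orders table and its rowtime/user/amount fields are hypothetical; $ and lit are the static helpers from org.apache.flink.table.api.Expressions):

```java
// Ten-minute tumbling windows over the event-time attribute "rowtime".
Table result = orders
        .window(Tumble.over(lit(10).minutes()).on($("rowtime")).as("w"))
        .groupBy($("w"), $("user"))
        .select($("user"), $("amount").sum());
```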
hbase_HFileReaderImpl_positionForNextBlock | /**
* Set ourselves up for the next 'next' invocation, setting up the next block.
* @return true if there is more to read, else false if at the end
*/
private boolean positionForNextBlock() throws IOException {
// Methods are small so they get inlined because they are 'hot'.
long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset();
if (this.curBlock.getOffset() >= lastDataBlockOffset) {
setNonSeekedState();
return false;
}
return isNextBlock();
} | 3.68 |
hudi_SparkRDDReadClient_addHoodieSupport | /**
* Adds support for accessing Hoodie built tables from SparkSQL, as you normally would.
*
* @return SparkConf object to be used to construct the SparkContext by caller
*/
public static SparkConf addHoodieSupport(SparkConf conf) {
conf.set("spark.sql.hive.convertMetastoreParquet", "false");
return conf;
} | 3.68 |
flink_IterativeStream_closeWith | /**
* Closes the iteration. This method defines the end of the iterative program part that will
* be fed back to the start of the iteration as the second input in the {@link
* ConnectedStreams}.
*
* @param feedbackStream {@link DataStream} that will be used as second input to the
* iteration head.
* @return The feedback stream.
*/
public DataStream<F> closeWith(DataStream<F> feedbackStream) {
Collection<Transformation<?>> predecessors =
feedbackStream.getTransformation().getTransitivePredecessors();
if (!predecessors.contains(this.coFeedbackTransformation)) {
throw new UnsupportedOperationException(
"Cannot close an iteration with a feedback DataStream that does not originate from said iteration.");
}
coFeedbackTransformation.addFeedbackEdge(feedbackStream.getTransformation());
return feedbackStream;
} | 3.68 |
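The snippet above is from the co-feedback (ConnectedStreams) variant; as an illustrative sketch of the general iterate/closeWith pattern on a plain DataStream (the stream and transformations are hypothetical):

```java
// Values loop through the body until they drop to zero or below.
IterativeStream<Long> iteration = someLongStream.iterate();

DataStream<Long> feedback = iteration
        .filter(value -> value > 0)
        .map(value -> value - 1);
iteration.closeWith(feedback); // must originate from this iteration

DataStream<Long> output = iteration.filter(value -> value <= 0);
```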
flink_HiveParserSemanticAnalyzer_processLateralView | /**
* Given the AST with TOK_LATERAL_VIEW as the root, get the alias for the table or subquery in
* the lateral view and also make a mapping from the alias to all the lateral view AST's.
*/
private String processLateralView(HiveParserQB qb, HiveParserASTNode lateralView)
throws SemanticException {
int numChildren = lateralView.getChildCount();
assert (numChildren == 2);
HiveParserASTNode next = (HiveParserASTNode) lateralView.getChild(1);
String alias;
switch (next.getToken().getType()) {
case HiveASTParser.TOK_TABREF:
alias = processTable(qb, next);
break;
case HiveASTParser.TOK_SUBQUERY:
alias = processSubQuery(qb, next);
break;
case HiveASTParser.TOK_LATERAL_VIEW:
case HiveASTParser.TOK_LATERAL_VIEW_OUTER:
alias = processLateralView(qb, next);
break;
default:
throw new SemanticException(
HiveParserErrorMsg.getMsg(
ErrorMsg.LATERAL_VIEW_INVALID_CHILD, lateralView));
}
alias = alias.toLowerCase();
qb.getParseInfo().addLateralViewForAlias(alias, lateralView);
qb.addAlias(alias);
return alias;
} | 3.68 |
hbase_HFileArchiveUtil_getTableName | /*
* @return table name given archive file path
*/
public static TableName getTableName(Path archivePath) {
Path p = archivePath;
String tbl = null;
// namespace is the 4th parent of file
for (int i = 0; i < 5; i++) {
if (p == null) return null;
if (i == 3) tbl = p.getName();
p = p.getParent();
}
if (p == null) return null;
return TableName.valueOf(p.getName(), tbl);
} | 3.68 |
framework_VUI_windowSizeMaybeChanged | /**
* Called when the window or parent div might have been resized.
*
* This immediately checks the sizes of the window and the parent div (if
* monitoring it) and triggers layout recalculation if they have changed.
*
* @param newWindowWidth
* The new width of the window
* @param newWindowHeight
* The new height of the window
*
* @deprecated use {@link #performSizeCheck()}
*/
@Deprecated
protected void windowSizeMaybeChanged(int newWindowWidth,
int newWindowHeight) {
if (connection == null) {
// Connection is null if the timer fires before the first UIDL
// update
return;
}
boolean changed = false;
ComponentConnector connector = ConnectorMap.get(connection)
.getConnector(this);
if (windowWidth != newWindowWidth) {
windowWidth = newWindowWidth;
changed = true;
connector.getLayoutManager().reportOuterWidth(connector,
newWindowWidth);
getLogger().info("New window width: " + windowWidth);
}
if (windowHeight != newWindowHeight) {
windowHeight = newWindowHeight;
changed = true;
connector.getLayoutManager().reportOuterHeight(connector,
newWindowHeight);
getLogger().info("New window height: " + windowHeight);
}
Element parentElement = getElement().getParentElement();
if (isMonitoringParentSize() && parentElement != null) {
// check also for parent size changes
int newParentWidth = parentElement.getClientWidth();
int newParentHeight = parentElement.getClientHeight();
if (parentWidth != newParentWidth) {
parentWidth = newParentWidth;
changed = true;
getLogger().info("New parent width: " + parentWidth);
}
if (parentHeight != newParentHeight) {
parentHeight = newParentHeight;
changed = true;
getLogger().info("New parent height: " + parentHeight);
}
}
if (changed) {
/*
* If the window size has changed, layout the VView again and send
* new size to the server if the size changed. (Just checking VView
* size would cause us to ignore cases when a relatively sized VView
* should shrink as the content's size is fixed and would thus not
* automatically shrink.)
*/
getLogger().info(
"Running layout functions due to window or parent resize");
// update size to avoid (most) redundant re-layout passes
// there can still be an extra layout recalculation if webkit
// overflow fix updates the size in a deferred block
if (isMonitoringParentSize() && parentElement != null) {
parentWidth = parentElement.getClientWidth();
parentHeight = parentElement.getClientHeight();
}
sendClientResized();
LayoutManager layoutManager = connector.getLayoutManager();
if (layoutManager.isLayoutRunning()) {
layoutManager.layoutLater();
} else {
layoutManager.layoutNow();
}
}
} | 3.68 |
querydsl_MapExpressionBase_containsKey | /**
* Create a {@code key in keys(this)} expression
*
* @param key key
* @return expression
*/
public final BooleanExpression containsKey(K key) {
return Expressions.booleanOperation(Ops.CONTAINS_KEY, mixin, ConstantImpl.create(key));
} | 3.68 |
pulsar_PerfClientUtils_printJVMInformation | /**
* Print useful JVM information; you need this information in order to be able
* to compare the results of executions in different environments.
* @param log the logger to print the information to
*/
public static void printJVMInformation(Logger log) {
log.info("JVM args {}", ManagementFactory.getRuntimeMXBean().getInputArguments());
log.info("Netty max memory (PlatformDependent.maxDirectMemory()) {}",
FileUtils.byteCountToDisplaySize(DirectMemoryUtils.jvmMaxDirectMemory()));
log.info("JVM max heap memory (Runtime.getRuntime().maxMemory()) {}",
FileUtils.byteCountToDisplaySize(Runtime.getRuntime().maxMemory()));
} | 3.68 |
morf_TableLoader_truncate | /**
* Empties the specified table.
*
* @param table the table to empty
*/
private void truncate(Table table) {
// Get our own table definition based on the name to avoid case sensitivity bugs on some database vendors
log.debug("Clearing table [" + table.getName() + "]");
// Try to use a truncate
try {
runRecoverably(() ->
sqlExecutor.execute(sqlDialect.truncateTableStatements(table), connection)
);
} catch (SQLException | RuntimeSqlException e) {
// If that has failed try to use a delete
log.debug("Failed to truncate table, attempting a delete", e);
sqlExecutor.execute(sqlDialect.deleteAllFromTableStatements(table), connection);
}
} | 3.68 |
zxing_PDF417Common_getBitCountSum | /**
* @param moduleBitCount values to sum
* @return sum of values
* @deprecated call {@link MathUtils#sum(int[])}
*/
@Deprecated
public static int getBitCountSum(int[] moduleBitCount) {
return MathUtils.sum(moduleBitCount);
} | 3.68 |
hbase_RegionServerSpaceQuotaManager_getActiveEnforcements | /**
* Creates an object well-suited for the RegionServer to use in verifying active policies.
*/
public ActivePolicyEnforcement getActiveEnforcements() {
return new ActivePolicyEnforcement(copyActiveEnforcements(), copyQuotaSnapshots(), rsServices);
} | 3.68 |
hadoop_StageConfig_withTaskAttemptDir | /**
* Set the Task attempt directory.
* @param value new value
* @return this
*/
public StageConfig withTaskAttemptDir(final Path value) {
checkOpen();
taskAttemptDir = value;
return this;
} | 3.68 |
flink_HardwareDescription_getSizeOfManagedMemory | /**
* Returns the size of the memory managed by the system for caching, hashing, sorting, ...
*
* @return The size of the memory managed by the system.
*/
public long getSizeOfManagedMemory() {
return this.sizeOfManagedMemory;
} | 3.68 |
framework_PropertysetItem_getItemPropertyIds | /**
* Gets the collection of IDs of all Properties stored in the Item.
*
* @return unmodifiable collection containing IDs of the Properties stored
* the Item
*/
@Override
public Collection<?> getItemPropertyIds() {
return Collections.unmodifiableCollection(list);
} | 3.68 |
hudi_AbstractStreamWriteFunction_instantToWrite | /**
* Prepares the instant time to write with for next checkpoint.
*
* @param hasData Whether the task has buffering data
* @return The instant time
*/
protected String instantToWrite(boolean hasData) {
String instant = lastPendingInstant();
// if exactly-once semantics turns on,
// waits for the checkpoint notification until the checkpoint timeout threshold hits.
TimeWait timeWait = TimeWait.builder()
.timeout(config.getLong(FlinkOptions.WRITE_COMMIT_ACK_TIMEOUT))
.action("instant initialize")
.build();
while (confirming) {
// wait condition:
// 1. there is no inflight instant
// 2. the inflight instant does not change and the checkpoint has buffering data
if (instant == null || invalidInstant(instant, hasData)) {
// sleep for a while
timeWait.waitFor();
// refresh the inflight instant
instant = lastPendingInstant();
} else {
// the pending instant changed, that means the last instant was committed
// successfully.
confirming = false;
}
}
return instant;
} | 3.68 |
flink_GSCommitRecoverableSerializer_getVersion | /**
* The serializer version. Note that, if this changes, then the version of {@link
* GSResumeRecoverableSerializer} must also change, because it uses this class to serialize
* itself, in part.
*
* @return The serializer version.
*/
@Override
public int getVersion() {
return SERIALIZER_VERSION;
} | 3.68 |
hbase_MetricsSource_setWALReaderEditsBufferUsage | /**
* Sets the amount of memory in bytes used in this RegionServer by edits pending replication.
*/
public void setWALReaderEditsBufferUsage(long usageInBytes) {
globalSourceSource.setWALReaderEditsBufferBytes(usageInBytes);
} | 3.68 |
hadoop_SinglePendingCommit_getUploadId | /** @return ID of the upload. */
public String getUploadId() {
return uploadId;
} | 3.68 |
hudi_HoodieDataBlock_getRecordIterator | /**
* Batch get of keys of interest. Implementation can choose to either do full scan and return matched entries or
* do a seek based parsing and return matched entries.
*
* @param keys keys of interest.
* @return List of IndexedRecords for the keys of interest.
* @throws IOException in case of failures encountered when reading/parsing records
*/
public final <T> ClosableIterator<HoodieRecord<T>> getRecordIterator(List<String> keys, boolean fullKey, HoodieRecordType type) throws IOException {
boolean fullScan = keys.isEmpty();
if (enablePointLookups && !fullScan) {
return lookupRecords(keys, fullKey);
}
// Otherwise, we fetch all the records and filter out all the records, but the
// ones requested
ClosableIterator<HoodieRecord<T>> allRecords = getRecordIterator(type);
if (fullScan) {
return allRecords;
}
HashSet<String> keySet = new HashSet<>(keys);
return FilteringIterator.getInstance(allRecords, keySet, fullKey, this::getRecordKey);
} | 3.68 |
morf_IdTableTracker_activeNameFor | /**
* @see org.alfasoftware.morf.upgrade.TableNameResolver#activeNameFor(java.lang.String)
*/
@Override
public String activeNameFor(String tableName) {
if ("IDTABLE".equals(tableName))
return idTableName;
return tableName;
} | 3.68 |
rocketmq-connect_AbstractStateManagementService_put | /**
* Set the state of the task to the given value.
*
* @param status the status of the task
*/
@Override
public void put(TaskStatus status) {
sendTaskStatus(status, false);
} | 3.68 |
flink_HadoopBlockLocation_stripHostname | /**
* Looks for a domain suffix in a FQDN and strips it if present.
*
* @param originalHostname the original hostname, possibly an FQDN
* @return the stripped hostname without the domain suffix
*/
private static String stripHostname(final String originalHostname) {
// Check if the hostname domains the domain separator character
final int index = originalHostname.indexOf(DOMAIN_SEPARATOR);
if (index == -1) {
return originalHostname;
}
// Make sure we are not stripping an IPv4 address
final Matcher matcher = IPV4_PATTERN.matcher(originalHostname);
if (matcher.matches()) {
return originalHostname;
}
if (index == 0) {
throw new IllegalStateException(
"Hostname " + originalHostname + " starts with a " + DOMAIN_SEPARATOR);
}
return originalHostname.substring(0, index);
} | 3.68 |
querydsl_GenericExporter_setEntityAnnotation | /**
* Set the entity annotation
*
* @param entityAnnotation entity annotation
*/
public void setEntityAnnotation(Class<? extends Annotation> entityAnnotation) {
this.entityAnnotation = entityAnnotation;
} | 3.68 |
pulsar_AdditionalServlets_load | /**
* Load the additional servlets for the given <tt>servlet name</tt> list.
*
* @param conf the pulsar service configuration
* @return the collection of additional servlet
*/
public static AdditionalServlets load(PulsarConfiguration conf) throws IOException {
String additionalServletDirectory = conf.getProperties().getProperty(ADDITIONAL_SERVLET_DIRECTORY);
if (additionalServletDirectory == null) {
// Compatible with the current proxy configuration
additionalServletDirectory = conf.getProperties().getProperty(PROXY_ADDITIONAL_SERVLET_DIRECTORY);
}
String additionalServlets = conf.getProperties().getProperty(ADDITIONAL_SERVLETS);
if (additionalServlets == null) {
additionalServlets = conf.getProperties().getProperty(PROXY_ADDITIONAL_SERVLETS);
}
String narExtractionDirectory = conf.getProperties().getProperty(NAR_EXTRACTION_DIRECTORY);
if (StringUtils.isBlank(narExtractionDirectory)) {
narExtractionDirectory = NarClassLoader.DEFAULT_NAR_EXTRACTION_DIR;
}
if (additionalServletDirectory == null || additionalServlets == null) {
return null;
}
AdditionalServletDefinitions definitions =
AdditionalServletUtils.searchForServlets(additionalServletDirectory
, narExtractionDirectory);
ImmutableMap.Builder<String, AdditionalServletWithClassLoader> builder = ImmutableMap.builder();
String[] additionalServletsList = additionalServlets.split(",");
for (String servletName : additionalServletsList) {
AdditionalServletMetadata definition = definitions.servlets().get(servletName);
if (null == definition) {
throw new RuntimeException("No additional servlet is found for name `" + servletName
+ "`. Available additional servlet are : " + definitions.servlets());
}
AdditionalServletWithClassLoader servletWithClassLoader;
try {
servletWithClassLoader = AdditionalServletUtils.load(definition, narExtractionDirectory);
if (servletWithClassLoader != null) {
builder.put(servletName, servletWithClassLoader);
}
log.info("Successfully loaded additional servlet for name `{}`", servletName);
} catch (IOException e) {
log.error("Failed to load the additional servlet for name `" + servletName + "`", e);
throw new RuntimeException("Failed to load the additional servlet for name `" + servletName + "`");
}
}
Map<String, AdditionalServletWithClassLoader> servlets = builder.build();
if (servlets != null && !servlets.isEmpty()) {
return new AdditionalServlets(servlets);
}
return null;
} | 3.68 |
hbase_KeyValue_getKeyDataStructureSize | /**
* Computes the number of bytes that a <code>KeyValue</code> instance with the provided
* characteristics would take up in its underlying data structure for the key.
* @param rlength row length
* @param flength family length
* @param qlength qualifier length
* @return the key data structure length
*/
public static long getKeyDataStructureSize(int rlength, int flength, int qlength) {
return (long) KeyValue.KEY_INFRASTRUCTURE_SIZE + rlength + flength + qlength;
} | 3.68 |
querydsl_SQLExpressions_addMonths | /**
* Add the given amount of months to the date
*
* @param date date
* @param months months to add
* @return converted date
*/
public static <D extends Comparable> DateExpression<D> addMonths(DateExpression<D> date, int months) {
return Expressions.dateOperation(date.getType(), Ops.DateTimeOps.ADD_MONTHS, date, ConstantImpl.create(months));
} | 3.68 |
hbase_ServerRegionReplicaUtil_isRegionReplicaWaitForPrimaryFlushEnabled | /** Returns True if wait for primary to flush is enabled for user-space tables. */
public static boolean isRegionReplicaWaitForPrimaryFlushEnabled(Configuration conf) {
return conf.getBoolean(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
DEFAULT_REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH);
} | 3.68 |
hadoop_ExecutorServiceFuturePool_executeFunction | /**
* @param f function to run in future on executor pool
* @return future
* @throws java.util.concurrent.RejectedExecutionException can be thrown
* @throws NullPointerException if f param is null
*/
public Future<Void> executeFunction(final Supplier<Void> f) {
return executor.submit(f::get);
} | 3.68 |
flink_FreeingBufferRecycler_recycle | /**
* Frees the given memory segment.
*
* @param memorySegment The memory segment to be recycled.
*/
@Override
public void recycle(MemorySegment memorySegment) {
memorySegment.free();
} | 3.68 |
hadoop_IdentifierResolver_setInputWriterClass | /**
* Sets the {@link InputWriter} class.
*/
protected void setInputWriterClass(Class<? extends InputWriter>
inputWriterClass) {
this.inputWriterClass = inputWriterClass;
} | 3.68 |
morf_SqlDialect_getSqlForRowNumber | /**
* Produce SQL for getting the row number of the row in the partition
*
* @return a string representation of the SQL for getting the row number of the row in the partition.
*/
protected String getSqlForRowNumber(){
return "ROW_NUMBER()";
} | 3.68 |
hadoop_BlockStorageMovementNeeded_size | /**
* Returns queue size.
*/
public synchronized int size() {
return storageMovementNeeded.size();
} | 3.68 |
framework_RpcDataProviderExtension_beforeClientResponse | /**
* {@inheritDoc}
* <p>
* RpcDataProviderExtension makes all actual RPC calls from this function
* based on changes in the container.
*/
@Override
public void beforeClientResponse(boolean initial) {
if (initial || bareItemSetTriggeredSizeChange) {
/*
* Push initial set of rows, assuming Grid will initially be
* rendered scrolled to the top and with a decent amount of rows
* visible. If this guess is right, initial data can be shown
* without a round-trip and if it's wrong, the data will simply be
* discarded.
*/
int size = container.size();
rpc.resetDataAndSize(size);
int numberOfRows = Math.min(40, size);
pushRowData(0, numberOfRows, 0, 0);
} else {
// Only do row changes if not initial response.
if (rowChanges != null) {
for (Runnable r : rowChanges) {
r.run();
}
}
// Send current rows again if needed.
if (refreshCache) {
for (Object itemId : activeItemHandler.getActiveItemIds()) {
updateRowData(itemId);
}
}
}
internalUpdateRows(updatedItemIds);
// Clear all changes.
if (rowChanges != null) {
rowChanges.clear();
}
if (updatedItemIds != null) {
updatedItemIds.clear();
}
refreshCache = false;
bareItemSetTriggeredSizeChange = false;
super.beforeClientResponse(initial);
} | 3.68 |
morf_SqlDialect_getDeleteLimitSuffix | /**
* Returns the SQL that specifies the deletion limit as a suffix, if any, for the dialect.
*
* @param limit The delete limit.
* @return The SQL fragment.
*/
protected Optional<String> getDeleteLimitSuffix(@SuppressWarnings("unused") int limit) {
return Optional.empty();
} | 3.68 |
streampipes_UnicodeTokenizer_tokenize | /**
* Tokenizes the text and returns an array of tokens.
*
* @param text The text
* @return The tokens
*/
public static String[] tokenize(final CharSequence text) {
return PAT_NOT_WORD_BOUNDARY.matcher(PAT_WORD_BOUNDARY.matcher(text).replaceAll("\u2063"))
.replaceAll("$1").replaceAll("[ \u2063]+", " ").trim().split("[ ]+");
} | 3.68 |
framework_Calendar_setTimeZone | /**
* Set time zone that this component will use. Null value sets the default
* time zone.
*
* @param zone
* Time zone to use
*/
public void setTimeZone(TimeZone zone) {
timezone = zone;
if (!currentCalendar.getTimeZone().equals(zone)) {
if (zone == null) {
zone = TimeZone.getDefault();
}
currentCalendar.setTimeZone(zone);
df_date_time.setTimeZone(zone);
markAsDirty();
}
} | 3.68 |
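A usage sketch (the caption and zone id are illustrative):

```java
// Show calendar events in a fixed zone regardless of the JVM default.
Calendar calendar = new Calendar("My calendar");
calendar.setTimeZone(TimeZone.getTimeZone("Europe/Helsinki"));

// Passing null reverts to the default time zone.
calendar.setTimeZone(null);
```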
hbase_MutableRegionInfo_getRegionId | /** Returns the regionId */
@Override
public long getRegionId() {
return regionId;
} | 3.68 |
hadoop_CopyOutputFormat_getOutputCommitter | /** {@inheritDoc} */
@Override
public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException {
return new CopyCommitter(getOutputPath(context), context);
} | 3.68 |
morf_ExceptSetOperator_accept | /**
* @see org.alfasoftware.morf.sql.SchemaAndDataChangeVisitable#accept(org.alfasoftware.morf.upgrade.SchemaAndDataChangeVisitor)
*/
@Override
public void accept(SchemaAndDataChangeVisitor visitor) {
visitor.visit(this);
selectStatement.accept(visitor);
} | 3.68 |
hudi_HoodieAvroIndexedRecord_readRecordPayload | /**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@SuppressWarnings("unchecked")
@Override
protected final IndexedRecord readRecordPayload(Kryo kryo, Input input) {
// NOTE: We're leveraging Spark's default [[GenericAvroSerializer]] to serialize Avro
Serializer<GenericRecord> avroSerializer = kryo.getSerializer(GenericRecord.class);
return kryo.readObjectOrNull(input, GenericRecord.class, avroSerializer);
} | 3.68 |