name | code_snippet | score |
---|---|---|
framework_VTooltip_getFinalTouchY | /**
* Return the final Y-coordinate of the tooltip based on cursor
* position, size of the tooltip, size of the page and necessary
* margins.
*
* @param offsetHeight the offset height of the tooltip
* @return The final y-coordinate
*
*/
private int getFinalTouchY(int offsetHeight) {
int y = 0;
int heightNeeded = 10 + offsetHeight;
int roomAbove = currentElement != null
? currentElement.getAbsoluteTop()
+ currentElement.getOffsetHeight()
: EVENT_XY_POSITION_OUTSIDE;
int roomBelow = Window.getClientHeight() - roomAbove;
if (roomBelow > heightNeeded) {
y = roomAbove;
} else {
y = roomAbove - offsetHeight
- (currentElement != null
? currentElement.getOffsetHeight()
: 0);
}
if (y + offsetHeight - Window.getScrollTop() > Window
.getClientHeight()) {
y = roomAbove - 5 - offsetHeight
+ Window.getScrollTop();
if (y - Window.getScrollTop() < 0) {
// tooltip does not fit on top of the mouse either,
// put it at the top of the screen
y = Window.getScrollTop();
}
}
if (roomAbove != EVENT_XY_POSITION_OUTSIDE) {
// Do not allow y to be zero, for otherwise the tooltip
// does not close when the mouse is moved (see
// isTooltipOpen()). #15129
int minY = Window.getScrollTop();
y = Math.max(y, minY);
}
return y;
} | 3.68 |
hadoop_InnerJoinRecordReader_combine | /**
* Return true iff the tuple is full (all data sources contain this key).
*/
protected boolean combine(Object[] srcs, TupleWritable dst) {
assert srcs.length == dst.size();
for (int i = 0; i < srcs.length; ++i) {
if (!dst.has(i)) {
return false;
}
}
return true;
} | 3.68 |
flink_FloatHashSet_contains | /** See {@link Float#equals(Object)}. */
public boolean contains(final float k) {
int intKey = Float.floatToIntBits(k);
if (intKey == 0) {
return this.containsZero;
} else {
float[] key = this.key;
int curr;
int pos;
if ((curr = Float.floatToIntBits(key[pos = MurmurHashUtil.fmix(intKey) & this.mask]))
== 0) {
return false;
} else if (intKey == curr) {
return true;
} else {
while ((curr = Float.floatToIntBits(key[pos = pos + 1 & this.mask])) != 0) {
if (intKey == curr) {
return true;
}
}
return false;
}
}
} | 3.68 |
zxing_BitMatrix_flip | /**
* <p>Flips every bit in the matrix.</p>
*/
public void flip() {
int max = bits.length;
for (int i = 0; i < max; i++) {
bits[i] = ~bits[i];
}
} | 3.68 |
flink_Configuration_getLong | /**
* Returns the value associated with the given config option as a long integer. If no value is
* mapped under any key of the option, it returns the specified default instead of the option's
* default value.
*
* @param configOption The configuration option
* @param overrideDefault The value to return if no value was mapped for any key of the option
* @return the configured value associated with the given config option, or the overrideDefault
*/
@PublicEvolving
public long getLong(ConfigOption<Long> configOption, long overrideDefault) {
return getOptional(configOption).orElse(overrideDefault);
} | 3.68 |
framework_MenuBar_addItem | /**
* Adds a menu item to the bar, that will open the specified menu when it is
* selected.
*
* @param text
* the item's text
* @param popup
* the menu to be cascaded from it
* @return the {@link MenuItem} object created
*/
public MenuItem addItem(String text, MenuBar popup) {
final MenuItem item = new MenuItem(text, popup);
addItem(item);
return item;
} | 3.68 |
hadoop_MapTaskImpl_getSplitsAsString | /**
* @return a String formatted as a comma-separated list of splits.
*/
@Override
protected String getSplitsAsString() {
String[] splits = getTaskSplitMetaInfo().getLocations();
if (splits == null || splits.length == 0)
return "";
StringBuilder sb = new StringBuilder();
for (int i = 0; i < splits.length; i++) {
if (i != 0) sb.append(",");
sb.append(splits[i]);
}
return sb.toString();
} | 3.68 |
flink_BufferConsumer_copy | /**
* Returns a retained copy with separate indexes. This allows to read from the same {@link
* MemorySegment} twice.
*
* <p>WARNING: the newly returned {@link BufferConsumer} will have its reader index copied from
* the original buffer. In other words, data already consumed before copying will not be visible
* to the returned copies.
*
* @return a retained copy of self with separate indexes
*/
public BufferConsumer copy() {
return new BufferConsumer(
buffer.retainBuffer(), writerPosition.positionMarker, currentReaderPosition);
} | 3.68 |
hbase_TableDescriptorBuilder_setNormalizationEnabled | /**
* Set the table normalization enabled flag.
* @param isEnable True to enable normalization.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
} | 3.68 |
hudi_HoodieMergedLogRecordScanner_scan | /**
* Scans delta-log files processing blocks
*/
public final void scan() {
scan(false);
} | 3.68 |
flink_SessionManager_create | /** Create the {@link SessionManager} with the default configuration. */
static SessionManager create(DefaultContext defaultContext) {
return new SessionManagerImpl(defaultContext);
} | 3.68 |
hbase_ServerCommandLine_logJVMInfo | /**
* Log information about the currently running JVM.
*/
public static void logJVMInfo() {
// Print out vm stats before starting up.
RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
if (runtime != null) {
LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" + runtime.getVmVendor()
+ ", vmVersion=" + runtime.getVmVersion());
LOG.info("vmInputArguments=" + runtime.getInputArguments());
}
} | 3.68 |
flink_Task_failExternally | /**
* Marks task execution failed for an external reason (a reason other than the task code itself
* throwing an exception). If the task is already in a terminal state (such as FINISHED,
* CANCELED, FAILED), or if the task is already canceling this does nothing. Otherwise it sets
* the state to FAILED, and, if the invokable code is running, starts an asynchronous thread
* that aborts that code.
*
* <p>This method never blocks.
*/
@Override
public void failExternally(Throwable cause) {
LOG.info("Attempting to fail task externally {} ({}).", taskNameWithSubtask, executionId);
cancelOrFailAndCancelInvokable(ExecutionState.FAILED, cause);
} | 3.68 |
hbase_MasterObserver_postDeleteTable | /**
* Called after the deleteTable operation has been requested. Called as part of delete table RPC
* call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
*/
default void postDeleteTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
} | 3.68 |
hbase_ReflectionUtils_getOneArgStaticMethodAsFunction | /**
* Creates a Function which can be called to performantly execute a reflected static method. The
* creation of the Function itself may not be fast, but executing that method thereafter should be
* much faster than {@link #invokeMethod(Object, String, Object...)}.
* @param lookupClazz the class to find the static method in
* @param methodName the method name
* @param argumentClazz the type of the argument
* @param returnValueClass the type of the return value
* @return a function which when called executes the requested static method.
* @throws Throwable exception types from the underlying reflection
*/
public static <I, R> Function<I, R> getOneArgStaticMethodAsFunction(Class<?> lookupClazz,
String methodName, Class<I> argumentClazz, Class<R> returnValueClass) throws Throwable {
MethodHandles.Lookup lookup = MethodHandles.lookup();
MethodHandle methodHandle = lookup.findStatic(lookupClazz, methodName,
MethodType.methodType(returnValueClass, argumentClazz));
CallSite site =
LambdaMetafactory.metafactory(lookup, "apply", MethodType.methodType(Function.class),
methodHandle.type().generic(), methodHandle, methodHandle.type());
return (Function<I, R>) site.getTarget().invokeExact();
} | 3.68 |
framework_Overlay_setAnimationFromCenterProgress | /**
* Offset the set values from center by given progress to create the
* state of a single animation frame. Each frame needs to be initialized
* from the beginning, since calling this method for a second time
* without resetting the size and position values would lead to
* incorrect end results.
*
* @param progress
* A value between 0.0 and 1.0, indicating the progress of
* the animation (0=start, 1=end).
*/
public void setAnimationFromCenterProgress(double progress) {
left += (int) (width * (1.0 - progress) / 2.0);
top += (int) (height * (1.0 - progress) / 2.0);
width = (int) (width * progress);
height = (int) (height * progress);
} | 3.68 |
hbase_RpcHandler_getCallRunner | /** Returns a {@link CallRunner} taken from the queue, blocking until one is available. */
protected CallRunner getCallRunner() throws InterruptedException {
return this.q.take();
} | 3.68 |
hbase_TableName_isMetaTableName | /** Returns True if <code>tn</code> is the hbase:meta table name. */
public static boolean isMetaTableName(final TableName tn) {
return tn.equals(TableName.META_TABLE_NAME);
} | 3.68 |
framework_DataProvider_withConvertedFilter | /**
* Wraps this data provider to create a data provider that uses a different
* filter type. This can be used for adapting this data provider to a filter
* type provided by a Component such as ComboBox.
* <p>
* For example receiving a String from ComboBox and making a Predicate based
* on it:
*
* <pre>
* DataProvider<Person, Predicate<Person>> dataProvider;
* // ComboBox uses String as the filter type
* DataProvider<Person, String> wrappedProvider = dataProvider
* .withConvertedFilter(filterText -> {
* SerializablePredicate<Person> predicate = person -> person
* .getName().startsWith(filterText);
* return predicate;
* });
* comboBox.setDataProvider(wrappedProvider);
* </pre>
*
* @param filterConverter
* callback that converts the filter in the query of the wrapped
* data provider into a filter supported by this data provider.
* Will only be called if the query contains a filter. Not
* <code>null</code>
*
* @param <C>
* the filter type that the wrapped data provider accepts;
* typically provided by a Component
*
* @return wrapped data provider, not <code>null</code>
*/
public default <C> DataProvider<T, C> withConvertedFilter(
SerializableFunction<C, F> filterConverter) {
Objects.requireNonNull(filterConverter,
"Filter converter can't be null");
return new DataProviderWrapper<T, C, F>(this) {
@Override
protected F getFilter(Query<T, C> query) {
return query.getFilter().map(filterConverter).orElse(null);
}
};
} | 3.68 |
hadoop_ManifestCommitter_commitJob | /**
* This is the big job commit stage.
* Load the manifests, prepare the destination, rename
* the files then cleanup the job directory.
* @param jobContext Context of the job whose output is being written.
* @throws IOException failure.
*/
@Override
public void commitJob(final JobContext jobContext) throws IOException {
ManifestCommitterConfig committerConfig = enterCommitter(false, jobContext);
// create the initial success data.
// this is overwritten by that created during the operation sequence,
// but if the sequence fails before that happens, it
// will be saved to the report directory.
ManifestSuccessData marker = getOrCreateSuccessData(committerConfig);
IOException failure = null;
try (CloseableTaskPoolSubmitter ioProcs =
committerConfig.createSubmitter();
ManifestStoreOperations storeOperations = createManifestStoreOperations()) {
// the stage config will be shared across all stages.
StageConfig stageConfig = committerConfig.createStageConfig()
.withOperations(storeOperations)
.withIOProcessors(ioProcs)
.build();
// commit the job, including any cleanup and validation.
final Configuration conf = jobContext.getConfiguration();
CommitJobStage.Result result = new CommitJobStage(stageConfig).apply(
new CommitJobStage.Arguments(
committerConfig.getCreateJobMarker(),
committerConfig.getValidateOutput(),
conf.getTrimmed(OPT_DIAGNOSTICS_MANIFEST_DIR, ""),
cleanupStageOptionsFromConfig(
OP_STAGE_JOB_CLEANUP, conf)
));
marker = result.getJobSuccessData();
// update the cached success with the new report.
setSuccessReport(marker);
// patch in the #of threads as it is useful
marker.putDiagnostic(OPT_IO_PROCESSORS,
conf.get(OPT_IO_PROCESSORS, Long.toString(OPT_IO_PROCESSORS_DEFAULT)));
} catch (IOException e) {
// failure. record it for the summary
failure = e;
// rethrow
throw e;
} finally {
// save the report summary, even on failure
maybeSaveSummary(activeStage,
committerConfig,
marker,
failure,
true,
true);
// print job commit stats
LOG.info("{}: Job Commit statistics {}",
committerConfig.getName(),
ioStatisticsToPrettyString(iostatistics));
// and warn of rename problems
final Long recoveries = iostatistics.counters().get(OP_COMMIT_FILE_RENAME_RECOVERED);
if (recoveries != null && recoveries > 0) {
LOG.warn("{}: rename failures were recovered from. Number of recoveries: {}",
committerConfig.getName(), recoveries);
}
updateCommonContextOnCommitterExit();
}
} | 3.68 |
hudi_MarkerUtils_stripMarkerFolderPrefix | /**
* Strips the marker folder prefix of any file path under the marker directory.
*
* @param fullMarkerPath the full path of the file
* @param markerDir marker directory
* @return file name
*/
public static String stripMarkerFolderPrefix(String fullMarkerPath, String markerDir) {
int begin = fullMarkerPath.indexOf(markerDir);
ValidationUtils.checkArgument(begin >= 0,
"Not in marker dir. Marker Path=" + fullMarkerPath + ", Expected Marker Root=" + markerDir);
return fullMarkerPath.substring(begin + markerDir.length() + 1);
} | 3.68 |
hadoop_CachingGetSpaceUsed_running | /**
* Is the background thread running?
*/
boolean running() {
return running.get();
} | 3.68 |
hbase_HFileArchiveTableMonitor_setArchiveTables | /**
* Set the tables to be archived. Internally adds each table and attempts to register it.
* <p>
* <b>Note: All previous tables will be removed in favor of these tables.</b>
* @param tables add each of the tables to be archived.
*/
public synchronized void setArchiveTables(List<String> tables) {
archivedTables.clear();
archivedTables.addAll(tables);
} | 3.68 |
hudi_HoodieRecordGlobalLocation_toLocal | /**
* Returns the record location as local.
*/
public HoodieRecordLocation toLocal(String instantTime) {
return new HoodieRecordLocation(instantTime, fileId, position);
} | 3.68 |
framework_AbsoluteLayout_setLeft | /**
* Sets the 'left' attribute; distance from the left of the component to
* the left edge of the layout.
*
* @param leftValue
* The value of the 'left' attribute
* @param leftUnits
* The unit of the 'left' attribute. See UNIT_SYMBOLS for a
* description of the available units.
*/
public void setLeft(Float leftValue, Unit leftUnits) {
this.leftValue = leftValue;
this.leftUnits = leftUnits;
markAsDirty();
} | 3.68 |
hudi_UtilHelpers_createHoodieClient | /**
* Build Hoodie write client.
*
* @param jsc Java Spark Context
* @param basePath Base Path
* @param schemaStr Schema
* @param parallelism Parallelism
*/
public static SparkRDDWriteClient<HoodieRecordPayload> createHoodieClient(JavaSparkContext jsc, String basePath, String schemaStr,
int parallelism, Option<String> compactionStrategyClass, TypedProperties properties) {
HoodieCompactionConfig compactionConfig = compactionStrategyClass
.map(strategy -> HoodieCompactionConfig.newBuilder().withInlineCompaction(false)
.withCompactionStrategy(ReflectionUtils.loadClass(strategy)).build())
.orElse(HoodieCompactionConfig.newBuilder().withInlineCompaction(false).build());
HoodieWriteConfig config =
HoodieWriteConfig.newBuilder().withPath(basePath)
.withParallelism(parallelism, parallelism)
.withBulkInsertParallelism(parallelism)
.withDeleteParallelism(parallelism)
.withSchema(schemaStr).combineInput(true, true).withCompactionConfig(compactionConfig)
.withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(HoodieIndex.IndexType.BLOOM).build())
.withProps(properties).build();
return new SparkRDDWriteClient<>(new HoodieSparkEngineContext(jsc), config);
} | 3.68 |
rocketmq-connect_WorkerConnector_awaitShutdown | /**
* Wait for this connector to finish shutting down.
*
* @param timeoutMs time in milliseconds to await shutdown
* @return true if successful, false if the timeout was reached
*/
public boolean awaitShutdown(long timeoutMs) {
try {
return shutdownLatch.await(timeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
return false;
}
} | 3.68 |
framework_FlyweightRow_assertSetup | /**
* Asserts that the flyweight row has properly been set up before trying to
* access any of its data.
*/
private void assertSetup() {
assert element != null && row != BLANK
&& columnWidths != null : "Flyweight row was not "
+ "properly initialized. Make sure the setup-method is "
+ "called before retrieving data. This is either a bug "
+ "in Escalator, or the instance of the flyweight row "
+ "has been stored and accessed.";
} | 3.68 |
hbase_HFilePrettyPrinter_processFile | // HBASE-22561 introduces boolean checkRootDir for WebUI specifically
public int processFile(Path file, boolean checkRootDir) throws IOException {
if (verbose) {
out.println("Scanning -> " + file);
}
if (checkRootDir) {
Path rootPath = CommonFSUtils.getRootDir(getConf());
String rootString = rootPath + Path.SEPARATOR;
if (!file.toString().startsWith(rootString)) {
// First we see if fully-qualified URI matches the root dir. It might
// also be an absolute path in the same filesystem, so we prepend the FS
// of the root dir and see if that fully-qualified URI matches.
FileSystem rootFS = rootPath.getFileSystem(getConf());
String qualifiedFile = rootFS.getUri().toString() + file.toString();
if (!qualifiedFile.startsWith(rootString)) {
err.println(
"ERROR, file (" + file + ") is not in HBase's root directory (" + rootString + ")");
return -2;
}
}
}
FileSystem fs = file.getFileSystem(getConf());
if (!fs.exists(file)) {
err.println("ERROR, file doesnt exist: " + file);
return -2;
}
HFile.Reader reader = HFile.createReader(fs, file, CacheConfig.DISABLED, true, getConf());
Map<byte[], byte[]> fileInfo = reader.getHFileInfo();
KeyValueStatsCollector fileStats = null;
if (verbose || printKey || checkRow || checkFamily || printStats || checkMobIntegrity) {
// scan over file and read key/value's and check if requested
HFileScanner scanner = reader.getScanner(getConf(), false, false, false);
fileStats = new KeyValueStatsCollector();
boolean shouldScanKeysValues;
if (this.isSeekToRow && !Bytes.equals(row, reader.getFirstRowKey().orElse(null))) {
// seek to the first kv on this row
shouldScanKeysValues = (scanner.seekTo(PrivateCellUtil.createFirstOnRow(this.row)) != -1);
} else {
shouldScanKeysValues = scanner.seekTo();
}
if (shouldScanKeysValues) {
scanKeysValues(file, fileStats, scanner, row);
}
}
// print meta data
if (shouldPrintMeta) {
printMeta(reader, fileInfo);
}
if (printBlockIndex) {
out.println("Block Index:");
out.println(reader.getDataBlockIndexReader());
}
if (printBlockHeaders) {
out.println("Block Headers:");
/*
* TODO: this same/similar block iteration logic is used in HFileBlock#blockRange and
* TestLazyDataBlockDecompression. Refactor?
*/
FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, file);
long fileSize = fs.getFileStatus(file).getLen();
FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize);
long offset = trailer.getFirstDataBlockOffset(), max = trailer.getLastDataBlockOffset();
HFileBlock block;
while (offset <= max) {
block = reader.readBlock(offset, -1, /* cacheBlock */ false, /* pread */ false,
/* isCompaction */ false, /* updateCacheMetrics */ false, null, null);
offset += block.getOnDiskSizeWithHeader();
out.println(block);
}
}
if (printStats) {
fileStats.finish(printStatRanges);
out.println("Stats:\n" + fileStats);
}
reader.close();
return 0;
} | 3.68 |
hadoop_GenericRefreshProtocolServerSideTranslatorPB_pack | // Convert a collection of RefreshResponse objects to a
// RefreshResponseCollection proto
private GenericRefreshResponseCollectionProto pack(
Collection<RefreshResponse> responses) {
GenericRefreshResponseCollectionProto.Builder b =
GenericRefreshResponseCollectionProto.newBuilder();
for (RefreshResponse response : responses) {
GenericRefreshResponseProto.Builder respBuilder =
GenericRefreshResponseProto.newBuilder();
respBuilder.setExitStatus(response.getReturnCode());
respBuilder.setUserMessage(response.getMessage());
respBuilder.setSenderName(response.getSenderName());
// Add to collection
b.addResponses(respBuilder);
}
return b.build();
} | 3.68 |
AreaShop_RentRegion_setRentedUntil | /**
* Set the time until the region is rented (milliseconds from 1970, system time).
* @param rentedUntil The time until the region is rented
*/
public void setRentedUntil(Long rentedUntil) {
if(rentedUntil == null) {
setSetting("rent.rentedUntil", null);
} else {
setSetting("rent.rentedUntil", rentedUntil);
}
} | 3.68 |
dubbo_InstanceAddressURL_getServiceMethodParameter | /**
* method parameter only exists in ServiceInfo
*
* @param method the method name
* @param key the parameter key
* @return the method parameter value, or the instance parameter if no method-level value is found
*/
@Override
public String getServiceMethodParameter(String protocolServiceKey, String method, String key) {
if (consumerParamFirst(key)) {
URL consumerUrl = RpcContext.getServiceContext().getConsumerUrl();
if (consumerUrl != null) {
String v = consumerUrl.getServiceMethodParameter(protocolServiceKey, method, key);
if (StringUtils.isNotEmpty(v)) {
return v;
}
}
}
MetadataInfo.ServiceInfo serviceInfo = getServiceInfo(protocolServiceKey);
if (null == serviceInfo) {
return getParameter(key);
}
String value = serviceInfo.getMethodParameter(method, key, null);
if (StringUtils.isNotEmpty(value)) {
return value;
}
return getParameter(key);
} | 3.68 |
morf_SelectStatement_withDialectSpecificHint | /**
* Supplies a specified custom hint to the database for a query.
*
* @param databaseType a database type identifier. Eg: ORACLE, PGSQL, SQL_SERVER
* @param hintContents the hint contents themselves, without the delimiters, e.g. without the /*+ prefix and the closing delimiter for Oracle hints
* @return this, for method chaining.
*/
public SelectStatement withDialectSpecificHint(String databaseType, String hintContents) {
return copyOnWriteOrMutate(
(SelectStatementBuilder b) -> b.withDialectSpecificHint(databaseType, hintContents),
() -> this.hints.add(new DialectSpecificHint(databaseType, hintContents))
);
} | 3.68 |
hudi_HoodieMetaSyncOperations_databaseExists | /**
* Check if a database already exists in the metastore.
*/
default boolean databaseExists(String databaseName) {
return false;
} | 3.68 |
hbase_DynamicMetricsRegistry_newStat | /**
* Create a mutable metric with stats
* @param name of the metric
* @param desc metric description
* @param sampleName of the metric (e.g., "Ops")
* @param valueName of the metric (e.g., "Time" or "Latency")
* @return a new mutable metric object
*/
public MutableStat newStat(String name, String desc, String sampleName, String valueName) {
return newStat(name, desc, sampleName, valueName, false);
} | 3.68 |
framework_BootstrapHandler_getPushMode | /**
* Gets the push mode to use.
*
* @return the desired push mode
*/
public PushMode getPushMode() {
if (pushMode == null) {
UICreateEvent event = new UICreateEvent(getRequest(),
getUIClass());
pushMode = getBootstrapResponse().getUIProvider()
.getPushMode(event);
if (pushMode == null) {
pushMode = getRequest().getService()
.getDeploymentConfiguration().getPushMode();
}
if (pushMode.isEnabled()
&& !getRequest().getService().ensurePushAvailable()) {
/*
* Fall back if not supported (ensurePushAvailable will log
* information to the developer the first time this happens)
*/
pushMode = PushMode.DISABLED;
}
}
return pushMode;
} | 3.68 |
framework_CalendarDateRange_inRange | /**
* Is a date in the date range.
*
* @param date
* The date to check
* @return true if the date is within the range, start and end inclusive;
* false otherwise
*/
public boolean inRange(Date date) {
if (date == null) {
return false;
}
return date.compareTo(start) >= 0 && date.compareTo(end) <= 0;
} | 3.68 |
pulsar_ConsumerConfiguration_getSubscriptionType | /**
* @return the configured subscription type
*/
public SubscriptionType getSubscriptionType() {
return conf.getSubscriptionType();
} | 3.68 |
morf_ChangelogBuilder_produceChangelog | /**
* Produces the Changelog based on the given input settings.
*/
public void produceChangelog() {
HumanReadableStatementProducer producer = new HumanReadableStatementProducer(upgradeSteps, includeDataChanges, preferredSQLDialect);
producer.produceFor(new ChangelogStatementConsumer(outputStream));
} | 3.68 |
framework_VAbstractCalendarPanel_onSubmit | /**
* Notifies submit-listeners of a submit event
*/
private void onSubmit() {
if (getSubmitListener() != null) {
getSubmitListener().onSubmit();
}
} | 3.68 |
hbase_ScannerModel_getCacheBlocks | /** Returns true if HFile blocks should be cached on the servers for this scan, false otherwise */
@XmlAttribute
public boolean getCacheBlocks() {
return cacheBlocks;
} | 3.68 |
hadoop_CoderUtil_getEmptyChunk | /**
* Make sure to return an empty chunk buffer for the desired length.
* @param leastLength the minimum length required
* @return empty chunk of zero bytes
*/
static byte[] getEmptyChunk(int leastLength) {
if (emptyChunk.length >= leastLength) {
return emptyChunk; // In most time
}
synchronized (CoderUtil.class) {
emptyChunk = new byte[leastLength];
}
return emptyChunk;
} | 3.68 |
framework_Button_removeClickListener | /**
* Removes the button click listener.
*
* @param listener
* the Listener to be removed.
*
* @deprecated As of 8.0, replaced by {@link Registration#remove()} in the
* registration object returned from
* {@link #addClickListener(ClickListener)}.
*/
@Deprecated
public void removeClickListener(ClickListener listener) {
removeListener(ClickEvent.class, listener,
ClickListener.BUTTON_CLICK_METHOD);
} | 3.68 |
flink_SqlClientParserState_isEndMarkerOfState | /**
* Returns whether at current {@code pos} of {@code input} there is {@code end} marker of the
* state. In case {@code end} marker is null it returns false.
*
* @param input a string to look at
* @param pos a position to check if anything matches to {@code end} starting from this position
* @return whether the end marker of the current state is reached, or false if the end marker
* of the current state is null
*/
boolean isEndMarkerOfState(String input, int pos) {
if (end == null) {
return false;
}
return end.length() > 0 && input.regionMatches(pos, end, 0, end.length());
} | 3.68 |
framework_SQLContainer_removeReference | /**
* Removes the reference pointing to the given SQLContainer.
*
* @param refdCont
* Target SQLContainer of the reference
* @return true if successful, false if the reference did not exist
*/
public boolean removeReference(SQLContainer refdCont) {
if (refdCont == null) {
throw new IllegalArgumentException(
"Referenced SQLContainer can not be null.");
}
return references.remove(refdCont) != null;
} | 3.68 |
hadoop_EntityTableRW_setMetricsTTL | /**
* @param metricsTTL time to live parameter for the metrics in this table.
* @param hbaseConf configuration in which to set the metrics TTL config
* variable.
*/
public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
} | 3.68 |
framework_ContainerHierarchicalWrapper_getContainerProperty | /*
* Gets the Property identified by the given itemId and propertyId from the
* Container Don't add a JavaDoc comment here, we use the default
* documentation from implemented interface.
*/
@Override
public Property getContainerProperty(Object itemId, Object propertyId) {
return container.getContainerProperty(itemId, propertyId);
} | 3.68 |
querydsl_MathExpressions_sin | /**
* Create a {@code sin(num)} expression
*
* <p>Returns the sine of an angle of num radians.</p>
*
* @param num numeric expression
* @return sin(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> sin(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.SIN, num);
} | 3.68 |
hbase_KeyValue_getFamilyArray | /**
* Returns the backing array of the entire KeyValue (all KeyValue fields are in a single array)
*/
@Override
public byte[] getFamilyArray() {
return bytes;
} | 3.68 |
hbase_BloomFilterFactory_getBloomBlockSize | /** Returns the compound Bloom filter block size from the configuration */
public static int getBloomBlockSize(Configuration conf) {
return conf.getInt(IO_STOREFILE_BLOOM_BLOCK_SIZE, 128 * 1024);
} | 3.68 |
zxing_PDF417_generateBarcodeLogic | /**
* @param msg message to encode
* @param errorCorrectionLevel PDF417 error correction level to use
* @param autoECI automatically insert ECIs if needed
* @throws WriterException if the contents cannot be encoded in this format
*/
public void generateBarcodeLogic(String msg, int errorCorrectionLevel, boolean autoECI) throws WriterException {
//1. step: High-level encoding
int errorCorrectionCodeWords = PDF417ErrorCorrection.getErrorCorrectionCodewordCount(errorCorrectionLevel);
String highLevel = PDF417HighLevelEncoder.encodeHighLevel(msg, compaction, encoding, autoECI);
int sourceCodeWords = highLevel.length();
int[] dimension = determineDimensions(sourceCodeWords, errorCorrectionCodeWords);
int cols = dimension[0];
int rows = dimension[1];
int pad = getNumberOfPadCodewords(sourceCodeWords, errorCorrectionCodeWords, cols, rows);
//2. step: construct data codewords
if (sourceCodeWords + errorCorrectionCodeWords + 1 > 929) { // +1 for symbol length CW
throw new WriterException(
"Encoded message contains too many code words, message too big (" + msg.length() + " bytes)");
}
int n = sourceCodeWords + pad + 1;
StringBuilder sb = new StringBuilder(n);
sb.append((char) n);
sb.append(highLevel);
for (int i = 0; i < pad; i++) {
sb.append((char) 900); //PAD characters
}
String dataCodewords = sb.toString();
//3. step: Error correction
String ec = PDF417ErrorCorrection.generateErrorCorrection(dataCodewords, errorCorrectionLevel);
//4. step: low-level encoding
barcodeMatrix = new BarcodeMatrix(rows, cols);
encodeLowLevel(dataCodewords + ec, cols, rows, errorCorrectionLevel, barcodeMatrix);
} | 3.68 |
framework_AbstractMedia_getPreload | /**
* @return the configured media preload value
* @since 7.7.11
*/
public PreloadMode getPreload() {
return getState(false).preload;
} | 3.68 |
hadoop_AbstractPolicyManager_getAMRMPolicy | /**
* This default implementation validates the
* {@link FederationPolicyInitializationContext},
* then checks whether it needs to reinstantiate the class (null or
* mismatching type), and reinitialize the policy.
*
* @param federationPolicyContext the current context
* @param oldInstance the existing (possibly null) instance.
*
* @return a valid and fully reinitialized {@link FederationAMRMProxyPolicy}
* instance
*
* @throws FederationPolicyInitializationException if the reinitialization is
* not valid, and ensure
* previous state is preserved
*/
public FederationAMRMProxyPolicy getAMRMPolicy(
FederationPolicyInitializationContext federationPolicyContext,
FederationAMRMProxyPolicy oldInstance)
throws FederationPolicyInitializationException {
if (amrmProxyFederationPolicy == null) {
throw new FederationPolicyInitializationException("The parameter "
+ "amrmProxyFederationPolicy should be initialized in "
+ this.getClass().getSimpleName() + " constructor.");
}
try {
return (FederationAMRMProxyPolicy) internalPolicyGetter(
federationPolicyContext, oldInstance, amrmProxyFederationPolicy);
} catch (ClassCastException e) {
throw new FederationPolicyInitializationException(e);
}
}
/**
* This default implementation validates the
* {@link FederationPolicyInitializationContext},
* then checks whether it needs to reinstantiate the class (null or
* mismatching type), and reinitialize the policy.
*
* @param federationPolicyContext the current context
* @param oldInstance the existing (possibly null) instance.
*
* @return a valid and fully reinitialized {@link FederationRouterPolicy} | 3.68 |
druid_FileNodeListener_init | /**
* Start a Scheduler to check the specified file.
*
* @see #setIntervalSeconds(int)
* @see #update()
*/
@Override
public void init() {
super.init();
if (intervalSeconds <= 0) {
intervalSeconds = 60;
}
executor = Executors.newScheduledThreadPool(1);
executor.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
LOG.debug("Checking file " + file + " every " + intervalSeconds + "s.");
if (!lock.tryLock()) {
LOG.info("Can not acquire the lock, skip this time.");
return;
}
try {
update();
} catch (Exception e) {
LOG.error("Can NOT update the node list.", e);
} finally {
lock.unlock();
}
}
}, intervalSeconds, intervalSeconds, TimeUnit.SECONDS);
} | 3.68 |
hbase_ZNodePaths_isClientReadable | /**
* Returns whether the path is supposed to be readable by the client and DOES NOT contain
* sensitive information (world readable).
*/
public boolean isClientReadable(String path) {
// Developer notice: These znodes are world readable. DO NOT add more znodes here UNLESS
// all clients need to access this data to work. Using zk for sharing data to clients (other
// than service lookup case is not a recommended design pattern.
return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode)
|| path.equals(clusterIdZNode) || path.equals(rsZNode) ||
// /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not
path.equals(tableZNode) || path.startsWith(tableZNode + "/");
} | 3.68 |
flink_CsvOutputFormat_setAllowNullValues | /**
* Configures the format to either allow null values (writing an empty field), or to throw an
* exception when encountering a null field.
*
* <p>by default, null values are disallowed.
*
* @param allowNulls Flag to indicate whether the output format should accept null values.
*/
public void setAllowNullValues(boolean allowNulls) {
this.allowNullValues = allowNulls;
} | 3.68 |
rocketmq-connect_RocketMqAdminUtil_topicExist | /**
* check topic exist
*
* @param config RocketMQ admin config
* @param topic the topic to check
* @return true if the topic route info exists, false otherwise
*/
public static boolean topicExist(RocketMqConfig config, String topic) {
DefaultMQAdminExt defaultMQAdminExt = null;
boolean foundTopicRouteInfo = false;
try {
defaultMQAdminExt = startMQAdminTool(config);
TopicRouteData topicRouteData = defaultMQAdminExt.examineTopicRouteInfo(topic);
if (topicRouteData != null) {
foundTopicRouteInfo = true;
}
} catch (Exception e) {
foundTopicRouteInfo = false;
} finally {
if (defaultMQAdminExt != null) {
defaultMQAdminExt.shutdown();
}
}
return foundTopicRouteInfo;
} | 3.68 |
flink_HiveParserJoinCondTypeCheckProcFactory_getDefaultExprProcessor | /** Factory method to get DefaultExprProcessor. */
@Override
public HiveParserTypeCheckProcFactory.DefaultExprProcessor getDefaultExprProcessor() {
return new HiveParserJoinCondTypeCheckProcFactory.JoinCondDefaultExprProcessor();
} | 3.68 |
framework_AbstractInMemoryContainer_internalRemoveAllItems | /**
* Removes all items from the internal data structures of this class. This
* can be used to implement {@link #removeAllItems()} in subclasses.
*
* No notification is sent, the caller has to fire a suitable item set
* change notification.
*/
protected void internalRemoveAllItems() {
// Removes all Items
getAllItemIds().clear();
if (isFiltered()) {
getFilteredItemIds().clear();
}
} | 3.68 |
pulsar_LoadSimulationController_changeIfExists | // Find a topic and change it if it exists.
private int changeIfExists(final ShellArguments arguments, final String topic) throws Exception {
final int client = find(topic);
if (client != -1) {
change(arguments, topic, client);
}
return client;
} | 3.68 |
hbase_AbstractFSWALProvider_getWALFiles | /**
* List all the WAL files for the given server.
*/
public static List<Path> getWALFiles(Configuration c, ServerName serverName) throws IOException {
Path walRoot = new Path(CommonFSUtils.getWALRootDir(c), HConstants.HREGION_LOGDIR_NAME);
FileSystem fs = walRoot.getFileSystem(c);
List<Path> walFiles = new ArrayList<>();
Path walDir = new Path(walRoot, serverName.toString());
try {
for (FileStatus status : fs.listStatus(walDir)) {
if (status.isFile()) {
walFiles.add(status.getPath());
}
}
} catch (FileNotFoundException e) {
LOG.info("WAL dir {} not exists", walDir);
}
return walFiles;
} | 3.68 |
hbase_TableBackupClient_deleteSnapshots | /**
* Delete HBase snapshot for backup.
* @param backupInfo backup info
* @throws IOException exception
*/
protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo,
Configuration conf) throws IOException {
LOG.debug("Trying to delete snapshot for full backup.");
for (String snapshotName : backupInfo.getSnapshotNames()) {
if (snapshotName == null) {
continue;
}
LOG.debug("Trying to delete snapshot: " + snapshotName);
try (Admin admin = conn.getAdmin()) {
admin.deleteSnapshot(snapshotName);
}
LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + backupInfo.getBackupId()
+ " succeeded.");
}
} | 3.68 |
framework_HierarchicalContainer_rootItemIds | /*
* Gets the IDs of the root elements in the container. Don't add a JavaDoc
* comment here, we use the default documentation from implemented
* interface.
*/
@Override
public Collection<?> rootItemIds() {
if (filteredRoots != null) {
return Collections.unmodifiableCollection(filteredRoots);
} else {
return Collections.unmodifiableCollection(roots);
}
} | 3.68 |
morf_XmlDataSetConsumer_table | /**
* @see org.alfasoftware.morf.dataset.DataSetConsumer#table(org.alfasoftware.morf.metadata.Table, java.lang.Iterable)
*/
@Override
public void table(final Table table, final Iterable<Record> records) {
if (table == null) {
throw new IllegalArgumentException("table is null");
}
// Get a content handler for this table
try {
OutputStream outputStream = xmlStreamProvider.openOutputStreamForTable(table.getName());
try {
ContentHandler contentHandler = createContentHandler(outputStream);
contentHandler.startDocument();
AttributesImpl tableAttributes = new AttributesImpl();
tableAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.VERSION_ATTRIBUTE, XmlDataSetNode.VERSION_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, FORMAT_VERSION);
contentHandler.startElement(XmlDataSetNode.URI, XmlDataSetNode.TABLE_NODE, XmlDataSetNode.TABLE_NODE, tableAttributes);
outputTableMetaData(table, contentHandler);
contentHandler.startElement(XmlDataSetNode.URI, XmlDataSetNode.DATA_NODE, XmlDataSetNode.DATA_NODE, EMPTY_ATTRIBUTES);
for (Record record : records) {
AttributesImpl rowValueAttributes = new AttributesImpl();
for (Column column : table.columns()) {
String value = getValue(record, column, table.getName());
if (value != null) {
rowValueAttributes.addAttribute(
XmlDataSetNode.URI,
column.getName(),
column.getName(),
XmlDataSetNode.STRING_TYPE,
Escaping.escapeCharacters(value));
}
}
emptyElement(contentHandler, XmlDataSetNode.RECORD_NODE, rowValueAttributes);
}
contentHandler.endElement(XmlDataSetNode.URI, XmlDataSetNode.DATA_NODE, XmlDataSetNode.DATA_NODE);
contentHandler.endElement(XmlDataSetNode.URI, XmlDataSetNode.TABLE_NODE, XmlDataSetNode.TABLE_NODE);
contentHandler.endDocument();
} finally {
outputStream.close();
}
} catch (RuntimeException|SAXException|IOException e) {
throw new RuntimeException("Error consuming table [" + table.getName() + "]", e);
}
} | 3.68 |
morf_BaseDataSetReader_tableExists | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider.XmlInputStreamProvider#tableExists(java.lang.String)
*/
@Override
public final boolean tableExists(String name) {
return tableNameToFileNameMap.containsKey(name.toUpperCase());
} | 3.68 |
hadoop_DirectoryPolicy_getOptionName | /**
* Get the option name.
* @return name of the option
*/
public String getOptionName() {
return optionName;
} | 3.68 |
hbase_MasterObserver_postMoveTables | /**
* Called after tables are moved to target region server group
* @param ctx the environment to interact with the framework and master
* @param tables set of tables to move
* @param targetGroup name of group
*/
default void postMoveTables(final ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<TableName> tables, String targetGroup) throws IOException {
} | 3.68 |
morf_AbstractSqlDialectTest_testNow | /**
* Test that now functionality behaves as expected.
*/
@Test
public void testNow() {
String result = testDialect.getSqlFrom(now());
assertEquals(expectedNow(), result);
} | 3.68 |
graphhopper_RoadDensityCalculator_calcRoadDensities | /**
* Loops over all edges of the graph and calls the given edgeHandler for each edge. This is done in parallel using
* the given number of threads. For every call we can calculate the road density using the provided thread local
* road density calculator.
*/
public static void calcRoadDensities(Graph graph, BiConsumer<RoadDensityCalculator, EdgeIteratorState> edgeHandler, int threads) {
ThreadLocal<RoadDensityCalculator> calculator = ThreadLocal.withInitial(() -> new RoadDensityCalculator(graph));
Stream<Runnable> roadDensityWorkers = IntStream.range(0, graph.getEdges())
.mapToObj(i -> () -> {
EdgeIteratorState edge = graph.getEdgeIteratorState(i, Integer.MIN_VALUE);
edgeHandler.accept(calculator.get(), edge);
});
GHUtility.runConcurrently(roadDensityWorkers, threads);
} | 3.68 |
hadoop_FlowRunColumnPrefix_getColumnPrefix | /**
* @return the column name value
*/
public String getColumnPrefix() {
return columnPrefix;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectAnd | /**
* Tests a select with a nested "and where" clause.
*/
@Test
public void testSelectAnd() {
SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE))
.where(and(
eq(new FieldReference(STRING_FIELD), "A0001"),
greaterThan(new FieldReference(INT_FIELD), 20080101)
));
String value = varCharCast("'A0001'");
String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE ((stringField = " + stringLiteralPrefix() +value+") AND (intField > 20080101))";
assertEquals("Select with multiple where clauses", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_failover | /**
* Set enable failover.
*
* <p>
* Only for `monitorExecution` enabled.
* </p>
*
* @param failover enable or disable failover
* @return job configuration builder
*/
public Builder failover(final boolean failover) {
this.failover = failover;
return this;
} | 3.68 |
framework_ApplicationConnection_getVersionInfo | /**
* Helper for tt initialization
*/
private JavaScriptObject getVersionInfo() {
return configuration.getVersionInfoJSObject();
} | 3.68 |
flink_RpcServiceUtils_createWildcardName | /**
* Creates a wildcard name symmetric to {@link #createRandomName(String)}.
*
* @param prefix prefix of the wildcard name
* @return wildcard name starting with the prefix
*/
public static String createWildcardName(String prefix) {
return prefix + "_*";
} | 3.68 |
flink_HiveParserTypeCheckProcFactory_getColumnExprProcessor | /** Factory method to get ColumnExprProcessor. */
public HiveParserTypeCheckProcFactory.ColumnExprProcessor getColumnExprProcessor() {
return new HiveParserTypeCheckProcFactory.ColumnExprProcessor();
} | 3.68 |
pulsar_ManagedLedgerImpl_checkFenced | /**
* Throws an exception if the managed ledger has been previously fenced.
*
* @throws ManagedLedgerException
*/
private void checkFenced() throws ManagedLedgerException {
if (STATE_UPDATER.get(this).isFenced()) {
log.error("[{}] Attempted to use a fenced managed ledger", name);
throw new ManagedLedgerFencedException();
}
} | 3.68 |
flink_NetworkBufferPool_requestUnpooledMemorySegments | /**
* Unpooled memory segments are requested directly from {@link NetworkBufferPool}, as opposed to
* pooled segments, that are requested through {@link BufferPool} that was created from this
* {@link NetworkBufferPool} (see {@link #createBufferPool}). They are used for example for
* exclusive {@link RemoteInputChannel} credits, that are permanently assigned to that channel,
* and never returned to any {@link BufferPool}. As opposed to pooled segments, when requested,
* unpooled segments needs to be accounted against {@link #numTotalRequiredBuffers}, which might
* require redistribution of the segments.
*/
@Override
public List<MemorySegment> requestUnpooledMemorySegments(int numberOfSegmentsToRequest)
throws IOException {
checkArgument(
numberOfSegmentsToRequest >= 0,
"Number of buffers to request must be non-negative.");
synchronized (factoryLock) {
if (isDestroyed) {
throw new IllegalStateException("Network buffer pool has already been destroyed.");
}
if (numberOfSegmentsToRequest == 0) {
return Collections.emptyList();
}
tryRedistributeBuffers(numberOfSegmentsToRequest);
}
try {
return internalRequestMemorySegments(numberOfSegmentsToRequest);
} catch (IOException exception) {
revertRequiredBuffers(numberOfSegmentsToRequest);
ExceptionUtils.rethrowIOException(exception);
return null;
}
} | 3.68 |
morf_FieldReference_setDirection | /**
* Sets the direction to sort the field on.
*
* @param direction the direction to set
* @deprecated Use {@link #direction(Direction)}
*/
@Deprecated
public void setDirection(Direction direction) {
AliasedField.assetImmutableDslDisabled();
this.direction = direction;
} | 3.68 |
flink_DataSet_aggregate | /**
* Applies an Aggregate transformation on a non-grouped {@link Tuple} {@link DataSet}.
*
* <p><b>Note: Only Tuple DataSets can be aggregated.</b> The transformation applies a built-in
* {@link Aggregations Aggregation} on a specified field of a Tuple DataSet. Additional
* aggregation functions can be added to the resulting {@link AggregateOperator} by calling
* {@link AggregateOperator#and(Aggregations, int)}.
*
* @param agg The built-in aggregation function that is computed.
* @param field The index of the Tuple field on which the aggregation function is applied.
* @return An AggregateOperator that represents the aggregated DataSet.
* @see Tuple
* @see Aggregations
* @see AggregateOperator
* @see DataSet
*/
public AggregateOperator<T> aggregate(Aggregations agg, int field) {
return new AggregateOperator<>(this, agg, field, Utils.getCallLocationName());
} | 3.68 |
pulsar_ConnectionPool_getConnection | /**
* Get a connection from the pool.
* <p>
* The connection can either be created or be coming from the pool itself.
* <p>
* When specifying multiple addresses, the logicalAddress is used as a tag for the broker, while the physicalAddress
* is where the connection is actually happening.
* <p>
* These two addresses can be different when the client is forced to connect through a proxy layer. Essentially, the
* pool is using the logical address as a way to decide whether to reuse a particular connection.
*
* @param logicalAddress
* the address to use as the broker tag
* @param physicalAddress
* the real address where the TCP connection should be made
* @return a future that will produce the ClientCnx object
*/
public CompletableFuture<ClientCnx> getConnection(InetSocketAddress logicalAddress,
InetSocketAddress physicalAddress, final int randomKey) {
if (maxConnectionsPerHosts == 0) {
// Disable pooling
return createConnection(logicalAddress, physicalAddress, -1);
}
final ConcurrentMap<Integer, CompletableFuture<ClientCnx>> innerPool =
pool.computeIfAbsent(logicalAddress, a -> new ConcurrentHashMap<>());
CompletableFuture<ClientCnx> completableFuture = innerPool
.computeIfAbsent(randomKey, k -> createConnection(logicalAddress, physicalAddress, randomKey));
if (completableFuture.isCompletedExceptionally()) {
// we cannot cache a failed connection, so we remove it from the pool
// there is a race condition in which
// cleanupConnection is called before caching this result
// and so the clean up fails
cleanupConnection(logicalAddress, randomKey, completableFuture);
return completableFuture;
}
return completableFuture.thenCompose(clientCnx -> {
// If connection already release, create a new one.
if (clientCnx.getIdleState().isReleased()) {
cleanupConnection(logicalAddress, randomKey, completableFuture);
return innerPool
.computeIfAbsent(randomKey, k -> createConnection(logicalAddress, physicalAddress, randomKey));
}
// Try use exists connection.
if (clientCnx.getIdleState().tryMarkUsingAndClearIdleTime()) {
return CompletableFuture.completedFuture(clientCnx);
} else {
// If connection already release, create a new one.
cleanupConnection(logicalAddress, randomKey, completableFuture);
return innerPool
.computeIfAbsent(randomKey, k -> createConnection(logicalAddress, physicalAddress, randomKey));
}
});
} | 3.68 |
hbase_HelloHBase_deleteNamespaceAndTable | /**
* Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete
* Table and delete Namespace.
* @param admin Standard Admin object
* @throws IOException If IO problem is encountered
*/
static void deleteNamespaceAndTable(final Admin admin) throws IOException {
if (admin.tableExists(MY_TABLE_NAME)) {
System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "].");
admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it.
admin.deleteTable(MY_TABLE_NAME);
}
if (namespaceExists(admin, MY_NAMESPACE_NAME)) {
System.out.println("Deleting Namespace [" + MY_NAMESPACE_NAME + "].");
admin.deleteNamespace(MY_NAMESPACE_NAME);
}
} | 3.68 |
hadoop_ManifestCommitter_getTaskAttemptPath | /**
* Compute the path where the output of a task attempt is stored until
* that task is committed.
* @param context the context of the task attempt.
* @return the path where a task attempt should be stored.
*/
@VisibleForTesting
public Path getTaskAttemptPath(TaskAttemptContext context) {
return enterCommitter(false, context).getTaskAttemptDir();
} | 3.68 |
hbase_MetricRegistryInfo_getMetricsDescription | /**
* Get the description of what this source exposes.
*/
public String getMetricsDescription() {
return metricsDescription;
} | 3.68 |
flink_FlinkBushyJoinReorderRule_createTopProject | /**
* Creates the topmost projection that will sit on top of the selected join ordering. The
* projection needs to match the original join ordering. Also, places any post-join filters on
* top of the project.
*/
private static RelNode createTopProject(
RelBuilder relBuilder,
LoptMultiJoin multiJoin,
JoinPlan finalPlan,
List<String> fieldNames) {
List<RexNode> newProjExprs = new ArrayList<>();
RexBuilder rexBuilder = multiJoin.getMultiJoinRel().getCluster().getRexBuilder();
List<Integer> newJoinOrder = new ArrayList<>(finalPlan.factorIds);
int nJoinFactors = multiJoin.getNumJoinFactors();
List<RelDataTypeField> fields = multiJoin.getMultiJoinFields();
// create a mapping from each factor to its field offset in the join
// ordering
final Map<Integer, Integer> factorToOffsetMap = new HashMap<>();
for (int pos = 0, fieldStart = 0; pos < nJoinFactors; pos++) {
factorToOffsetMap.put(newJoinOrder.get(pos), fieldStart);
fieldStart += multiJoin.getNumFieldsInJoinFactor(newJoinOrder.get(pos));
}
for (int currFactor = 0; currFactor < nJoinFactors; currFactor++) {
// if the factor is the right factor in a removable self-join,
// then where possible, remap references to the right factor to
// the corresponding reference in the left factor
Integer leftFactor = null;
if (multiJoin.isRightFactorInRemovableSelfJoin(currFactor)) {
leftFactor = multiJoin.getOtherSelfJoinFactor(currFactor);
}
for (int fieldPos = 0;
fieldPos < multiJoin.getNumFieldsInJoinFactor(currFactor);
fieldPos++) {
int newOffset =
requireNonNull(
factorToOffsetMap.get(currFactor),
() -> "factorToOffsetMap.get(currFactor)")
+ fieldPos;
if (leftFactor != null) {
Integer leftOffset = multiJoin.getRightColumnMapping(currFactor, fieldPos);
if (leftOffset != null) {
newOffset =
requireNonNull(
factorToOffsetMap.get(leftFactor),
"factorToOffsetMap.get(leftFactor)")
+ leftOffset;
}
}
newProjExprs.add(
rexBuilder.makeInputRef(
fields.get(newProjExprs.size()).getType(), newOffset));
}
}
relBuilder.clear();
relBuilder.push(finalPlan.relNode);
relBuilder.project(newProjExprs, fieldNames);
// Place the post-join filter (if it exists) on top of the final projection.
RexNode postJoinFilter = multiJoin.getMultiJoinRel().getPostJoinFilter();
if (postJoinFilter != null) {
relBuilder.filter(postJoinFilter);
}
return relBuilder.build();
} | 3.68 |
hudi_ArchivalUtils_getMinAndMaxInstantsToKeep | /**
* getMinAndMaxInstantsToKeep is used by archival service to find the
* min instants and max instants to keep in the active timeline
* @param table table implementation extending org.apache.hudi.table.HoodieTable
* @param metaClient meta client
* @return Pair containing min instants and max instants to keep.
*/
public static Pair<Integer,Integer> getMinAndMaxInstantsToKeep(HoodieTable<?, ?, ?, ?> table, HoodieTableMetaClient metaClient) {
HoodieWriteConfig config = table.getConfig();
HoodieTimeline completedCommitsTimeline = table.getCompletedCommitsTimeline();
Option<HoodieInstant> latestCommit = completedCommitsTimeline.lastInstant();
HoodieCleaningPolicy cleanerPolicy = config.getCleanerPolicy();
int cleanerCommitsRetained = config.getCleanerCommitsRetained();
int cleanerHoursRetained = config.getCleanerHoursRetained();
int maxInstantsToKeep;
int minInstantsToKeep;
Option<HoodieInstant> earliestCommitToRetain = getEarliestCommitToRetain(metaClient, latestCommit, cleanerPolicy, cleanerCommitsRetained, cleanerHoursRetained);
int configuredMinInstantsToKeep = config.getMinCommitsToKeep();
int configuredMaxInstantsToKeep = config.getMaxCommitsToKeep();
if (earliestCommitToRetain.isPresent()) {
int minInstantsToKeepBasedOnCleaning =
completedCommitsTimeline.findInstantsAfter(earliestCommitToRetain.get().getTimestamp())
.countInstants() + 2;
if (configuredMinInstantsToKeep < minInstantsToKeepBasedOnCleaning) {
maxInstantsToKeep = minInstantsToKeepBasedOnCleaning
+ configuredMaxInstantsToKeep - configuredMinInstantsToKeep;
minInstantsToKeep = minInstantsToKeepBasedOnCleaning;
LOG.warn("The configured archival configs {}={} is more aggressive than the cleaning "
+ "configs as the earliest commit to retain is {}. Adjusted the archival configs "
+ "to be {}={} and {}={}",
MIN_COMMITS_TO_KEEP.key(), configuredMinInstantsToKeep, earliestCommitToRetain.get(),
MIN_COMMITS_TO_KEEP.key(), minInstantsToKeep,
MAX_COMMITS_TO_KEEP.key(), maxInstantsToKeep);
switch (cleanerPolicy) {
case KEEP_LATEST_COMMITS:
LOG.warn("Cleaning configs: {}=KEEP_LATEST_COMMITS {}={}", CLEANER_POLICY.key(),
CLEANER_COMMITS_RETAINED.key(), cleanerCommitsRetained);
break;
case KEEP_LATEST_BY_HOURS:
LOG.warn("Cleaning configs: {}=KEEP_LATEST_BY_HOURS {}={}", CLEANER_POLICY.key(),
CLEANER_HOURS_RETAINED.key(), cleanerHoursRetained);
break;
case KEEP_LATEST_FILE_VERSIONS:
LOG.warn("Cleaning configs: {}=CLEANER_FILE_VERSIONS_RETAINED {}={}", CLEANER_POLICY.key(),
CLEANER_FILE_VERSIONS_RETAINED.key(), config.getCleanerFileVersionsRetained());
break;
default:
break;
}
} else {
maxInstantsToKeep = configuredMaxInstantsToKeep;
minInstantsToKeep = configuredMinInstantsToKeep;
}
} else {
maxInstantsToKeep = configuredMaxInstantsToKeep;
minInstantsToKeep = configuredMinInstantsToKeep;
}
return Pair.of(minInstantsToKeep, maxInstantsToKeep);
} | 3.68 |
flink_BinaryRowData_isInFixedLengthPart | /**
* If it is a fixed-length field, we can call this BinaryRowData's setXX method for in-place
* updates. If it is variable-length field, can't use this method, because the underlying data
* is stored continuously.
*/
public static boolean isInFixedLengthPart(LogicalType type) {
switch (type.getTypeRoot()) {
case BOOLEAN:
case TINYINT:
case SMALLINT:
case INTEGER:
case DATE:
case TIME_WITHOUT_TIME_ZONE:
case INTERVAL_YEAR_MONTH:
case BIGINT:
case INTERVAL_DAY_TIME:
case FLOAT:
case DOUBLE:
return true;
case DECIMAL:
return DecimalData.isCompact(((DecimalType) type).getPrecision());
case TIMESTAMP_WITHOUT_TIME_ZONE:
return TimestampData.isCompact(((TimestampType) type).getPrecision());
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
return TimestampData.isCompact(((LocalZonedTimestampType) type).getPrecision());
default:
return false;
}
} | 3.68 |
morf_ConnectionResources_getFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming | /**
*
* The JDBC fetch size to use when performing bulk select operations while allowing connection use, intended to replace the default in {@link SqlDialect#fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming()}.
* The default behaviour (returning null) is interpreted as "not set" rather than 0.
* @return The number of rows to try and fetch at a time (default) when
* performing bulk select operations and needing to use the connection while
* the {@link ResultSet} is open.
*/
public default Integer getFetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming(){
return null;
} | 3.68 |
hadoop_MountTableStoreImpl_checkMountTableEntryPermission | /**
* Whether a mount table entry can be accessed by the current context.
*
* @param src mount entry being accessed
* @param action type of action being performed on the mount entry
* @throws AccessControlException if mount table cannot be accessed
*/
private void checkMountTableEntryPermission(String src, FsAction action)
throws IOException {
final MountTable partial = MountTable.newInstance();
partial.setSourcePath(src);
final Query<MountTable> query = new Query<>(partial);
final MountTable entry = getDriver().get(getRecordClass(), query);
if (entry != null) {
RouterPermissionChecker pc = RouterAdminServer.getPermissionChecker();
if (pc != null) {
pc.checkPermission(entry, action);
}
}
} | 3.68 |
hadoop_FilterFileSystem_rename | /**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*/
@Override
public boolean rename(Path src, Path dst) throws IOException {
return fs.rename(src, dst);
} | 3.68 |
rocketmq-connect_MemoryClusterManagementServiceImpl_hasClusterStoreTopic | /**
* Check if Cluster Store Topic exists.
*
* @return true if Cluster Store Topic exists, otherwise return false.
*/
@Override
public boolean hasClusterStoreTopic() {
return false;
} | 3.68 |
flink_TypeStrategies_explicit | /** Type strategy that returns a fixed {@link DataType}. */
public static TypeStrategy explicit(DataType dataType) {
return new ExplicitTypeStrategy(dataType);
} | 3.68 |
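A short usage sketch, assuming the Table API type-inference classes in org.apache.flink.table.types.inference; a typical place for an explicit strategy is the output type of a user-defined function.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.inference.TypeInference;
import org.apache.flink.table.types.inference.TypeStrategies;

class ExplicitStrategyExample {
    // Declares that a function always returns STRING, regardless of its input types.
    static TypeInference stringOutput() {
        return TypeInference.newBuilder()
                .outputTypeStrategy(TypeStrategies.explicit(DataTypes.STRING()))
                .build();
    }
}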
hudi_HFileBootstrapIndex_writeNextPartition | /**
* Append bootstrap index entries for next partitions in sorted order.
* @param partitionPath Hudi Partition Path
* @param bootstrapPartitionPath Source Partition Path
* @param bootstrapFileMappings Bootstrap Source File to Hudi File Id mapping
*/
private void writeNextPartition(String partitionPath, String bootstrapPartitionPath,
List<BootstrapFileMapping> bootstrapFileMappings) {
try {
LOG.info("Adding bootstrap partition Index entry for partition :" + partitionPath
+ ", bootstrap Partition :" + bootstrapPartitionPath + ", Num Entries :" + bootstrapFileMappings.size());
LOG.info("ADDING entries :" + bootstrapFileMappings);
HoodieBootstrapPartitionMetadata bootstrapPartitionMetadata = new HoodieBootstrapPartitionMetadata();
bootstrapPartitionMetadata.setBootstrapPartitionPath(bootstrapPartitionPath);
bootstrapPartitionMetadata.setPartitionPath(partitionPath);
bootstrapPartitionMetadata.setFileIdToBootstrapFile(
bootstrapFileMappings.stream().map(m -> Pair.of(m.getFileId(),
m.getBootstrapFileStatus())).collect(Collectors.toMap(Pair::getKey, Pair::getValue)));
Option<byte[]> bytes = TimelineMetadataUtils.serializeAvroMetadata(bootstrapPartitionMetadata, HoodieBootstrapPartitionMetadata.class);
if (bytes.isPresent()) {
indexByPartitionWriter
.append(new KeyValue(Bytes.toBytes(getPartitionKey(partitionPath)), new byte[0], new byte[0],
HConstants.LATEST_TIMESTAMP, KeyValue.Type.Put, bytes.get()));
numPartitionKeysAdded++;
}
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
} | 3.68 |
flink_Optimizer_createPreOptimizedPlan | /**
 * This function performs only the first step of the compilation process - the creation of the
* optimizer representation of the plan. No estimations or enumerations of alternatives are done
* here.
*
* @param program The plan to generate the optimizer representation for.
 * @return The optimizer representation of the plan, as a collection of all data sinks from
 *     which the plan can be traversed.
*/
public static List<DataSinkNode> createPreOptimizedPlan(Plan program) {
GraphCreatingVisitor graphCreator = new GraphCreatingVisitor(1, null);
program.accept(graphCreator);
return graphCreator.getSinks();
} | 3.68 |
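A minimal sketch of feeding a batch program into this first compilation step; DiscardingOutputFormat and createProgramPlan() are assumed from the legacy DataSet API, and the job name is arbitrary.

import java.util.List;
import org.apache.flink.api.common.Plan;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.optimizer.Optimizer;
import org.apache.flink.optimizer.dag.DataSinkNode;

class PreOptimizedPlanExample {
    public static void main(String[] args) {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(1, 2, 3).output(new DiscardingOutputFormat<>());
        Plan plan = env.createProgramPlan("pre-optimize example");
        // Only builds the optimizer DAG; no cost estimation or plan enumeration happens here.
        List<DataSinkNode> sinks = Optimizer.createPreOptimizedPlan(plan);
        System.out.println("number of sinks: " + sinks.size());
    }
}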
flink_SegmentsUtil_setBoolean | /**
 * Sets a boolean value into the segments.
 *
 * @param segments target segments.
 * @param offset value offset.
 * @param value boolean value to set.
*/
public static void setBoolean(MemorySegment[] segments, int offset, boolean value) {
if (inFirstSegment(segments, offset, 1)) {
segments[0].putBoolean(offset, value);
} else {
setBooleanMultiSegments(segments, offset, value);
}
} | 3.68 |
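A small usage sketch with two wrapped heap segments; the import of SegmentsUtil is an assumption, since the class has moved and been renamed across Flink versions.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
// Package assumed from the Blink table runtime; newer Flink versions use a renamed class.
import org.apache.flink.table.runtime.util.SegmentsUtil;

class SetBooleanExample {
    public static void main(String[] args) {
        MemorySegment[] segments = new MemorySegment[] {
                MemorySegmentFactory.wrap(new byte[8]),
                MemorySegmentFactory.wrap(new byte[8])
        };
        // Offset 10 falls into the second segment, so the multi-segment path is exercised.
        SegmentsUtil.setBoolean(segments, 10, true);
        System.out.println(SegmentsUtil.getBoolean(segments, 10)); // true
    }
}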
flink_EmbeddedRocksDBStateBackend_getPredefinedOptions | /**
* Gets the currently set predefined options for RocksDB. The default options (if nothing was
* set via {@link #setPredefinedOptions(PredefinedOptions)}) are {@link
* PredefinedOptions#DEFAULT}.
*
 * <p>If user-configured options within {@link RocksDBConfigurableOptions} are set (through
 * flink-conf.yaml) or a user-defined options factory is set (via {@link
* #setRocksDBOptions(RocksDBOptionsFactory)}), then the options from the factory are applied on
* top of the predefined and customized options.
*
* @return The currently set predefined options for RocksDB.
*/
@VisibleForTesting
public PredefinedOptions getPredefinedOptions() {
if (predefinedOptions == null) {
predefinedOptions = PredefinedOptions.DEFAULT;
}
return predefinedOptions;
} | 3.68 |
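A brief usage sketch; the class and enum names are assumed from the flink-statebackend-rocksdb module of recent releases.

import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.contrib.streaming.state.PredefinedOptions;

class PredefinedOptionsExample {
    public static void main(String[] args) {
        EmbeddedRocksDBStateBackend backend = new EmbeddedRocksDBStateBackend();
        // Nothing configured yet, so the getter falls back to DEFAULT.
        System.out.println(backend.getPredefinedOptions());  // DEFAULT
        backend.setPredefinedOptions(PredefinedOptions.SPINNING_DISK_OPTIMIZED);
        System.out.println(backend.getPredefinedOptions());  // SPINNING_DISK_OPTIMIZED
    }
}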
open-banking-gateway_FintechSecureStorage_registerFintech | /**
* Registers FinTech in Datasafe and DB.
* @param fintech new FinTech to register
 * @param password FinTech's KeyStore password.
*/
public void registerFintech(Fintech fintech, Supplier<char[]> password) {
this.userProfile()
.createDocumentKeystore(
fintech.getUserIdAuth(password),
config.defaultPrivateTemplate(fintech.getUserIdAuth(password)).buildPrivateProfile()
);
} | 3.68 |
hmily_CommonAssembler_assembleHmilyExpressionSegment | /**
* Assemble hmily expression segment.
*
* @param expression expression
* @return hmily expression segment
*/
public static HmilyExpressionSegment assembleHmilyExpressionSegment(final ExpressionSegment expression) {
HmilyExpressionSegment result = null;
if (expression instanceof BinaryOperationExpression) {
HmilyExpressionSegment hmilyLeft = assembleHmilyExpressionSegment(((BinaryOperationExpression) expression).getLeft());
HmilyExpressionSegment hmilyRight = assembleHmilyExpressionSegment(((BinaryOperationExpression) expression).getRight());
result = new HmilyBinaryOperationExpression(expression.getStartIndex(), expression.getStopIndex(), hmilyLeft, hmilyRight,
((BinaryOperationExpression) expression).getOperator(), ((BinaryOperationExpression) expression).getText());
} else if (expression instanceof ColumnSegment) {
result = CommonAssembler.assembleHmilyColumnSegment((ColumnSegment) expression);
} else if (expression instanceof CommonExpressionSegment) {
result = new HmilyCommonExpressionSegment(expression.getStartIndex(),
expression.getStopIndex(), ((CommonExpressionSegment) expression).getText());
} else if (expression instanceof ExpressionProjectionSegment) {
result = new HmilyExpressionProjectionSegment(expression.getStartIndex(),
expression.getStopIndex(), ((ExpressionProjectionSegment) expression).getText());
} else if (expression instanceof LiteralExpressionSegment) {
result = new HmilyLiteralExpressionSegment(expression.getStartIndex(),
expression.getStopIndex(), ((LiteralExpressionSegment) expression).getLiterals());
} else if (expression instanceof ParameterMarkerExpressionSegment) {
result = new HmilyParameterMarkerExpressionSegment(expression.getStartIndex(),
expression.getStopIndex(), ((ParameterMarkerExpressionSegment) expression).getParameterMarkerIndex());
} else if (expression instanceof InExpression && ((InExpression) expression).getLeft() instanceof ColumnSegment) {
// TODO
ColumnSegment column = (ColumnSegment) ((InExpression) expression).getLeft();
} else if (expression instanceof BetweenExpression && ((BetweenExpression) expression).getLeft() instanceof ColumnSegment) {
// TODO
ColumnSegment column = (ColumnSegment) ((BetweenExpression) expression).getLeft();
}
return result;
} | 3.68 |
flink_ProjectOperator_projectTuple5 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4> ProjectOperator<T, Tuple5<T0, T1, T2, T3, T4>> projectTuple5() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>> tType =
new TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>>(fTypes);
return new ProjectOperator<T, Tuple5<T0, T1, T2, T3, T4>>(
this.ds, this.fieldIndexes, tType);
} | 3.68 |
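A usage sketch with the legacy DataSet API; callers normally go through DataSet#project, which is assumed here to delegate to the generated projectTupleN helpers, and the element values are arbitrary.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;

class ProjectExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple3<Integer, String, Double>> input =
                env.fromElements(Tuple3.of(1, "a", 1.0), Tuple3.of(2, "b", 2.0));
        // Keep only fields 1 and 0, in that order; the assignment target fixes the output type.
        DataSet<Tuple2<String, Integer>> projected = input.project(1, 0);
        projected.print();
    }
}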
framework_VAbstractCalendarPanel_getBackwardKey | /**
 * The key that selects the previous day in the calendar. By default this is
 * the left arrow key, but by overriding this method it can be changed to
 * whatever you like.
*
* @return the backward key
*/
protected int getBackwardKey() {
return KeyCodes.KEY_LEFT;
} | 3.68 |
flink_ProducerMergedPartitionFileWriter_flushBuffers | /** Write all buffers to the disk. */
private void flushBuffers(List<Tuple2<Buffer, Integer>> bufferAndIndexes, long expectedBytes)
throws IOException {
if (bufferAndIndexes.isEmpty()) {
return;
}
ByteBuffer[] bufferWithHeaders = generateBufferWithHeaders(bufferAndIndexes);
BufferReaderWriterUtil.writeBuffers(dataFileChannel, expectedBytes, bufferWithHeaders);
totalBytesWritten += expectedBytes;
} | 3.68 |