name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hadoop_DataNodeVolumeMetrics_getDataFileIoSampleCount | // Based on dataFileIoRate
public long getDataFileIoSampleCount() {
return dataFileIoRate.lastStat().numSamples();
} | 3.68 |
framework_CssLayoutConnector_getWidget | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.AbstractComponentConnector#getWidget()
*/
@Override
public VCssLayout getWidget() {
return (VCssLayout) super.getWidget();
} | 3.68 |
framework_VCheckBoxGroup_setOptionEnabled | /**
* Updates the checkbox's enabled state according to the widget's enabled and
* read-only states and the item's enabled state.
*
* @param checkBox
* the checkbox to update
* @param item
* the item for the checkbox
*/
protected void setOptionEnabled(VCheckBox checkBox, JsonObject item) {
boolean optionEnabled = !item
.getBoolean(ListingJsonConstants.JSONKEY_ITEM_DISABLED);
boolean enabled = optionEnabled && !isReadonly() && isEnabled();
checkBox.setEnabled(enabled);
// #9258 apply the v-disabled class when disabled for UX
checkBox.setStyleName(StyleConstants.DISABLED,
!isEnabled() || !optionEnabled);
} | 3.68 |
hbase_HMaster_isBalancerOn | /**
* Queries the state of the {@link LoadBalancerStateStore}. If the balancer is not initialized,
* false is returned.
* @return The state of the load balancer, or false if the load balancer isn't defined.
*/
public boolean isBalancerOn() {
return !isInMaintenanceMode() && loadBalancerStateStore != null && loadBalancerStateStore.get();
} | 3.68 |
framework_FileDownloader_setFileDownloadResource | /**
* Sets the resource that is downloaded when the extended component is
* clicked.
*
* @param resource
* the resource to download
*/
public void setFileDownloadResource(Resource resource) {
setResource("dl", resource);
} | 3.68 |
flink_WindowMapState_remove | /**
* Deletes the mapping of the given key.
*
* @param window The window namespace of the mapping
* @param key The key of the mapping
* @throws Exception Thrown if the system cannot access the state.
*/
public void remove(W window, RowData key) throws Exception {
windowState.setCurrentNamespace(window);
windowState.remove(key);
} | 3.68 |
hbase_Get_readAllVersions | /**
* Get all available versions.
* @return this for invocation chaining
*/
public Get readAllVersions() {
this.maxVersions = Integer.MAX_VALUE;
return this;
} | 3.68 |
framework_RenderSpace_getWidth | /**
* Returns pixels available horizontally for contained widget, including
* possible scrollbars.
*/
@Override
public int getWidth() {
return super.getWidth();
} | 3.68 |
framework_UIDL_getLongVariable | /**
* Gets the value of the named variable.
*
* @param name
* the name of the variable
* @return the value of the variable
*/
public long getLongVariable(String name) {
return (long) var().getRawNumber(name);
} | 3.68 |
hbase_MetricsSource_shipBatch | /**
* Convenience method to apply changes to metrics due to shipping a batch of logs.
* @param batchSize the size of the batch that was shipped to sinks.
* @param sizeInBytes total size, in bytes, of the batch that was shipped to sinks.
* @param hfiles total number of hfiles shipped to sinks.
*/
public void shipBatch(long batchSize, int sizeInBytes, long hfiles) {
shipBatch(batchSize, sizeInBytes);
singleSourceSource.incrHFilesShipped(hfiles);
globalSourceSource.incrHFilesShipped(hfiles);
} | 3.68 |
cron-utils_CronDefinitionBuilder_register | /**
* Registers a certain FieldDefinition.
*
* @param definition - FieldDefinition instance, never null
*/
public void register(final FieldDefinition definition) {
//ensure that we can't register a mandatory definition if there are already optional ones
boolean hasOptionalField = false;
for (final FieldDefinition fieldDefinition : fields.values()) {
if (fieldDefinition.isOptional()) {
hasOptionalField = true;
break;
}
}
if (!definition.isOptional() && hasOptionalField) {
throw new IllegalArgumentException("Can't register mandatory definition after an optional definition.");
}
fields.put(definition.getFieldName(), definition);
} | 3.68 |
morf_AbstractSelectStatementBuilder_fields | /**
* Adds fields to the select list.
*
* @param fields The fields to add
* @return this, for method chaining.
*/
public T fields(AliasedFieldBuilder... fields) {
return fields(Arrays.asList(fields));
} | 3.68 |
hadoop_WorkReport_getRetry | /**
* @return Number of unsuccessful attempts to process work.
*/
public int getRetry() {
return retry;
} | 3.68 |
framework_ConnectorTracker_isClientSideInitialized | /**
* Checks whether the given connector has already been initialized in the
* browser. The given connector should be registered with this connector
* tracker.
*
* @param connector
* the client connector to check
* @return <code>true</code> if the initial state has previously been sent
* to the browser, <code>false</code> if the client-side doesn't
* already know anything about the connector.
*/
public boolean isClientSideInitialized(ClientConnector connector) {
assert connectorIdToConnector.get(connector
.getConnectorId()) == connector : "Connector should be registered with this ConnectorTracker";
return !uninitializedConnectors.contains(connector);
} | 3.68 |
hadoop_AbfsConfiguration_getAccountName | /**
* Gets the Azure Storage account name corresponding to this instance of configuration.
* @return the Azure Storage account name
*/
public String getAccountName() {
return accountName;
} | 3.68 |
framework_AbstractOrderedLayout_addComponent | /**
* Adds a component into indexed position in this container.
*
* @param c
* the component to be added.
* @param index
* the index of the component position. The components currently
* in and after the position are shifted forwards.
*/
public void addComponent(Component c, int index) {
// If c is already in this, we must remove it before proceeding
// see ticket #7668
if (equals(c.getParent())) {
// When c is removed, all components after it are shifted down
if (index > getComponentIndex(c)) {
index--;
}
removeComponent(c);
}
components.add(index, c);
try {
super.addComponent(c);
} catch (IllegalArgumentException e) {
components.remove(c);
throw e;
}
componentAdded(c);
} | 3.68 |
hudi_SparkRDDReadClient_checkExists | /**
* Checks if the given [Keys] exist in the hoodie table and returns [Key, Option[FullFilePath]]. If the optional
* FullFilePath value is not present, then the key is not found. If the FullFilePath value is present, it is the path
* component (without scheme) of the URI of the underlying file.
*/
public JavaPairRDD<HoodieKey, Option<Pair<String, String>>> checkExists(JavaRDD<HoodieKey> hoodieKeys) {
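// Wrap each key in an empty record, tag it against the index, and emit a (partitionPath, fileId)
// pair for every key whose current location is known; keys without a known location map to empty.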
return HoodieJavaRDD.getJavaRDD(
index.tagLocation(HoodieJavaRDD.of(hoodieKeys.map(k -> new HoodieAvroRecord<>(k, null))),
context, hoodieTable))
.mapToPair(hr -> new Tuple2<>(hr.getKey(), hr.isCurrentLocationKnown()
? Option.of(Pair.of(hr.getPartitionPath(), hr.getCurrentLocation().getFileId()))
: Option.empty())
);
} | 3.68 |
dubbo_ServiceInstanceMetadataUtils_getExportedServicesRevision | /**
* The revision for all exported Dubbo services from the specified {@link ServiceInstance}.
*
* @param serviceInstance the specified {@link ServiceInstance}
* @return <code>null</code> if it does not exist
*/
public static String getExportedServicesRevision(ServiceInstance serviceInstance) {
return Optional.ofNullable(serviceInstance.getServiceMetadata())
.map(MetadataInfo::getRevision)
.filter(StringUtils::isNotEmpty)
.orElse(serviceInstance.getMetadata(EXPORTED_SERVICES_REVISION_PROPERTY_NAME));
} | 3.68 |
framework_LoginForm_getPasswordCaption | /**
* Gets the caption set with {@link #setPasswordCaption(String)}. Note that
* this method might not match what is shown to the user if
* {@link #createPasswordField()} has been overridden.
*
*
* @return the password field caption
*/
public String getPasswordCaption() {
return passwordCaption;
} | 3.68 |
hadoop_AppStoreController_search | /**
* Search for yarn applications from solr.
*
* @apiGroup AppStoreController
* @apiName search
* @api {get} /app_store/search Find application from appstore.
* @apiParam {String} q Keyword to search.
* @apiSuccess {Object} AppStoreEntry List of matched applications.
* @apiSuccessExample {json} Success-Response:
* HTTP/1.1 200 OK
* [
* {
* "id":"96b7833a-e3",
* "org":"Hortonworks",
* "name":"LAMP",
* "desc":"Linux Apache MySQL PHP web application",
* "icon":"/css/img/feather.png",
* "like":0,
* "download":0,
* "app":null
* },
* {
* ...
* }
* ]
* @param keyword - search for keyword
* @return - List of YARN applications matching keyword search.
*/
@GET
@Path("search")
@Produces(MediaType.APPLICATION_JSON)
public List<AppStoreEntry> search(@QueryParam("q") String keyword) {
AppCatalogSolrClient sc = new AppCatalogSolrClient();
return sc.search(keyword);
} | 3.68 |
flink_FunctionIdentifier_toList | /** List of the component names of this function identifier. */
public List<String> toList() {
if (objectIdentifier != null) {
return objectIdentifier.toList();
} else if (functionName != null) {
return Collections.singletonList(functionName);
} else {
throw new IllegalStateException(
"functionName and objectIdentifier are both null which should never happen.");
}
} | 3.68 |
framework_GlobalResourceHandler_unregisterConnector | /**
* Notifies this handler that resources registered for the given connector
* can be released.
*
* @param connector
* the connector for which any registered resources can be
* released.
*/
public void unregisterConnector(ClientConnector connector) {
Set<Resource> set = usedResources.remove(connector);
if (set == null) {
return;
}
for (Resource resource : set) {
Set<ClientConnector> users = resourceUsers.get(resource);
users.remove(connector);
if (users.isEmpty()) {
resourceUsers.remove(resource);
unregisterResource(resource);
}
}
} | 3.68 |
pulsar_ModularLoadManagerImpl_selectBrokerForAssignment | /**
* As the leader broker, find a suitable broker for the assignment of the given bundle.
*
* @param serviceUnit
* ServiceUnitId for the bundle.
* @return The name of the selected broker, as it appears on metadata store.
*/
@Override
public Optional<String> selectBrokerForAssignment(final ServiceUnitId serviceUnit) {
// Use brokerCandidateCache as a lock to reduce synchronization.
long startTime = System.nanoTime();
try {
synchronized (brokerCandidateCache) {
final String bundle = serviceUnit.toString();
if (preallocatedBundleToBroker.containsKey(bundle)) {
// If the given bundle is already in preallocated, return the selected broker.
return Optional.of(preallocatedBundleToBroker.get(bundle));
}
Optional<String> broker = selectBroker(serviceUnit);
if (!broker.isPresent()) {
// If no broker is selected, return empty.
return broker;
}
// Add new bundle to preallocated.
preallocateBundle(bundle, broker.get());
return broker;
}
} finally {
selectBrokerForAssignment.observe(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
}
} | 3.68 |
hadoop_AbfsCountersImpl_getRegistry | /**
* Getter for MetricsRegistry.
*
* @return MetricsRegistry or null.
*/
private MetricsRegistry getRegistry() {
return registry;
} | 3.68 |
morf_InlineTableUpgrader_preUpgrade | /**
* Perform initialisation before the main upgrade steps occur.
*/
public void preUpgrade() {
sqlStatementWriter.writeSql(sqlDialect.tableDeploymentStatements(idTable));
} | 3.68 |
flink_OneShotLatch_awaitQuietly | /**
* Calls {@link #await(long, TimeUnit)} and transforms any {@link InterruptedException} or
* {@link TimeoutException} into a {@link RuntimeException}.
*/
public void awaitQuietly(long timeout, TimeUnit timeUnit) {
try {
await(timeout, timeUnit);
} catch (InterruptedException | TimeoutException e) {
throw new RuntimeException(e);
}
} | 3.68 |
hbase_HFileOutputFormat2_configureStoragePolicy | /**
* Configure block storage policy for CF after the directory is created.
*/
static void configureStoragePolicy(final Configuration conf, final FileSystem fs,
byte[] tableAndFamily, Path cfPath) {
if (null == conf || null == fs || null == tableAndFamily || null == cfPath) {
return;
}
String policy = conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(tableAndFamily),
conf.get(STORAGE_POLICY_PROPERTY));
CommonFSUtils.setStoragePolicy(fs, cfPath, policy);
} | 3.68 |
framework_Escalator_isSpacer | /** Checks if a given element is a spacer element */
public boolean isSpacer(Element row) {
/*
* If this needs optimization, we could do a more heuristic check
* based on stylenames and stuff, instead of iterating through the
* map.
*/
for (SpacerImpl spacer : rowIndexToSpacer.values()) {
if (spacer.getRootElement().equals(row)) {
return true;
}
}
return false;
} | 3.68 |
framework_VCalendar_updateEventsToMonthGrid | /**
* Adds events to the month grid.
*
* @param events
* The events to add
* @param drawImmediately
* Whether the grid should be rendered immediately (currently not in
* use)
*
*/
public void updateEventsToMonthGrid(Collection<CalendarEvent> events,
boolean drawImmediately) {
for (CalendarEvent e : sortEvents(events)) {
// FIXME Why is drawImmediately not used ?????
addEventToMonthGrid(e, false);
}
} | 3.68 |
hadoop_ServiceRegistryUtils_registryPathForInstance | /**
* Get the registry path for an instance under the user's home node
* @param instanceName application instance
* @return a path to the registry location for this application instance.
*/
public static String registryPathForInstance(String instanceName) {
return RegistryUtils.servicePath(
RegistryUtils.currentUser(), YarnServiceConstants.APP_TYPE, instanceName
);
} | 3.68 |
hbase_RegionServerFlushTableProcedureManager_getRegionsToFlush | /**
* Get the list of regions to flush for the table on this server. It is possible that if a region
* moves somewhere between the calls we'll miss the region.
* @return the list of online regions. Empty list is returned if no regions.
*/
private List<HRegion> getRegionsToFlush(String table) throws IOException {
return (List<HRegion>) rss.getRegions(TableName.valueOf(table));
} | 3.68 |
hibernate-validator_MethodValidationConfiguration_isAllowOverridingMethodAlterParameterConstraint | /**
* @return {@code true} if overriding methods are allowed to alter parameter constraints,
* {@code false} otherwise.
*/
public boolean isAllowOverridingMethodAlterParameterConstraint() {
return this.allowOverridingMethodAlterParameterConstraint;
} | 3.68 |
hadoop_S3ACommitterFactory_createTaskCommitter | /**
* Create a task committer.
* @param fileSystem destination FS.
* @param outputPath final output path for work
* @param context job context
* @return a committer
* @throws IOException instantiation failure
*/
@Override
public PathOutputCommitter createTaskCommitter(S3AFileSystem fileSystem,
Path outputPath,
TaskAttemptContext context) throws IOException {
AbstractS3ACommitterFactory factory = chooseCommitterFactory(fileSystem,
outputPath,
context.getConfiguration());
if (factory != null) {
PathOutputCommitter committer = factory.createTaskCommitter(
fileSystem, outputPath, context);
LOG.info("Using committer {} to output data to {}",
(committer instanceof AbstractS3ACommitter
? ((AbstractS3ACommitter) committer).getName()
: committer.toString()),
outputPath);
return committer;
} else {
LOG.warn("Using standard FileOutputCommitter to commit work."
+ " This is slow and potentially unsafe.");
return createFileOutputCommitter(outputPath, context);
}
} | 3.68 |
hadoop_Trash_checkpoint | /**
* Create a trash checkpoint.
* @throws IOException raised on errors performing I/O.
*/
public void checkpoint() throws IOException {
trashPolicy.createCheckpoint();
} | 3.68 |
hadoop_ClientDatanodeProtocolServerSideTranslatorPB_cancelDiskBalancerPlan | /**
* Cancel an executing plan.
* @param controller - RpcController
* @param request - Request
* @return Response.
* @throws ServiceException
*/
@Override
public CancelPlanResponseProto cancelDiskBalancerPlan(
RpcController controller, CancelPlanRequestProto request)
throws ServiceException {
try {
impl.cancelDiskBalancePlan(request.getPlanID());
return CancelPlanResponseProto.newBuilder().build();
} catch (Exception e) {
throw new ServiceException(e);
}
} | 3.68 |
flink_CompilerHints_addUniqueFields | /**
* Adds multiple FieldSets to be unique
*
* @param uniqueFieldSets A set of unique FieldSet
*/
public void addUniqueFields(Set<FieldSet> uniqueFieldSets) {
if (this.uniqueFields == null) {
this.uniqueFields = new HashSet<FieldSet>();
}
this.uniqueFields.addAll(uniqueFieldSets);
} | 3.68 |
framework_Slider_setResolution | /**
* Set a new resolution for the slider. The resolution is the number of
* digits after the decimal point.
*
* @throws IllegalArgumentException
* if resolution is negative.
*
* @param resolution
* the number of digits after the decimal point
*/
public void setResolution(int resolution) {
if (resolution < 0) {
throw new IllegalArgumentException(
"Cannot set a negative resolution to Slider");
}
getState().resolution = resolution;
} | 3.68 |
framework_TabSheet_getTabSheet | /**
* The TabSheet where the event occurred.
*
* @return the TabSheet where the event occurred
*/
public TabSheet getTabSheet() {
return (TabSheet) getSource();
} | 3.68 |
hbase_Scan_setTimeRange | /**
* Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). Note,
* default maximum versions to return is 1. If your time range spans more than one version and you
* want all versions returned, up the number of versions beyond the default.
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @see #readAllVersions()
* @see #readVersions(int)
*/
public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = TimeRange.between(minStamp, maxStamp);
return this;
} | 3.68 |
flink_FactoryUtil_createDynamicTableSink | /**
* @deprecated Use {@link #createDynamicTableSink(DynamicTableSinkFactory, ObjectIdentifier,
* ResolvedCatalogTable, Map, ReadableConfig, ClassLoader, boolean)}
*/
@Deprecated
public static DynamicTableSink createDynamicTableSink(
@Nullable DynamicTableSinkFactory preferredFactory,
ObjectIdentifier objectIdentifier,
ResolvedCatalogTable catalogTable,
ReadableConfig configuration,
ClassLoader classLoader,
boolean isTemporary) {
return createDynamicTableSink(
preferredFactory,
objectIdentifier,
catalogTable,
Collections.emptyMap(),
configuration,
classLoader,
isTemporary);
} | 3.68 |
hadoop_MetricsCache_getMetricInstance | /**
* Lookup a metric instance
* @param key name of the metric
* @return the metric instance
*/
public AbstractMetric getMetricInstance(String key) {
return metrics.get(key);
} | 3.68 |
hbase_RollingStatCalculator_removeData | /**
* Update the statistics after removing the given data value
*/
private void removeData(long data) {
currentSum = currentSum - (double) data;
currentSqrSum = currentSqrSum - ((double) data * data);
numberOfDataValues--;
} | 3.68 |
framework_CssLayout_getCss | /**
* Returns styles to be applied to given component. Override this method to
* inject custom style rules to components.
*
* <p>
* Note that styles are injected over previous styles before actual child
* rendering. Previous styles are not cleared, but overridden.
*
* <p>
* Note that one most often achieves better code style by separating the
* styling into a theme (with a custom theme and {@link #addStyleName(String)}).
* With your own custom styles it is also very easy to break browser
* compatibility.
*
* @param c
* the component
* @return css rules to be applied to component
*/
protected String getCss(Component c) {
return null;
} | 3.68 |
hudi_CompactionCommand_printAllCompactions | /**
* Prints all compaction details.
*/
private static String printAllCompactions(HoodieDefaultTimeline timeline,
Function<HoodieInstant, HoodieCompactionPlan> compactionPlanReader,
boolean includeExtraMetadata,
String sortByField,
boolean descending,
int limit,
boolean headerOnly) {
Stream<HoodieInstant> instantsStream = timeline.getWriteTimeline().getReverseOrderedInstants();
List<Pair<HoodieInstant, HoodieCompactionPlan>> compactionPlans = instantsStream
.map(instant -> Pair.of(instant, compactionPlanReader.apply(instant)))
.filter(pair -> pair.getRight() != null)
.collect(Collectors.toList());
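// Timestamps in the completed commit timeline identify compaction plans that have already finished.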
Set<String> committedInstants = timeline.getCommitTimeline().filterCompletedInstants()
.getInstantsAsStream().map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
List<Comparable[]> rows = new ArrayList<>();
for (Pair<HoodieInstant, HoodieCompactionPlan> compactionPlan : compactionPlans) {
HoodieCompactionPlan plan = compactionPlan.getRight();
HoodieInstant instant = compactionPlan.getLeft();
final HoodieInstant.State state;
if (committedInstants.contains(instant.getTimestamp())) {
state = HoodieInstant.State.COMPLETED;
} else {
state = instant.getState();
}
if (includeExtraMetadata) {
rows.add(new Comparable[] {instant.getTimestamp(), state.toString(),
plan.getOperations() == null ? 0 : plan.getOperations().size(),
plan.getExtraMetadata().toString()});
} else {
rows.add(new Comparable[] {instant.getTimestamp(), state.toString(),
plan.getOperations() == null ? 0 : plan.getOperations().size()});
}
}
Map<String, Function<Object, String>> fieldNameToConverterMap = new HashMap<>();
TableHeader header = new TableHeader()
.addTableHeaderField(HoodieTableHeaderFields.HEADER_COMPACTION_INSTANT_TIME)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_STATE)
.addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_TO_BE_COMPACTED);
if (includeExtraMetadata) {
header = header.addTableHeaderField(HoodieTableHeaderFields.HEADER_EXTRA_METADATA);
}
return HoodiePrintHelper.print(header, fieldNameToConverterMap, sortByField, descending, limit, headerOnly, rows);
} | 3.68 |
hadoop_NamenodeStatusReport_getNumDecomLiveDatanodes | /**
* Get the number of live decommissioned nodes.
*
* @return The number of live decommissioned nodes.
*/
public int getNumDecomLiveDatanodes() {
return this.liveDecomDatanodes;
} | 3.68 |
hbase_QuotaObserverChore_getNumRegions | /**
* Computes the total number of regions in a table.
*/
int getNumRegions(TableName table) throws IOException {
List<RegionInfo> regions = this.conn.getAdmin().getRegions(table);
if (regions == null) {
return 0;
}
// Filter the region replicas if any and return the original number of regions for a table.
RegionReplicaUtil.removeNonDefaultRegions(regions);
return regions.size();
} | 3.68 |
hbase_HRegionFileSystem_writeRegionInfoOnFilesystem | /**
* Write out an info file under the region directory. Useful for recovering mangled regions.
* @param regionInfoContent serialized version of the {@link RegionInfo}
* @param useTempDir indicate whether or not using the region .tmp dir for a safer file
* creation.
*/
private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent, final boolean useTempDir)
throws IOException {
Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
if (useTempDir) {
// Create in tmpDir and then move into place in case we crash after
// create but before close. If we don't successfully close the file,
// subsequent region reopens will fail the below because create is
// registered in NN.
// And then create the file
Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);
// If datanode crashes or if the RS goes down just before the close is called while trying to
// close the created regioninfo file in the .tmp directory then on next
// creation we will be getting AlreadyCreatedException.
// Hence delete and create the file if exists.
if (CommonFSUtils.isExists(fs, tmpPath)) {
CommonFSUtils.delete(fs, tmpPath, true);
}
// Write HRI to a file in case we need to recover hbase:meta
writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);
// Move the created file to the original path
if (fs.exists(tmpPath) && !rename(tmpPath, regionInfoFile)) {
throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
}
} else {
// Write HRI to a file in case we need to recover hbase:meta
writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
}
} | 3.68 |
hmily_GrpcInvokeContext_getArgs | /**
* get args.
*
* @return the args
*/
public Object[] getArgs() {
return args;
} | 3.68 |
flink_JobEdge_getOperatorLevelCachingDescription | /**
* Gets the operator-level caching description for this input.
*
* @return The description of operator-level caching, or null if none was set.
*/
public String getOperatorLevelCachingDescription() {
return operatorLevelCachingDescription;
} | 3.68 |
hbase_ServerManager_moveFromOnlineToDeadServers | /**
* Called when server has expired.
*/
// Locking in this class needs cleanup.
public synchronized void moveFromOnlineToDeadServers(final ServerName sn) {
synchronized (this.onlineServers) {
boolean online = this.onlineServers.containsKey(sn);
if (online) {
// Remove the server from the known servers lists and update load info BUT
// add to deadservers first; do this so it'll show in dead servers list if
// not in online servers list.
this.deadservers.putIfAbsent(sn);
this.onlineServers.remove(sn);
onlineServers.notifyAll();
} else {
// If not online, that is odd but may happen if 'Unknown Servers' -- where meta
// has references to servers not online nor in dead servers list. If
// 'Unknown Server', don't add to DeadServers else will be there for ever.
LOG.trace("Expiration of {} but server not online", sn);
}
}
} | 3.68 |
flink_Plugin_getClassLoader | /**
* Helper method to get the class loader used to load the plugin. This may be needed for some
* plugins that use dynamic class loading after the plugin was loaded.
*
* @return the class loader used to load the plugin.
*/
default ClassLoader getClassLoader() {
return Preconditions.checkNotNull(
this.getClass().getClassLoader(),
"%s plugin with null class loader",
this.getClass().getName());
} | 3.68 |
morf_SqlDialect_appendExceptSet | /**
* Appends EXCEPT set operators to the result.
*
* @param result except set operators will be appended here
* @param stmt statement with set operators
*/
protected void appendExceptSet(StringBuilder result, SelectStatement stmt) {
if (stmt.getSetOperators() != null) {
for (SetOperator operator : stmt.getSetOperators()) {
if (operator instanceof ExceptSetOperator) {
result.append(getSqlFrom((ExceptSetOperator) operator));
}
}
}
} | 3.68 |
hbase_HBaseTestingUtility_modifyTableSync | /**
* Modify a table, synchronous.
* @deprecated since 3.0.0 and will be removed in 4.0.0. Just use
* {@link Admin#modifyTable(TableDescriptor)} directly as it is synchronous now.
* @see Admin#modifyTable(TableDescriptor)
* @see <a href="https://issues.apache.org/jira/browse/HBASE-22002">HBASE-22002</a>
*/
@Deprecated
public static void modifyTableSync(Admin admin, TableDescriptor desc)
throws IOException, InterruptedException {
admin.modifyTable(desc);
} | 3.68 |
hbase_MultiByteBuff_limit | /**
* Returns the limit of this MBB
* @return limit of the MBB
*/
@Override
public int limit() {
return this.limit;
} | 3.68 |
hbase_Connection_getClusterId | /**
* Returns the cluster ID unique to this HBase cluster. <br>
* The default implementation is added to keep client compatibility.
*/
default String getClusterId() {
return null;
} | 3.68 |
hudi_TableSchemaResolver_getTableAvroSchema | /**
* Fetches the table's schema in Avro format as of the given instant.
*
* @param instant as of which the table's schema will be fetched
* @param includeMetadataFields whether Hudi metadata fields should be included in the returned schema
*/
public Schema getTableAvroSchema(HoodieInstant instant, boolean includeMetadataFields) throws Exception {
return getTableAvroSchemaInternal(includeMetadataFields, Option.of(instant)).orElseThrow(schemaNotFoundError());
} | 3.68 |
framework_Upload_addFinishedListener | /**
* Adds the upload received event listener.
*
* @param listener
* the Listener to be added, not null
* @since 8.0
*/
public Registration addFinishedListener(FinishedListener listener) {
return addListener(FinishedEvent.class, listener,
UPLOAD_FINISHED_METHOD);
} | 3.68 |
flink_BatchTask_constructLogString | // --------------------------------------------------------------------------------------------
// Logging
// --------------------------------------------------------------------------------------------
/**
* Utility function that composes a string for logging purposes. The string includes the given
* message, the given name of the task and the index in its subtask group as well as the number
* of instances that exist in its subtask group.
*
* @param message The main message for the log.
* @param taskName The name of the task.
* @param parent The task that contains the code producing the message.
* @return The string for logging.
*/
public static String constructLogString(
String message, String taskName, AbstractInvokable parent) {
return message
+ ": "
+ taskName
+ " ("
+ (parent.getEnvironment().getTaskInfo().getIndexOfThisSubtask() + 1)
+ '/'
+ parent.getEnvironment().getTaskInfo().getNumberOfParallelSubtasks()
+ ')';
} | 3.68 |
flink_TableConfig_get | /**
* {@inheritDoc}
*
* <p>This method gives read-only access to the full configuration. However,
* application-specific configuration has precedence. Configuration of outer layers is used for
* defaults and fallbacks. See the docs of {@link TableConfig} for more information.
*
* @param option metadata of the option to read
* @param <T> type of the value to read
* @return read value or {@link ConfigOption#defaultValue()} if not found
*/
@Override
public <T> T get(ConfigOption<T> option) {
return configuration.getOptional(option).orElseGet(() -> rootConfiguration.get(option));
} | 3.68 |
framework_GridLayout_isSpacing | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.Layout.SpacingHandler#isSpacing()
*/
@Override
public boolean isSpacing() {
return getState(false).spacing;
} | 3.68 |
hudi_SecondaryIndexUtils_fromJsonString | /**
* Parse secondary index str to List<HoodieSecondaryIndex>
*
* @param jsonStr Secondary indexes with json format
* @return List<HoodieSecondaryIndex>
*/
public static List<HoodieSecondaryIndex> fromJsonString(String jsonStr) {
try {
return SecondaryIndexUtils.fromJsonString(jsonStr,
new TypeReference<List<HoodieSecondaryIndex>>() {
});
} catch (Exception e) {
throw new HoodieSecondaryIndexException("Fail to get secondary indexes", e);
}
} | 3.68 |
hmily_HmilyXaResource_end | /**
* End.
*
* @param i the end flags, e.g. {@code XAResource.TMSUCCESS}, {@code TMFAIL} or {@code TMSUSPEND}
* @throws XAException the xa exception
*/
public void end(final int i) throws XAException {
this.end(this.xid, i);
} | 3.68 |
flink_SpillChannelManager_registerOpenChannelToBeRemovedAtShutdown | /**
* Adds a channel reader/writer to the list of channels that are to be removed at shutdown.
*
* @param channel The channel reader/writer.
*/
synchronized void registerOpenChannelToBeRemovedAtShutdown(FileIOChannel channel) {
openChannels.add(channel);
} | 3.68 |
open-banking-gateway_PsuLoginService_anonymousPsuAssociateAuthSession | /**
* Used for cases when there is no need to identify the PSU - i.e. a one-time payment - so that the
* requesting FinTech can manage the associated entities.
*/
public CompletableFuture<Outcome> anonymousPsuAssociateAuthSession(UUID authorizationId, String authorizationPassword) {
var exchange = oper.execute(callback -> {
AuthSession session = authRepository.findById(authorizationId)
.orElseThrow(() -> new IllegalStateException("Missing authorization session: " + authorizationId));
if (!session.isPsuAnonymous()) {
throw new IllegalStateException("Session does not support anonymous PSU: " + authorizationId);
}
FintechConsentSpecSecureStorage.FinTechUserInboxData inbox = associationService.readInboxFromFinTech(session, authorizationPassword);
session.setStatus(SessionStatus.STARTED);
authRepository.save(session);
return new SessionAndInbox(session.getRedirectCode(), inbox);
});
return executeOnLoginAndMap(exchange.getInbox(), authorizationId, exchange.getRedirectCode());
} | 3.68 |
hudi_HoodieParquetDataBlock_readRecordsFromBlockPayload | /**
* NOTE: We're overriding the whole reading sequence to make sure we properly respect
* the requested Reader's schema and only fetch the columns that have been explicitly
* requested by the caller (providing projected Reader's schema)
*/
@Override
protected <T> ClosableIterator<HoodieRecord<T>> readRecordsFromBlockPayload(HoodieRecordType type) throws IOException {
HoodieLogBlockContentLocation blockContentLoc = getBlockContentLocation().get();
// NOTE: It's important to extend Hadoop configuration here to make sure configuration
// is appropriately carried over
Configuration inlineConf = FSUtils.buildInlineConf(blockContentLoc.getHadoopConf());
Path inlineLogFilePath = InLineFSUtils.getInlineFilePath(
blockContentLoc.getLogFile().getPath(),
blockContentLoc.getLogFile().getPath().toUri().getScheme(),
blockContentLoc.getContentPositionInLogFile(),
blockContentLoc.getBlockSize());
Schema writerSchema = new Schema.Parser().parse(this.getLogBlockHeader().get(HeaderMetadataType.SCHEMA));
ClosableIterator<HoodieRecord<T>> iterator = HoodieFileReaderFactory.getReaderFactory(type).getFileReader(inlineConf, inlineLogFilePath, PARQUET)
.getRecordIterator(writerSchema, readerSchema);
return iterator;
} | 3.68 |
pulsar_ManagedCursor_asyncReadEntriesWithSkip | /**
* Asynchronously read entries from the ManagedLedger.
*
* @param numberOfEntriesToRead maximum number of entries to return
* @param maxSizeBytes max size in bytes of the entries to return
* @param callback callback object
* @param ctx opaque context
* @param maxPosition the max position that can be read
* @param skipCondition predicate used to filter out entries while reading
*/
default void asyncReadEntriesWithSkip(int numberOfEntriesToRead, long maxSizeBytes, ReadEntriesCallback callback,
Object ctx, PositionImpl maxPosition, Predicate<PositionImpl> skipCondition) {
asyncReadEntries(numberOfEntriesToRead, maxSizeBytes, callback, ctx, maxPosition);
} | 3.68 |
flink_CallContext_newValidationError | /**
* Creates a validation exception for exiting the type inference process with a meaningful
* exception.
*/
default ValidationException newValidationError(String message, Object... args) {
return new ValidationException(String.format(message, args));
} | 3.68 |
flink_Configuration_getBoolean | /**
* Returns the value associated with the given config option as a boolean. If no value is mapped
* under any key of the option, it returns the specified default instead of the option's default
* value.
*
* @param configOption The configuration option
* @param overrideDefault The value to return if no value was mapped for any key of the option
* @return the configured value associated with the given config option, or the overrideDefault
*/
@PublicEvolving
public boolean getBoolean(ConfigOption<Boolean> configOption, boolean overrideDefault) {
return getOptional(configOption).orElse(overrideDefault);
} | 3.68 |
hbase_RequestConverter_buildIsCleanerChoreEnabledRequest | /**
* Creates a request for querying the master whether the cleaner chore is enabled
* @return A {@link IsCleanerChoreEnabledRequest}
*/
public static IsCleanerChoreEnabledRequest buildIsCleanerChoreEnabledRequest() {
return IsCleanerChoreEnabledRequest.getDefaultInstance();
} | 3.68 |
hadoop_LocatedFileStatusFetcher_addResultStatistics | /**
* Add the statistics of an individual thread's scan.
* @param stats possibly null statistics.
*/
private void addResultStatistics(IOStatistics stats) {
if (stats != null) {
// demand creation of IO statistics.
synchronized (this) {
LOG.debug("Adding IOStatistics: {}", stats);
if (iostats == null) {
// demand create the statistics
iostats = snapshotIOStatistics(stats);
} else {
iostats.aggregate(stats);
}
}
}
} | 3.68 |
streampipes_EpProperties_stringEp | /**
* Creates a new primitive property of type string and the provided domain property. In addition, the value range
* of the property is restricted to the defined {@link org.apache.streampipes.model.schema.Enumeration}
*
* @param runtimeName The field identifier of the event property at runtime.
* @param domainProperties The semantics of the property as a list of URIs. Use one of the vocabularies
* provided in
* {@link org.apache.streampipes.vocabulary} or create your own domain-specific vocabulary.
* @return {@link org.apache.streampipes.model.schema.EventPropertyPrimitive}
*/
public static EventPropertyPrimitive stringEp(Label label, String runtimeName, List<URI> domainProperties) {
return ep(label, XSD.STRING.toString(), runtimeName, domainProperties);
} | 3.68 |
framework_Escalator_onScroll | /**
* Logical scrolling event handler for the entire widget.
*/
public void onScroll() {
final double scrollTop = verticalScrollbar.getScrollPos();
final double scrollLeft = horizontalScrollbar.getScrollPos();
if (lastScrollLeft != scrollLeft) {
for (int i = 0; i < columnConfiguration.frozenColumns; i++) {
header.updateFreezePosition(i, scrollLeft);
body.updateFreezePosition(i, scrollLeft);
footer.updateFreezePosition(i, scrollLeft);
}
position.set(headElem, -scrollLeft, 0);
/*
* TODO [[optimize]]: cache this value in case the instanceof
* check has undesirable overhead. This could also be a
* candidate for some deferred binding magic so that e.g.
* AbsolutePosition is not even considered in permutations that
* we know support something better. That would let the compiler
* completely remove the entire condition since it knows that
* the if will never be true.
*/
if (position instanceof AbsolutePosition) {
/*
* we don't want to put "top: 0" on the footer, since it'll
* render wrong, as we already have
* "bottom: $footer-height".
*/
footElem.getStyle().setLeft(-scrollLeft, Unit.PX);
} else {
position.set(footElem, -scrollLeft, 0);
}
lastScrollLeft = scrollLeft;
}
body.setBodyScrollPosition(scrollLeft, scrollTop);
lastScrollTop = scrollTop;
body.updateEscalatorRowsOnScroll();
body.spacerContainer.updateSpacerDecosVisibility();
/*
* TODO [[optimize]]: Might avoid a reflow by first calculating new
* scrolltop and scrolleft, then doing the escalator magic based on
* those numbers and only updating the positions after that.
*/
} | 3.68 |
hadoop_JsonSerDeser_fromStream | /**
* Convert from an input stream, closing the stream afterwards.
* @param stream the input stream to read from
* @return the parsed JSON
* @throws IOException IO problems
*/
public T fromStream(InputStream stream) throws IOException {
try {
return (T) (mapper.readValue(stream, classType));
} catch (IOException e) {
log.error("Exception while parsing json input stream", e);
throw e;
} finally {
IOUtils.closeStream(stream);
}
} | 3.68 |
shardingsphere-elasticjob_AbstractDistributeOnceElasticJobListener_notifyWaitingTaskStart | /**
* Notify waiting task start.
*/
public void notifyWaitingTaskStart() {
synchronized (startedWait) {
startedWait.notifyAll();
}
} | 3.68 |
hmily_PropertyName_getInvalidChars | /**
* Gets invalid chars.
*
* @param elementValue the element value
* @return the invalid chars
*/
static List<Character> getInvalidChars(final CharSequence elementValue) {
List<Character> chars = new ArrayList<>();
for (int i = 0; i < elementValue.length(); i++) {
char ch = elementValue.charAt(i);
if (!isValidChar(ch, i)) {
chars.add(ch);
}
}
return chars;
} | 3.68 |
hibernate-validator_AbstractStaxBuilder_readSingleElement | /**
* Reads a value between a simple tag element. In the case of {@code <someTag>some-value</someTag>} this will
* return {@code some-value} as a string.
*
* @param xmlEventReader a current {@link XMLEventReader}
*
* @return a value of a current xml tag as a string
*/
protected String readSingleElement(XMLEventReader xmlEventReader) throws XMLStreamException {
// trimming the string value as it might contain leading/trailing spaces or \n
XMLEvent xmlEvent = xmlEventReader.nextEvent();
StringBuilder stringBuilder = new StringBuilder( xmlEvent.asCharacters().getData() );
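// The tag body may be delivered as several consecutive character events, so keep appending until the next event is not characters.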
while ( xmlEventReader.peek().isCharacters() ) {
xmlEvent = xmlEventReader.nextEvent();
stringBuilder.append( xmlEvent.asCharacters().getData() );
}
return stringBuilder.toString().trim();
} | 3.68 |
querydsl_GenericExporter_addAnnotationHelper | /**
* Add an annotation helper object to process custom annotations
*
* @param annotationHelper
*/
public void addAnnotationHelper(AnnotationHelper annotationHelper) {
annotationHelpers.add(annotationHelper);
} | 3.68 |
hbase_MetricsConnection_getAppendTracker | /** appendTracker metric */
public CallTracker getAppendTracker() {
return appendTracker;
} | 3.68 |
morf_ChangeColumn_accept | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor)
*/
@Override
public void accept(SchemaChangeVisitor visitor) {
visitor.visit(this);
} | 3.68 |
querydsl_GenericExporter_setPropertyHandling | /**
* Set the property handling mode
*
* @param propertyHandling
*/
public void setPropertyHandling(PropertyHandling propertyHandling) {
this.propertyHandling = propertyHandling;
} | 3.68 |
morf_SelectStatementBuilder_getGroupBys | /**
* Gets the grouped fields
*
* @return the group by fields
*/
List<AliasedField> getGroupBys() {
return groupBys;
} | 3.68 |
framework_Window_setTabStopBottomAssistiveText | /**
* Sets the message that is provided to users of assistive devices when the
* user reaches the bottom of the window when leaving a window with the tab
* key is prevented.
* <p>
* This message is not visible on the screen.
*
* @param bottomMessage
* String provided when the user navigates with the Tab key to
* the bottom of the window
*/
public void setTabStopBottomAssistiveText(String bottomMessage) {
getState().assistiveTabStopBottomText = bottomMessage;
} | 3.68 |
hadoop_Paths_getStagingUploadsParentDirectory | /**
* Build a qualified parent path for the temporary multipart upload commit
* directory built by {@link #getMultipartUploadCommitsDirectory(Configuration, String)}.
* @param conf configuration defining default FS.
* @param uuid uuid of job
* @return a path which can be used for temporary work
* @throws IOException on an IO failure.
*/
public static Path getStagingUploadsParentDirectory(Configuration conf,
String uuid) throws IOException {
return getMultipartUploadCommitsDirectory(conf, uuid).getParent();
} | 3.68 |
querydsl_StringExpression_prepend | /**
* Create a {@code concat(str, this)} expression
*
* <p>Prepend the given String and return the result</p>
*
* @param str string
* @return str + this
*/
public StringExpression prepend(String str) {
return prepend(ConstantImpl.create(str));
} | 3.68 |
framework_AbstractSplitPanel_getFirstComponent | /**
* Gets the first component of this split panel. Depending on the direction
* this is either the component shown at the top or to the left.
*
* @return the first component of this split panel
*/
public Component getFirstComponent() {
return (Component) getState(false).firstChild;
} | 3.68 |
querydsl_JTSGeometryExpression_envelope | /**
* The minimum bounding box for this Geometry, returned as a Geometry. The
* polygon is defined by the corner points of the bounding box [(MINX, MINY), (MAXX, MINY), (MAXX, MAXY),
* (MINX, MAXY), (MINX, MINY)]. Minimums for Z and M may be added. The simplest representation of an
* Envelope is as two direct positions, one containing all the minimums, and another all the maximums. In some
* cases, this coordinate will be outside the range of validity for the Spatial Reference System.
*
* @return envelope
*/
public JTSGeometryExpression<Geometry> envelope() {
if (envelope == null) {
envelope = JTSGeometryExpressions.geometryOperation(SpatialOps.ENVELOPE, mixin);
}
return envelope;
} | 3.68 |
hudi_EmbeddedTimelineServerHelper_createEmbeddedTimelineService | /**
* Instantiate Embedded Timeline Server.
* @param context Hoodie Engine Context
* @param config Hoodie Write Config
* @return TimelineServer if configured to run
* @throws IOException
*/
public static Option<EmbeddedTimelineService> createEmbeddedTimelineService(
HoodieEngineContext context, HoodieWriteConfig config) throws IOException {
if (config.isEmbeddedTimelineServerEnabled()) {
Option<String> hostAddr = context.getProperty(EngineProperty.EMBEDDED_SERVER_HOST);
EmbeddedTimelineService timelineService = EmbeddedTimelineService.getOrStartEmbeddedTimelineService(context, hostAddr.orElse(null), config);
updateWriteConfigWithTimelineServer(timelineService, config);
return Option.of(timelineService);
} else {
return Option.empty();
}
} | 3.68 |
hadoop_RenameOperation_queueToDelete | /**
* Queue a single marker for deletion.
* <p>
* See {@link #queueToDelete(Path, String)} for
* details on safe use of this method.
*
* @param marker the marker to queue for deletion
*/
private void queueToDelete(final DirMarkerTracker.Marker marker) {
queueToDelete(marker.getPath(), marker.getKey());
} | 3.68 |
flink_LocalProperties_addUniqueFields | /**
* Adds a combination of fields that are unique in these data properties.
*
* @param uniqueFields The fields that are unique in these data properties.
*/
public LocalProperties addUniqueFields(FieldSet uniqueFields) {
LocalProperties copy = clone();
if (copy.uniqueFields == null) {
copy.uniqueFields = new HashSet<FieldSet>();
}
copy.uniqueFields.add(uniqueFields);
return copy;
} | 3.68 |
flink_BernoulliSampler_sample | /**
* Sample the input elements; for each input element, take a Bernoulli trial for sampling.
*
* @param input Elements to be sampled.
* @return The sampled result which is lazy computed upon input elements.
*/
@Override
public Iterator<T> sample(final Iterator<T> input) {
if (fraction == 0) {
return emptyIterable;
}
return new SampledIterator<T>() {
T current = null;
@Override
public boolean hasNext() {
if (current == null) {
current = getNextSampledElement();
}
return current != null;
}
@Override
public T next() {
if (current == null) {
return getNextSampledElement();
} else {
T result = current;
current = null;
return result;
}
}
private T getNextSampledElement() {
if (fraction <= THRESHOLD) {
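// For small fractions, draw a geometric "gap" so that whole runs of unsampled elements can be
// skipped, instead of performing a random draw for every element.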
double rand = random.nextDouble();
double u = Math.max(rand, EPSILON);
int gap = (int) (Math.log(u) / Math.log(1 - fraction));
int elementCount = 0;
if (input.hasNext()) {
T element = input.next();
while (input.hasNext() && elementCount < gap) {
element = input.next();
elementCount++;
}
if (elementCount < gap) {
return null;
} else {
return element;
}
} else {
return null;
}
} else {
while (input.hasNext()) {
T element = input.next();
if (random.nextDouble() <= fraction) {
return element;
}
}
return null;
}
}
};
} | 3.68 |
hadoop_StateStoreSerializableImpl_serializeString | /**
* Serialize a record using the serializer.
*
* @param record Record to serialize.
* @param <T> Type of the state store record.
* @return String with the serialization of the record.
*/
protected <T extends BaseRecord> String serializeString(T record) {
return serializer.serializeString(record);
} | 3.68 |
zxing_AddressBookParsedResult_getGeo | /**
* @return a location as a latitude/longitude pair
*/
public String[] getGeo() {
return geo;
} | 3.68 |
hbase_TableDescriptorBuilder_setReadOnly | /**
* Setting the table as read only sets all the columns in the table as read only. By default all
* tables are modifiable, but if the readOnly flag is set to true then the contents of the table
* can only be read but not modified.
* @param readOnly True if all of the columns in the table should be read only.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
return setValue(READONLY_KEY, Boolean.toString(readOnly));
} | 3.68 |
hadoop_CallableSupplier_maybeAwaitCompletion | /**
* Block awaiting completion for any non-null future passed in;
* No-op if a null arg was supplied.
* @param <T> return type
* @param future future
* @return the outcome; is empty if the future was null/had no return value
* @throws IOException if one of the called futures raised an IOE.
* @throws RuntimeException if one of the futures raised one.
*/
public static <T> Optional<T> maybeAwaitCompletion(
@Nullable final CompletableFuture<T> future)
throws IOException {
if (future != null) {
return Optional.ofNullable(waitForCompletion(future));
} else {
return Optional.empty();
}
} | 3.68 |
hbase_TableListModel_add | /**
* Add the table name model to the list
* @param table the table model
*/
public void add(TableModel table) {
tables.add(table);
} | 3.68 |
hadoop_CounterGroupFactory_updateFrameworkGroupMapping | // Update static mappings (c2i, i2s) of framework groups
private static synchronized void updateFrameworkGroupMapping(Class<?> cls) {
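// Return early if the group is already registered; otherwise append it to i2s and record its index in s2i.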
String name = cls.getName();
Integer i = s2i.get(name);
if (i != null) return;
i2s.add(name);
s2i.put(name, i2s.size() - 1);
} | 3.68 |
dubbo_NettyHttpRestServer_getChildChannelOptionMap | /**
* Create the child channel options map.
*
* @param url the url carrying channel option parameters
* @return the child channel options map
*/
protected Map<ChannelOption, Object> getChildChannelOptionMap(URL url) {
Map<ChannelOption, Object> channelOption = new HashMap<>();
channelOption.put(
ChannelOption.SO_KEEPALIVE, url.getParameter(Constants.KEEP_ALIVE_KEY, Constants.DEFAULT_KEEP_ALIVE));
return channelOption;
} | 3.68 |
flink_LastDatedValueFunction_getTypeInference | /**
* Declares the {@link TypeInference} of this function. It specifies:
*
* <ul>
* <li>which argument types are supported when calling this function,
* <li>which {@link DataType#getConversionClass()} should be used when calling the JVM method
* {@link #accumulate(Accumulator, Object, LocalDate)} during runtime,
* <li>a similar strategy how to derive an accumulator type,
* <li>and a similar strategy how to derive the output type.
* </ul>
*/
@Override
public TypeInference getTypeInference(DataTypeFactory typeFactory) {
return TypeInference.newBuilder()
// accept a signature (ANY, DATE) both with default conversion classes,
// the input type strategy is mostly used to produce nicer validation exceptions
// during planning, implementers can decide to skip it if they are fine with failing
// at a later stage during code generation when the runtime method is checked
.inputTypeStrategy(
InputTypeStrategies.sequence(
InputTypeStrategies.ANY,
InputTypeStrategies.explicit(DataTypes.DATE())))
// let the accumulator data type depend on the first input argument
.accumulatorTypeStrategy(
callContext -> {
final DataType argDataType = callContext.getArgumentDataTypes().get(0);
final DataType accDataType =
DataTypes.STRUCTURED(
Accumulator.class,
DataTypes.FIELD("value", argDataType),
DataTypes.FIELD("date", DataTypes.DATE()));
return Optional.of(accDataType);
})
// let the output data type depend on the first input argument
.outputTypeStrategy(
callContext -> {
final DataType argDataType = callContext.getArgumentDataTypes().get(0);
final DataType outputDataType =
DataTypes.ROW(
DataTypes.FIELD("value", argDataType),
DataTypes.FIELD("date", DataTypes.DATE()));
return Optional.of(outputDataType);
})
.build();
} | 3.68 |
hbase_SegmentScanner_reseek | /**
* Reseek the scanner at or after the specified KeyValue. This method is guaranteed to seek at or
* after the required key only if the key comes after the current position of the scanner. Should
* not be used to seek to a key which may come before the current position.
* @param cell seek value (should be non-null)
* @return true if scanner has values left, false if end of scanner
*/
@Override
public boolean reseek(Cell cell) throws IOException {
if (closed) {
return false;
}
/*
* See HBASE-4195 & HBASE-3855 & HBASE-6591 for the background on this implementation. This code
* is executed concurrently with flush and puts, without locks. The ideal implementation for
* performance would use the sub skip list implicitly pointed by the iterator. Unfortunately the
* Java API does not offer a method to get it. So we remember the last keys we iterated to and
* restore the reseeked set to at least that point.
*/
iter = getIterator(getHighest(cell, last));
updateCurrent();
return (current != null);
} | 3.68 |
flink_StreamGraphGenerator_determineSlotSharingGroup | /**
* Determines the slot sharing group for an operation based on the slot sharing group set by the
* user and the slot sharing groups of the inputs.
*
* <p>If the user specifies a group name, this is taken as is. If nothing is specified and the
* input operations all have the same group name then this name is taken. Otherwise the default
* group is chosen.
*
* @param specifiedGroup The group specified by the user.
* @param inputIds The IDs of the input operations.
*/
private String determineSlotSharingGroup(String specifiedGroup, Collection<Integer> inputIds) {
if (specifiedGroup != null) {
return specifiedGroup;
} else {
String inputGroup = null;
for (int id : inputIds) {
String inputGroupCandidate = streamGraph.getSlotSharingGroup(id);
if (inputGroup == null) {
inputGroup = inputGroupCandidate;
} else if (!inputGroup.equals(inputGroupCandidate)) {
return DEFAULT_SLOT_SHARING_GROUP;
}
}
return inputGroup == null ? DEFAULT_SLOT_SHARING_GROUP : inputGroup;
}
} | 3.68 |