name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
morf_SelectStatement_toString | /**
* @see org.alfasoftware.morf.sql.AbstractSelectStatement#toString()
*/
@Override
public String toString() {
StringBuilder result = new StringBuilder("SQL SELECT ");
if (distinct) {
result.append("DISTINCT ");
}
if (!hints.isEmpty()) {
result.append("with hints: ");
result.append(hints);
result.append(" ");
}
result.append(super.toString());
if (!groupBys.isEmpty()) result.append(" GROUP BY ").append(groupBys);
if (having != null) result.append(" HAVING [").append(having).append("]");
for (SetOperator setOperator : setOperators) {
result.append(" ").append(setOperator);
}
if (StringUtils.isNotEmpty(getAlias())) result.append(" AS ").append(getAlias());
if (forUpdate) result.append(" (FOR UPDATE)");
return result.toString();
} | 3.68 |
dubbo_AbstractConfigManager_getConfigIdsFromProps | /**
 * Search props and extract config ids of the specified type.
* <pre>
* # properties
* dubbo.registries.registry1.address=xxx
* dubbo.registries.registry2.port=xxx
*
* # extract
* Set configIds = getConfigIds(RegistryConfig.class)
*
* # result
* configIds: ["registry1", "registry2"]
* </pre>
*
* @param clazz config type
 * @return ids of the specified config type
*/
private Set<String> getConfigIdsFromProps(Class<? extends AbstractConfig> clazz) {
String prefix = CommonConstants.DUBBO + "." + AbstractConfig.getPluralTagName(clazz) + ".";
return ConfigurationUtils.getSubIds(environment.getConfigurationMaps(), prefix);
} | 3.68 |
flink_MemoryMappedBoundedData_create | /** Creates new MemoryMappedBoundedData, creating a memory mapped file at the given path. */
public static MemoryMappedBoundedData create(Path memMappedFilePath) throws IOException {
return createWithRegionSize(memMappedFilePath, Integer.MAX_VALUE);
} | 3.68 |
flink_ActorSystemBootstrapTools_startLocalActorSystem | /**
* Starts a local Actor System.
*
* @param configuration The Flink configuration.
* @param actorSystemName Name of the started ActorSystem.
* @param logger The logger to output log information.
* @param actorSystemExecutorConfiguration Configuration for the ActorSystem's underlying
* executor.
* @param customConfig Custom Pekko config to be combined with the config derived from Flink
* configuration.
* @return The ActorSystem which has been started.
 * @throws Exception if the actor system could not be started
*/
public static ActorSystem startLocalActorSystem(
Configuration configuration,
String actorSystemName,
Logger logger,
Config actorSystemExecutorConfiguration,
Config customConfig)
throws Exception {
logger.info("Trying to start local actor system");
try {
Config pekkoConfig =
PekkoUtils.getConfig(
configuration, null, null, actorSystemExecutorConfiguration);
if (customConfig != null) {
pekkoConfig = customConfig.withFallback(pekkoConfig);
}
return startActorSystem(pekkoConfig, actorSystemName, logger);
} catch (Throwable t) {
throw new Exception("Could not create actor system", t);
}
} | 3.68 |
flink_KryoSerializer_buildKryoRegistrations | /**
 * Utility method that takes lists of registered types and their serializers, and resolves them
 * into a single list such that the result will resemble the final registration result in Kryo.
*/
private static LinkedHashMap<String, KryoRegistration> buildKryoRegistrations(
Class<?> serializedType,
LinkedHashSet<Class<?>> registeredTypes,
LinkedHashMap<Class<?>, Class<? extends Serializer<?>>>
registeredTypesWithSerializerClasses,
LinkedHashMap<Class<?>, ExecutionConfig.SerializableSerializer<?>>
registeredTypesWithSerializers) {
final LinkedHashMap<String, KryoRegistration> kryoRegistrations = new LinkedHashMap<>();
kryoRegistrations.put(serializedType.getName(), new KryoRegistration(serializedType));
for (Class<?> registeredType : checkNotNull(registeredTypes)) {
kryoRegistrations.put(registeredType.getName(), new KryoRegistration(registeredType));
}
for (Map.Entry<Class<?>, Class<? extends Serializer<?>>>
registeredTypeWithSerializerClassEntry :
checkNotNull(registeredTypesWithSerializerClasses).entrySet()) {
kryoRegistrations.put(
registeredTypeWithSerializerClassEntry.getKey().getName(),
new KryoRegistration(
registeredTypeWithSerializerClassEntry.getKey(),
registeredTypeWithSerializerClassEntry.getValue()));
}
for (Map.Entry<Class<?>, ExecutionConfig.SerializableSerializer<?>>
registeredTypeWithSerializerEntry :
checkNotNull(registeredTypesWithSerializers).entrySet()) {
kryoRegistrations.put(
registeredTypeWithSerializerEntry.getKey().getName(),
new KryoRegistration(
registeredTypeWithSerializerEntry.getKey(),
registeredTypeWithSerializerEntry.getValue()));
}
// add Avro support if flink-avro is available; a dummy otherwise
AvroUtils.getAvroUtils().addAvroGenericDataArrayRegistration(kryoRegistrations);
return kryoRegistrations;
} | 3.68 |
framework_VScrollTable_getWidth | /**
* Returns the pixels width of the footer cell.
*
* @return The width in pixels
*/
public int getWidth() {
return width;
} | 3.68 |
framework_VRadioButtonGroup_focus | /**
* Set focus to the selected radio button (or first radio button if there is
* no selection).
*/
@Override
public void focus() {
// If focus is set on creation, need to wait until options are populated
Scheduler.get().scheduleDeferred(() -> {
// if there's a selected radio button, focus it
for (String key : keyToOptions.keySet()) {
RadioButton radioButton = keyToOptions.get(key);
if (radioButton != null && radioButton.getValue()) {
radioButton.setFocus(true);
return;
}
}
// otherwise focus the first enabled child
getWidget().focusFirstEnabledChild();
});
} | 3.68 |
framework_DateField_isLenient | /**
* Returns whether date/time interpretation is to be lenient.
*
* @see #setLenient(boolean)
*
* @return true if the interpretation mode of this calendar is lenient;
* false otherwise.
*/
public boolean isLenient() {
return lenient;
} | 3.68 |
framework_DesignAttributeHandler_readAlignment | /**
* Read the alignment from the given child component attributes.
*
* @since 7.6.4
* @param attr
* the child component attributes
* @return the component alignment
*/
public static Alignment readAlignment(Attributes attr) {
int bitMask = 0;
if (attr.hasKey(":middle")) {
bitMask += AlignmentInfo.Bits.ALIGNMENT_VERTICAL_CENTER;
} else if (attr.hasKey(":bottom")) {
bitMask += AlignmentInfo.Bits.ALIGNMENT_BOTTOM;
} else {
bitMask += AlignmentInfo.Bits.ALIGNMENT_TOP;
}
if (attr.hasKey(":center")) {
bitMask += AlignmentInfo.Bits.ALIGNMENT_HORIZONTAL_CENTER;
} else if (attr.hasKey(":right")) {
bitMask += AlignmentInfo.Bits.ALIGNMENT_RIGHT;
} else {
bitMask += AlignmentInfo.Bits.ALIGNMENT_LEFT;
}
return new Alignment(bitMask);
} | 3.68 |
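A minimal usage sketch of the reader above; the attribute keys are the ones the method checks, and the construction of the jsoup Attributes object is illustrative rather than taken from the snippet:

Attributes attr = new Attributes();   // org.jsoup.nodes.Attributes
attr.put(":middle", "");              // vertical: center
attr.put(":right", "");               // horizontal: right
Alignment alignment = DesignAttributeHandler.readAlignment(attr);
// bitMask = ALIGNMENT_VERTICAL_CENTER + ALIGNMENT_RIGHT, i.e. a middle-right alignment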
hudi_HoodieTable_logCompact | /**
* Run Log Compaction on the table. Log Compaction arranges the data so that it is optimized for data access.
*
* @param context HoodieEngineContext
* @param logCompactionInstantTime Instant Time
*/
public HoodieWriteMetadata<O> logCompact(HoodieEngineContext context,
String logCompactionInstantTime) {
throw new UnsupportedOperationException("Log compaction is not supported for this table type");
} | 3.68 |
flink_StateDescriptor_getQueryableStateName | /**
* Returns the queryable state name.
*
* @return Queryable state name or <code>null</code> if not set.
* @deprecated The Queryable State feature is deprecated since Flink 1.18, and will be removed
* in a future Flink major version.
*/
@Nullable
@Deprecated
public String getQueryableStateName() {
return queryableStateName;
} | 3.68 |
rocketmq-connect_LRUCache_size | /**
 * Returns the cache size.
 *
 * @return the number of entries in the cache
*/
@Override
public long size() {
return cache.size();
} | 3.68 |
framework_Calendar_fireDateClick | /**
* Fires event when a date was clicked in the calendar. Creates a new event
* for the date and passes it to the listener.
*
* @param date
* The date and time that was clicked
*/
protected void fireDateClick(Date date) {
fireEvent(new DateClickEvent(this, date));
} | 3.68 |
pulsar_MultiTopicsConsumerImpl_createPartitionedConsumer | // create consumer for a single topic with already known partitions.
// first create a consumer with no topic, then do subscription for the already known partitioned topic.
public static <T> MultiTopicsConsumerImpl<T> createPartitionedConsumer(
PulsarClientImpl client,
ConsumerConfigurationData<T> conf,
ExecutorProvider executorProvider,
CompletableFuture<Consumer<T>> subscribeFuture,
int numPartitions,
Schema<T> schema, ConsumerInterceptors<T> interceptors) {
checkArgument(conf.getTopicNames().size() == 1,
"Should have only 1 topic for partitioned consumer");
// get topic name, then remove it from conf, so constructor will create a consumer with no topic.
ConsumerConfigurationData<T> cloneConf = conf.clone();
String topicName = cloneConf.getSingleTopic();
cloneConf.getTopicNames().remove(topicName);
CompletableFuture<Consumer<T>> future = new CompletableFuture<>();
MultiTopicsConsumerImpl<T> consumer = new MultiTopicsConsumerImpl<T>(client, topicName, cloneConf,
executorProvider, future, schema, interceptors, true /* createTopicIfDoesNotExist */);
future.thenCompose(c -> ((MultiTopicsConsumerImpl<T>) c).subscribeAsync(topicName, numPartitions))
.thenRun(()-> subscribeFuture.complete(consumer))
.exceptionally(e -> {
log.warn("Failed subscription for createPartitionedConsumer: {} {}, e:{}",
topicName, numPartitions, e);
consumer.cleanupMultiConsumer();
subscribeFuture.completeExceptionally(
PulsarClientException.wrap(((Throwable) e).getCause(),
String.format("Failed to subscribe %s with %d partitions", topicName, numPartitions)));
return null;
});
return consumer;
} | 3.68 |
morf_SqlDialect_getSqlForRandom | /**
* Converts the RANDOM function into SQL. This returns a random number between 0 and 1.
*
* @return a string representation of the SQL.
*/
protected String getSqlForRandom() {
return "RAND()";
} | 3.68 |
MagicPlugin_TranslatingConfigurationSection_createSection | /**
* Borrowed from Bukkit's MemorySection
*/
@Override
public ConfigurationSection createSection(String path) {
Validate.notEmpty(path, "Cannot create section at empty path");
Configuration root = getRoot();
if (root == null) {
throw new IllegalStateException("Cannot create section without a root");
}
final char separator = root.options().pathSeparator();
// i1 is the leading (higher) index
// i2 is the trailing (lower) index
int i1 = -1;
int i2;
ConfigurationSection section = this;
while ((i1 = path.indexOf(separator, i2 = i1 + 1)) != -1) {
String node = path.substring(i2, i1);
ConfigurationSection subSection = section.getConfigurationSection(node);
if (subSection == null) {
section = section.createSection(node);
} else {
section = subSection;
}
}
String key = path.substring(i2);
if (section == this) {
ConfigurationSection result = createSection(this, key);
super.set(key, result);
return result;
}
return section.createSection(key);
} | 3.68 |
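A standalone sketch (assuming '.' as the path separator, which is Bukkit's default) of how the loop above splits a nested path into intermediate nodes and the final key:

String path = "a.b.c";
char separator = '.';
int i1 = -1;
int i2;
while ((i1 = path.indexOf(separator, i2 = i1 + 1)) != -1) {
    System.out.println("intermediate node: " + path.substring(i2, i1)); // prints "a", then "b"
}
System.out.println("final key: " + path.substring(i2)); // prints "c", which is created inside section "a.b"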
morf_DatabaseMetaDataProvider_setColumnDefaultValue | /**
* Sets column default value.
*
* Note: Uses an empty string for any column other than version.
 * Database-schema level default values are not supported by ALFA's domain model,
* hence we don't want to include a default value in the definition of tables.
*
* @param tableName Name of the table.
* @param column Column builder to set to.
* @param columnResultSet Result set to be read.
* @return Resulting column builder.
* @throws SQLException Upon errors.
*/
@SuppressWarnings("unused")
protected ColumnBuilder setColumnDefaultValue(RealName tableName, ColumnBuilder column, ResultSet columnResultSet) throws SQLException {
String defaultValue = "version".equalsIgnoreCase(column.getName()) ? "0" : "";
String actualDefaultValue = getActualDefaultValue(tableName, column, columnResultSet);
if (!defaultValue.equals(actualDefaultValue) && !column.isAutoNumbered()) {
log.warn("DEFAULT value for " + tableName.getDbName() + "." + column.getName() + " expected to be [" + defaultValue + "], but was [" + actualDefaultValue + "]");
}
return column.defaultValue(defaultValue);
} | 3.68 |
flink_TaskKvStateRegistry_unregisterAll | /** Unregisters all registered KvState instances from the KvStateRegistry. */
public void unregisterAll() {
for (KvStateInfo kvState : registeredKvStates) {
registry.unregisterKvState(
jobId,
jobVertexId,
kvState.keyGroupRange,
kvState.registrationName,
kvState.kvStateId);
}
} | 3.68 |
dubbo_CallbackServiceCodec_exportOrUnexportCallbackService | /**
* export or unexport callback service on client side
*
 * @param channel
 * @param inv
* @param url
* @param clazz
* @param inst
* @param export
* @throws IOException
*/
@SuppressWarnings({"unchecked", "rawtypes"})
private String exportOrUnexportCallbackService(
Channel channel, RpcInvocation inv, URL url, Class clazz, Object inst, Boolean export) throws IOException {
int instid = System.identityHashCode(inst);
Map<String, String> params = new HashMap<>(3);
// no need to new client again
params.put(IS_SERVER_KEY, Boolean.FALSE.toString());
// mark that it is a callback, for troubleshooting
params.put(IS_CALLBACK_SERVICE, Boolean.TRUE.toString());
String group = (inv == null ? null : (String) inv.getObjectAttachmentWithoutConvert(GROUP_KEY));
if (group != null && group.length() > 0) {
params.put(GROUP_KEY, group);
}
// add methods, for verifying against the method and automatic fallback (see dubbo protocol)
params.put(METHODS_KEY, StringUtils.join(ClassUtils.getDeclaredMethodNames(clazz), ","));
Map<String, String> tmpMap = new HashMap<>();
if (url != null) {
Map<String, String> parameters = url.getParameters();
if (parameters != null && !parameters.isEmpty()) {
tmpMap.putAll(parameters);
}
}
tmpMap.putAll(params);
tmpMap.remove(VERSION_KEY); // doesn't need to distinguish version for callback
tmpMap.remove(Constants.BIND_PORT_KEY); // callback doesn't need bind.port
tmpMap.put(INTERFACE_KEY, clazz.getName());
URL exportUrl = new ServiceConfigURL(
DubboProtocol.NAME,
channel.getLocalAddress().getAddress().getHostAddress(),
channel.getLocalAddress().getPort(),
clazz.getName() + "." + instid,
tmpMap);
// no need to generate multiple exporters for different channels in the same JVM; the cache key cannot collide.
String cacheKey = getClientSideCallbackServiceCacheKey(instid);
String countKey = getClientSideCountKey(clazz.getName());
if (export) {
// one channel can have multiple callback instances, no need to re-export the same instance.
if (!channel.hasAttribute(cacheKey)) {
if (!isInstancesOverLimit(channel, url, clazz.getName(), instid, false)) {
ModuleModel moduleModel;
if (inv.getServiceModel() == null) {
// TODO should get scope model from url?
moduleModel = ApplicationModel.defaultModel().getDefaultModule();
logger.error(
PROTOCOL_FAILED_LOAD_MODEL,
"",
"",
"Unable to get Service Model from Invocation. Please check if your invocation failed! "
+ "This error only happen in UT cases! Invocation:" + inv);
} else {
moduleModel = inv.getServiceModel().getModuleModel();
}
ServiceDescriptor serviceDescriptor =
moduleModel.getServiceRepository().registerService(clazz);
ServiceMetadata serviceMetadata = new ServiceMetadata(
clazz.getName() + "." + instid, exportUrl.getGroup(), exportUrl.getVersion(), clazz);
String serviceKey =
BaseServiceMetadata.buildServiceKey(exportUrl.getPath(), group, exportUrl.getVersion());
ProviderModel providerModel = new ProviderModel(
serviceKey,
inst,
serviceDescriptor,
moduleModel,
serviceMetadata,
ClassUtils.getClassLoader(clazz));
moduleModel.getServiceRepository().registerProvider(providerModel);
exportUrl = exportUrl.setScopeModel(moduleModel);
exportUrl = exportUrl.setServiceModel(providerModel);
Invoker<?> invoker = proxyFactory.getInvoker(inst, clazz, exportUrl);
// should destroy resource?
Exporter<?> exporter = protocolSPI.export(invoker);
// this is used for tracing whether the instid has published a service or not.
channel.setAttribute(cacheKey, exporter);
logger.info("Export a callback service :" + exportUrl + ", on " + channel + ", url is: " + url);
increaseInstanceCount(channel, countKey);
}
}
} else {
if (channel.hasAttribute(cacheKey)) {
Exporter<?> exporter = (Exporter<?>) channel.getAttribute(cacheKey);
exporter.unexport();
channel.removeAttribute(cacheKey);
decreaseInstanceCount(channel, countKey);
}
}
return String.valueOf(instid);
} | 3.68 |
framework_AbstractSelect_writeItem | /**
* Writes a data source Item to a design. Hierarchical select components
* should override this method to recursively write any child items as well.
*
* @since 7.5.0
* @param design
* the element into which to insert the item
* @param itemId
* the id of the item to write
* @param context
* the DesignContext instance used in writing
 * @return the element that was created for the item
*/
protected Element writeItem(Element design, Object itemId,
DesignContext context) {
Element element = design.appendElement("option");
String caption = getItemCaption(itemId);
if (caption != null && !caption.equals(itemId.toString())) {
element.html(DesignFormatter.encodeForTextNode(caption));
element.attr("item-id", itemId.toString());
} else {
element.html(DesignFormatter.encodeForTextNode(itemId.toString()));
}
Resource icon = getItemIcon(itemId);
if (icon != null) {
DesignAttributeHandler.writeAttribute("icon", element.attributes(),
icon, null, Resource.class, context);
}
if (isSelected(itemId)) {
element.attr("selected", "");
}
return element;
} | 3.68 |
pulsar_AuthenticationProvider_authenticateHttpRequest | /**
 * Sets the response according to the passed-in request,
 * and returns whether the following chain.doFilter should be executed or not.
*
* <p>Implementations of this method MUST modify the request by adding the {@link AuthenticatedRoleAttributeName}
* and the {@link AuthenticatedDataAttributeName} attributes.</p>
*
 * @return whether the following chain.doFilter should be executed after the response has been set.
* @throws Exception when authentication failed
* @deprecated use and implement {@link AuthenticationProvider#authenticateHttpRequestAsync} instead.
*/
@Deprecated
default boolean authenticateHttpRequest(HttpServletRequest request, HttpServletResponse response) throws Exception {
try {
AuthenticationState authenticationState = newHttpAuthState(request);
String role = authenticateAsync(authenticationState.getAuthDataSource()).get();
request.setAttribute(AuthenticatedRoleAttributeName, role);
request.setAttribute(AuthenticatedDataAttributeName, authenticationState.getAuthDataSource());
return true;
} catch (AuthenticationException e) {
throw e;
} catch (Exception e) {
if (e instanceof ExecutionException && e.getCause() instanceof AuthenticationException) {
throw (AuthenticationException) e.getCause();
} else {
throw new AuthenticationException("Failed to authentication http request");
}
}
} | 3.68 |
hadoop_RawErasureDecoder_allowVerboseDump | /**
 * Whether to allow dumping verbose info during encoding/decoding.
* @return true if it's allowed to do verbose dump, false otherwise.
*/
public boolean allowVerboseDump() {
return coderOptions.allowVerboseDump();
} | 3.68 |
streampipes_MqttClient_connect | /**
 * Starts a blocking connection to the MQTT broker.
*/
public void connect() {
try {
this.conn = mqtt.blockingConnection();
this.conn.connect();
} catch (Exception e) {
throw new SpRuntimeException("Could not connect to MQTT broker: "
+ uri.toString() + ", " + e.getMessage(), e);
}
} | 3.68 |
zxing_CaptureActivity_handleDecodeExternally | // Briefly show the contents of the barcode, then handle the result outside Barcode Scanner.
private void handleDecodeExternally(Result rawResult, ResultHandler resultHandler, Bitmap barcode) {
if (barcode != null) {
viewfinderView.drawResultBitmap(barcode);
}
long resultDurationMS;
if (getIntent() == null) {
resultDurationMS = DEFAULT_INTENT_RESULT_DURATION_MS;
} else {
resultDurationMS = getIntent().getLongExtra(Intents.Scan.RESULT_DISPLAY_DURATION_MS,
DEFAULT_INTENT_RESULT_DURATION_MS);
}
if (resultDurationMS > 0) {
String rawResultString = String.valueOf(rawResult);
if (rawResultString.length() > 32) {
rawResultString = rawResultString.substring(0, 32) + " ...";
}
statusView.setText(getString(resultHandler.getDisplayTitle()) + " : " + rawResultString);
}
maybeSetClipboard(resultHandler);
switch (source) {
case NATIVE_APP_INTENT:
// Hand back whatever action they requested - this can be changed to Intents.Scan.ACTION when
// the deprecated intent is retired.
Intent intent = new Intent(getIntent().getAction());
intent.addFlags(Intents.FLAG_NEW_DOC);
intent.putExtra(Intents.Scan.RESULT, rawResult.toString());
intent.putExtra(Intents.Scan.RESULT_FORMAT, rawResult.getBarcodeFormat().toString());
byte[] rawBytes = rawResult.getRawBytes();
if (rawBytes != null && rawBytes.length > 0) {
intent.putExtra(Intents.Scan.RESULT_BYTES, rawBytes);
}
Map<ResultMetadataType, ?> metadata = rawResult.getResultMetadata();
if (metadata != null) {
if (metadata.containsKey(ResultMetadataType.UPC_EAN_EXTENSION)) {
intent.putExtra(Intents.Scan.RESULT_UPC_EAN_EXTENSION,
metadata.get(ResultMetadataType.UPC_EAN_EXTENSION).toString());
}
Number orientation = (Number) metadata.get(ResultMetadataType.ORIENTATION);
if (orientation != null) {
intent.putExtra(Intents.Scan.RESULT_ORIENTATION, orientation.intValue());
}
String ecLevel = (String) metadata.get(ResultMetadataType.ERROR_CORRECTION_LEVEL);
if (ecLevel != null) {
intent.putExtra(Intents.Scan.RESULT_ERROR_CORRECTION_LEVEL, ecLevel);
}
@SuppressWarnings("unchecked")
Iterable<byte[]> byteSegments = (Iterable<byte[]>) metadata.get(ResultMetadataType.BYTE_SEGMENTS);
if (byteSegments != null) {
int i = 0;
for (byte[] byteSegment : byteSegments) {
intent.putExtra(Intents.Scan.RESULT_BYTE_SEGMENTS_PREFIX + i, byteSegment);
i++;
}
}
}
sendReplyMessage(R.id.return_scan_result, intent, resultDurationMS);
break;
case PRODUCT_SEARCH_LINK:
// Reformulate the URL which triggered us into a query, so that the request goes to the same
// TLD as the scan URL.
int end = sourceUrl.lastIndexOf("/scan");
String productReplyURL = sourceUrl.substring(0, end) + "?q=" +
resultHandler.getDisplayContents() + "&source=zxing";
sendReplyMessage(R.id.launch_product_query, productReplyURL, resultDurationMS);
break;
case ZXING_LINK:
if (scanFromWebPageManager != null && scanFromWebPageManager.isScanFromWebPage()) {
String linkReplyURL = scanFromWebPageManager.buildReplyURL(rawResult, resultHandler);
scanFromWebPageManager = null;
sendReplyMessage(R.id.launch_product_query, linkReplyURL, resultDurationMS);
}
break;
}
} | 3.68 |
framework_VaadinSession_getNextConnectorId | /**
* Gets the next unused numerical id for connector ids.
*
* @since 8.1
*
* @return the next unused numerical id for connector ids, not
* <code>null</code>
*
*/
public String getNextConnectorId() {
assert hasLock();
return String.valueOf(connectorIdSequence++);
} | 3.68 |
hmily_OriginTrackedYamlLoader_load | /**
 * Loads the YAML source into a list of flattened maps.
 *
 * @return the list of flattened maps
*/
public List<Map<String, Object>> load() {
final List<Map<String, Object>> result = new ArrayList<>();
process((properties, map) -> result.add(getFlattenedMap(map)));
return result;
} | 3.68 |
hadoop_DocumentStoreFactory_createDocumentStoreWriter | /**
* Creates a DocumentStoreWriter for a {@link DocumentStoreVendor}.
* @param conf
* for creating client connection
* @param <Document> type of Document for which the writer has to be created,
* i.e TimelineEntityDocument, FlowActivityDocument etc
* @return document store writer
* @throws DocumentStoreNotSupportedException if there is no implementation
* for a configured {@link DocumentStoreVendor} or unknown
* {@link DocumentStoreVendor} is configured.
* @throws YarnException if the required configs for DocumentStore is missing.
*/
public static <Document extends TimelineDocument>
DocumentStoreWriter <Document> createDocumentStoreWriter(
Configuration conf) throws YarnException {
final DocumentStoreVendor storeType = getStoreVendor(conf);
switch (storeType) {
case COSMOS_DB:
DocumentStoreUtils.validateCosmosDBConf(conf);
return new CosmosDBDocumentStoreWriter<>(conf);
default:
throw new DocumentStoreNotSupportedException(
"Unable to create DocumentStoreWriter for type : "
+ storeType);
}
} | 3.68 |
hadoop_OBSDataBlocks_verifyOpen | /**
* Verify that the stream is open.
*
* @throws IOException if the stream is closed
*/
private void verifyOpen() throws IOException {
if (byteBuffer == null) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
} | 3.68 |
hbase_BalanceRequest_newBuilder | /**
* Create a builder to construct a custom {@link BalanceRequest}.
*/
public static Builder newBuilder() {
return new Builder();
} | 3.68 |
framework_HierarchySection_stopFind | /**
* Stop any current find operation, regardless of the handler
*/
private void stopFind() {
if (!isFindMode()) {
return;
}
highlightModeRegistration.removeHandler();
highlightModeRegistration = null;
find.removeStyleDependentName(VDebugWindow.STYLENAME_ACTIVE);
generateDesign.removeStyleDependentName(VDebugWindow.STYLENAME_ACTIVE);
activeFindHandler = null;
} | 3.68 |
hadoop_S3ClientFactory_getTransferManagerExecutor | /**
* Get the executor that the transfer manager will use to execute background tasks.
 * @return the transfer manager executor
*/
public Executor getTransferManagerExecutor() {
return transferManagerExecutor;
} | 3.68 |
hbase_AsyncRegionLocatorHelper_removeRegionLocation | /**
* Create a new {@link RegionLocations} based on the given {@code oldLocs}, and remove the
* location for the given {@code replicaId}.
* <p/>
* All the {@link RegionLocations} in async locator related class are immutable because we want to
* access them concurrently, so here we need to create a new one, instead of calling
* {@link RegionLocations#remove(int)}.
*/
static RegionLocations removeRegionLocation(RegionLocations oldLocs, int replicaId) {
HRegionLocation[] locs = oldLocs.getRegionLocations();
if (locs.length < replicaId + 1) {
// Here we do not modify the oldLocs so it is safe to return it.
return oldLocs;
}
locs = Arrays.copyOf(locs, locs.length);
locs[replicaId] = null;
if (ObjectUtils.firstNonNull(locs) != null) {
return new RegionLocations(locs);
} else {
// if all the locations are null, just return null
return null;
}
} | 3.68 |
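A brief illustration of the behaviour above, using a hypothetical three-slot input: removing replicaId 1 from locations [hrl0, hrl1, hrl2] returns a new RegionLocations over [hrl0, null, hrl2]; removing replicaId 5 returns the original object untouched because the array has no such slot; and if the copied array ends up containing only nulls, the method returns null.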
framework_VFilterSelect_isrelativeUnits | /**
* @since 7.7
 * @param suggestionPopupWidth the configured suggestion popup width
 * @return true if the width is expressed in relative units, i.e. ends with '%'
*/
private boolean isrelativeUnits(String suggestionPopupWidth) {
return suggestionPopupWidth.trim().endsWith("%");
} | 3.68 |
morf_UpgradePath_writeSql | /**
*
* @see org.alfasoftware.morf.upgrade.SqlStatementWriter#writeSql(Collection)
*/
@Override
public void writeSql(Collection<String> statements) {
this.sql.addAll(statements);
} | 3.68 |
hmily_AbstractHmilySQLComputeEngine_buildTuple | /**
* Build tuple.
*
* @param tableName table name
* @param manipulation manipulation
* @param primaryKeyValues primary key values
 * @param before before image
 * @param after after image
* @return hmily SQL tuple
*/
protected HmilySQLTuple buildTuple(final String tableName, final HmilySQLManipulation manipulation,
final List<Object> primaryKeyValues, final Map<String, Object> before, final Map<String, Object> after) {
HmilySQLTuple result = new HmilySQLTuple();
result.setTableName(tableName);
result.setManipulationType(manipulation);
result.setPrimaryKeyValues(primaryKeyValues);
result.setBeforeImage(before);
result.setAfterImage(after);
return result;
} | 3.68 |
hudi_HoodieDataSourceHelpers_listCommitsSince | /**
* Get a list of instant times that have occurred, from the given instant timestamp.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
public static List<String> listCommitsSince(FileSystem fs, String basePath, String instantTimestamp) {
HoodieTimeline timeline = allCompletedCommitsCompactions(fs, basePath);
return timeline.findInstantsAfter(instantTimestamp, Integer.MAX_VALUE).getInstantsAsStream()
.map(HoodieInstant::getTimestamp).collect(Collectors.toList());
} | 3.68 |
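A hypothetical usage sketch of the helper above; the base path and instant timestamp are made up for illustration:

FileSystem fs = FileSystem.get(new org.apache.hadoop.conf.Configuration());
List<String> commits = HoodieDataSourceHelpers.listCommitsSince(fs, "/data/hudi/trips", "20230101000000");
// commits now holds the timestamps of completed commits/compactions after the given instant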
hbase_HRegion_buildWALEdits | /**
* Builds separate WALEdit per nonce by applying input mutations. If WALEdits from CP are
 * present, they are merged into the resulting WALEdit.
*/
public List<Pair<NonceKey, WALEdit>>
buildWALEdits(final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
List<Pair<NonceKey, WALEdit>> walEdits = new ArrayList<>();
visitBatchOperations(true, nextIndexToProcess + miniBatchOp.size(), new Visitor() {
private Pair<NonceKey, WALEdit> curWALEditForNonce;
@Override
public boolean visit(int index) throws IOException {
Mutation m = getMutation(index);
// we use durability of the original mutation for the mutation passed by CP.
if (region.getEffectiveDurability(m.getDurability()) == Durability.SKIP_WAL) {
region.recordMutationWithoutWal(m.getFamilyCellMap());
/**
 * This is for HBASE-26993: in order to make the new framework for region replication
 * work for SKIP_WAL, we save the {@link Mutation} whose
 * {@link Mutation#getDurability} is {@link Durability#SKIP_WAL} in miniBatchOp.
*/
cacheSkipWALMutationForRegionReplication(miniBatchOp, walEdits, familyCellMaps[index]);
return true;
}
// the batch may contain multiple nonce keys (replay case). If so, write WALEdit for each.
// Given how nonce keys are originally written, these should be contiguous.
// They don't have to be, it will still work, just write more WALEdits than needed.
long nonceGroup = getNonceGroup(index);
long nonce = getNonce(index);
if (
curWALEditForNonce == null
|| curWALEditForNonce.getFirst().getNonceGroup() != nonceGroup
|| curWALEditForNonce.getFirst().getNonce() != nonce
) {
curWALEditForNonce =
new Pair<>(new NonceKey(nonceGroup, nonce), createWALEdit(miniBatchOp));
walEdits.add(curWALEditForNonce);
}
WALEdit walEdit = curWALEditForNonce.getSecond();
// Add WAL edits from CPs.
WALEdit fromCP = walEditsFromCoprocessors[index];
List<Cell> cellsFromCP = fromCP == null ? Collections.emptyList() : fromCP.getCells();
addNonSkipWALMutationsToWALEdit(miniBatchOp, walEdit, cellsFromCP, familyCellMaps[index]);
return true;
}
});
return walEdits;
} | 3.68 |
hbase_ClientUtils_printRow | /**
* copy values into a TreeMap to get them in sorted order and print it
* @param rowResult Holds row name and then a map of columns to cells
*/
public static void printRow(final TRowResult rowResult) {
TreeMap<String, TCell> sorted = new TreeMap<>();
for (Map.Entry<ByteBuffer, TCell> column : rowResult.columns.entrySet()) {
sorted.put(utf8(column.getKey().array()), column.getValue());
}
StringBuilder rowStr = new StringBuilder();
for (Map.Entry<String, TCell> entry : sorted.entrySet()) {
rowStr.append(entry.getKey());
rowStr.append(" => ");
rowStr.append(utf8(entry.getValue().value.array()));
rowStr.append("; ");
}
System.out.println("row: " + utf8(rowResult.row.array()) + ", cols: " + rowStr);
} | 3.68 |
flink_VertexInputInfoComputationUtils_computeConsumedSubpartitionRange | /**
* Compute the consumed subpartition range for a subtask. This computation algorithm will evenly
* distribute subpartitions to downstream subtasks according to the number of subpartitions.
* Different downstream subtasks consume roughly the same number of subpartitions.
*
* @param consumerSubtaskIndex the subtask index
* @param numConsumers the total number of consumers
* @param numOfSubpartitionsSupplier a supplier to get the number of subpartitions
* @param isDynamicGraph whether is dynamic graph
* @param isBroadcast whether the edge is broadcast
* @return the computed subpartition range
*/
@VisibleForTesting
static IndexRange computeConsumedSubpartitionRange(
int consumerSubtaskIndex,
int numConsumers,
Supplier<Integer> numOfSubpartitionsSupplier,
boolean isDynamicGraph,
boolean isBroadcast) {
int consumerIndex = consumerSubtaskIndex % numConsumers;
if (!isDynamicGraph) {
return new IndexRange(consumerIndex, consumerIndex);
} else {
int numSubpartitions = numOfSubpartitionsSupplier.get();
if (isBroadcast) {
// broadcast results have only one subpartition, and it is consumed multiple times.
checkArgument(numSubpartitions == 1);
return new IndexRange(0, 0);
} else {
checkArgument(consumerIndex < numConsumers);
checkArgument(numConsumers <= numSubpartitions);
int start = consumerIndex * numSubpartitions / numConsumers;
int nextStart = (consumerIndex + 1) * numSubpartitions / numConsumers;
return new IndexRange(start, nextStart - 1);
}
}
} | 3.68 |
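A worked sketch of the dynamic-graph, non-broadcast branch above, with 5 subpartitions split across 2 consumers:

int numSubpartitions = 5;
int numConsumers = 2;
for (int consumerIndex = 0; consumerIndex < numConsumers; consumerIndex++) {
    int start = consumerIndex * numSubpartitions / numConsumers;            // 0, then 2
    int nextStart = (consumerIndex + 1) * numSubpartitions / numConsumers;  // 2, then 5
    System.out.println("consumer " + consumerIndex + " -> [" + start + ", " + (nextStart - 1) + "]");
}
// prints: consumer 0 -> [0, 1] and consumer 1 -> [2, 4]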
hbase_BucketCache_getIOEngineFromName | /**
* Get the IOEngine from the IO engine name
* @return the IOEngine
*/
private IOEngine getIOEngineFromName(String ioEngineName, long capacity, String persistencePath)
throws IOException {
if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) {
// In order to keep the usage simple, we only document the prefix 'files:' whether there
// is one file or multiple files, but 'file:' is also supported for
// compatibility
String[] filePaths =
ioEngineName.substring(ioEngineName.indexOf(":") + 1).split(FileIOEngine.FILE_DELIMITER);
return new FileIOEngine(capacity, persistencePath != null, filePaths);
} else if (ioEngineName.startsWith("offheap")) {
return new ByteBufferIOEngine(capacity);
} else if (ioEngineName.startsWith("mmap:")) {
return new ExclusiveMemoryMmapIOEngine(ioEngineName.substring(5), capacity);
} else if (ioEngineName.startsWith("pmem:")) {
// This mode of bucket cache creates an IOEngine over a file on the persistent memory
// device. Since the persistent memory device has its own address space the contents
// mapped to this address space does not get swapped out like in the case of mmapping
// on to DRAM. Hence the cells created out of the hfile blocks in the pmem bucket cache
// can be directly referred to without having to copy them onheap. Once the RPC is done,
// the blocks can be returned back as in case of ByteBufferIOEngine.
return new SharedMemoryMmapIOEngine(ioEngineName.substring(5), capacity);
} else {
throw new IllegalArgumentException(
"Don't understand io engine name for cache- prefix with file:, files:, mmap: or offheap");
}
} | 3.68 |
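To illustrate the dispatch above with hypothetical paths: an ioEngineName of "files:/mnt/ssd1/bucketcache,/mnt/ssd2/bucketcache" yields a FileIOEngine over two files, "offheap" yields a ByteBufferIOEngine, "mmap:/mnt/ssd1/bucketcache" yields an ExclusiveMemoryMmapIOEngine, and "pmem:/mnt/pmem0/bucketcache" yields a SharedMemoryMmapIOEngine.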
flink_CheckpointProperties_isSynchronous | /**
* Returns whether the checkpoint properties describe a synchronous savepoint/checkpoint.
*
* @return <code>true</code> if the properties describe a synchronous operation, <code>false
* </code> otherwise.
*/
public boolean isSynchronous() {
return isSavepoint() && ((SavepointType) checkpointType).isSynchronous();
} | 3.68 |
flink_FlinkImageBuilder_useCustomStartupCommand | /** Use a custom command for starting up the container. */
public FlinkImageBuilder useCustomStartupCommand(String command) {
checkStartupCommandNotSet();
this.startupCommand = command;
this.imageNameSuffix = "custom";
return this;
} | 3.68 |
dubbo_Version_compare | /**
* Compare versions
*
* @return the value {@code 0} if {@code version1 == version2};
* a value less than {@code 0} if {@code version1 < version2}; and
* a value greater than {@code 0} if {@code version1 > version2}
*/
public static int compare(String version1, String version2) {
return Integer.compare(getIntVersion(version1), getIntVersion(version2));
} | 3.68 |
flink_EmbeddedLeaderService_confirmLeader | /** Callback from leader contenders when they confirm a leader grant. */
private void confirmLeader(
final EmbeddedLeaderElection embeddedLeaderElection,
final UUID leaderSessionId,
final String leaderAddress) {
synchronized (lock) {
// if the leader election was shut down in the meantime, ignore this confirmation
if (!embeddedLeaderElection.running || shutdown) {
return;
}
try {
// check if the confirmation is for the same grant, or whether it is a stale grant
if (embeddedLeaderElection == currentLeaderProposed
&& currentLeaderSessionId.equals(leaderSessionId)) {
LOG.info(
"Received confirmation of leadership for leader {} , session={}",
leaderAddress,
leaderSessionId);
// mark leadership
currentLeaderConfirmed = embeddedLeaderElection;
currentLeaderAddress = leaderAddress;
currentLeaderProposed = null;
// notify all listeners
notifyAllListeners(leaderAddress, leaderSessionId);
} else {
LOG.debug(
"Received confirmation of leadership for a stale leadership grant. Ignoring.");
}
} catch (Throwable t) {
fatalError(t);
}
}
} | 3.68 |
pulsar_ManagedLedgerInterceptor_processPayloadBeforeEntryCache | /**
* Intercept after entry is read from ledger, before it gets cached.
* @param dataReadFromLedger data from ledger
* @return handle to the processor
*/
default PayloadProcessorHandle processPayloadBeforeEntryCache(ByteBuf dataReadFromLedger){
return null;
} | 3.68 |
flink_OneInputStateTransformation_transform | /**
* Method for passing user defined operators along with the type information that will transform
* the OperatorTransformation.
*
* <p><b>IMPORTANT:</b> Any output from this operator will be discarded.
*
 * @param factory A factory returning the transformation logic for the resulting stream
* @return An {@link StateBootstrapTransformation} that can be added to a {@link Savepoint}.
*/
public StateBootstrapTransformation<T> transform(SavepointWriterOperatorFactory factory) {
return new StateBootstrapTransformation<>(stream, operatorMaxParallelism, factory);
} | 3.68 |
hudi_ArrayUtils_toPrimitive | // Long array converters
// ----------------------------------------------------------------------
/**
* <p>Converts an array of object Longs to primitives.</p>
*
* <p>This method returns {@code null} for a {@code null} input array.</p>
*
* @param array a {@code Long} array, may be {@code null}
* @return a {@code long} array, {@code null} if null array input
* @throws NullPointerException if array content is {@code null}
*/
public static long[] toPrimitive(Long[] array) {
if (array == null) {
return null;
} else if (array.length == 0) {
return EMPTY_LONG_ARRAY;
}
final long[] result = new long[array.length];
for (int i = 0; i < array.length; i++) {
result[i] = array[i].longValue();
}
return result;
} | 3.68 |
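A minimal usage example of the converter above:

long[] values = ArrayUtils.toPrimitive(new Long[] {2L, 3L, 5L}); // {2, 3, 5}
long[] none = ArrayUtils.toPrimitive((Long[]) null);             // null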
flink_FlinkResultSetMetaData_getType | /**
 * Get the column type name according to the type in {@link Types}.
*
* @param type the type in {@link Types}
* @return type class name
*/
static String getType(int type) throws SQLException {
// see javax.sql.rowset.RowSetMetaDataImpl
switch (type) {
case Types.NUMERIC:
case Types.DECIMAL:
return BigDecimal.class.getName();
case Types.BOOLEAN:
case Types.BIT:
return Boolean.class.getName();
case Types.TINYINT:
return Byte.class.getName();
case Types.SMALLINT:
return Short.class.getName();
case Types.INTEGER:
return Integer.class.getName();
case Types.BIGINT:
return Long.class.getName();
case Types.REAL:
case Types.FLOAT:
return Float.class.getName();
case Types.DOUBLE:
return Double.class.getName();
case Types.VARCHAR:
case Types.CHAR:
return String.class.getName();
case Types.BINARY:
case Types.VARBINARY:
case Types.LONGVARBINARY:
return "byte[]";
case Types.DATE:
return Date.class.getName();
case Types.TIME:
return Time.class.getName();
case Types.TIMESTAMP:
return Timestamp.class.getName();
case Types.TIMESTAMP_WITH_TIMEZONE:
return OffsetDateTime.class.getName();
case Types.JAVA_OBJECT:
return Map.class.getName();
case Types.ARRAY:
return Array.class.getName();
case Types.STRUCT:
return RowData.class.getName();
}
throw new SQLFeatureNotSupportedException(
String.format("Not support data type [%s]", type));
} | 3.68 |
hbase_HFileInfo_append | /**
* Append the given key/value pair to the file info, optionally checking the key prefix.
* @param k key to add
* @param v value to add
* @param checkPrefix whether to check that the provided key does not start with the reserved
* prefix
* @return this file info object
* @throws IOException if the key or value is invalid
* @throws NullPointerException if {@code key} or {@code value} is {@code null}
*/
public HFileInfo append(final byte[] k, final byte[] v, final boolean checkPrefix)
throws IOException {
Objects.requireNonNull(k, "key cannot be null");
Objects.requireNonNull(v, "value cannot be null");
if (checkPrefix && isReservedFileInfoKey(k)) {
throw new IOException("Keys with a " + HFileInfo.RESERVED_PREFIX + " are reserved");
}
put(k, v);
return this;
} | 3.68 |
hadoop_S3ClientFactory_withExecutionInterceptors | /**
* List of execution interceptors.
* @param interceptors interceptors list.
* @return this object
*/
public S3ClientCreationParameters withExecutionInterceptors(
@Nullable final List<ExecutionInterceptor> interceptors) {
executionInterceptors = interceptors;
return this;
} | 3.68 |
framework_VUI_onLoad | /**
* Start to periodically monitor for parent element resizes if embedded
* application (e.g. portlet).
*/
@Override
protected void onLoad() {
super.onLoad();
if (isMonitoringParentSize()) {
resizeTimer = new Timer() {
@Override
public void run() {
// trigger check to see if parent size has changed,
// recalculate layouts
performSizeCheck();
resizeTimer.schedule(MONITOR_PARENT_TIMER_INTERVAL);
}
};
resizeTimer.schedule(MONITOR_PARENT_TIMER_INTERVAL);
}
} | 3.68 |
querydsl_MetaDataExporter_setSchemaPattern | /**
* Set the schema pattern filter to be used
*
* @param schemaPattern a schema name pattern; must match the schema name
* as it is stored in the database; "" retrieves those without a schema;
* {@code null} means that the schema name should not be used to narrow
* the search (default: null)
*/
public void setSchemaPattern(@Nullable String schemaPattern) {
this.schemaPattern = schemaPattern;
} | 3.68 |
framework_DataCommunicator_onDropRows | /**
* Triggered when rows have been dropped from the client side cache.
*
* @param keys
* the keys of the rows that have been dropped
* @since 8.0.6
*/
protected void onDropRows(JsonArray keys) {
for (int i = 0; i < keys.length(); ++i) {
handler.dropActiveData(keys.getString(i));
}
} | 3.68 |
morf_HumanReadableStatementHelper_isComplexField | /**
* Tests if a field instance is sufficiently complicated to warrant putting brackets around it
* when it is used as a criterion operand.
*
* @param field the field to test.
* @return {@code true} if it should be placed in parenthesis.
*/
private static boolean isComplexField(final AliasedField field) {
if (field instanceof Cast) {
return isComplexField(((Cast)field).getExpression());
} else if (field instanceof ConcatenatedField
|| field instanceof FieldLiteral
|| field instanceof FieldReference) {
return false;
} else if (field instanceof Function) {
final FunctionTypeMetaData metaData = functionTypeMetaData.get(((Function)field).getType());
return metaData != null && metaData.paren;
} else {
return true;
}
} | 3.68 |
hbase_VisibilityController_getRegionObserver | /**************************** Observer/Service Getters ************************************/
@Override
public Optional<RegionObserver> getRegionObserver() {
return Optional.of(this);
} | 3.68 |
flink_ArrowUtils_createArrowReader | /** Creates an {@link ArrowReader} for the specified {@link VectorSchemaRoot}. */
public static ArrowReader createArrowReader(VectorSchemaRoot root, RowType rowType) {
List<ColumnVector> columnVectors = new ArrayList<>();
List<FieldVector> fieldVectors = root.getFieldVectors();
for (int i = 0; i < fieldVectors.size(); i++) {
columnVectors.add(createColumnVector(fieldVectors.get(i), rowType.getTypeAt(i)));
}
return new ArrowReader(columnVectors.toArray(new ColumnVector[0]));
} | 3.68 |
flink_TwoInputOperator_getInput2Type | /**
* Gets the type information of the data type of the second input data set. This method returns
* equivalent information as {@code getInput2().getType()}.
*
* @return The second input data type.
*/
public TypeInformation<IN2> getInput2Type() {
return this.input2.getType();
} | 3.68 |
flink_BlobServer_getFileInternalWithReadLock | /**
* Retrieves the local path of a file associated with a job and a blob key.
*
* <p>The blob server looks the blob key up in its local storage. If the file exists, it is
* returned. If the file does not exist, it is retrieved from the HA blob store (if available)
* or a {@link FileNotFoundException} is thrown.
*
* @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param blobKey blob key associated with the requested file
* @return file referring to the local storage location of the BLOB
* @throws IOException Thrown if the file retrieval failed.
*/
private File getFileInternalWithReadLock(@Nullable JobID jobId, BlobKey blobKey)
throws IOException {
checkArgument(blobKey != null, "BLOB key cannot be null.");
readWriteLock.readLock().lock();
try {
return getFileInternal(jobId, blobKey);
} finally {
readWriteLock.readLock().unlock();
}
} | 3.68 |
flink_CompletedCheckpointStatsSummary_createSnapshot | /**
* Creates a snapshot of the current state.
*
* @return A snapshot of the current state.
*/
CompletedCheckpointStatsSummarySnapshot createSnapshot() {
return new CompletedCheckpointStatsSummarySnapshot(
duration.createSnapshot(),
processedData.createSnapshot(),
persistedData.createSnapshot(),
stateSize.createSnapshot(),
checkpointedSize.createSnapshot());
} | 3.68 |
querydsl_Expressions_collectionPath | /**
* Create a new Path expression
*
* @param type element type
* @param queryType element expression type
* @param metadata path metadata
* @param <E> element type
* @param <Q> element expression type
* @return path expression
*/
public static <E, Q extends SimpleExpression<? super E>> CollectionPath<E, Q> collectionPath(Class<E> type, Class<Q> queryType, PathMetadata metadata) {
return new CollectionPath<E, Q>(type, queryType, metadata);
} | 3.68 |
hbase_FileMmapIOEngine_sync | /**
* Sync the data to file after writing
*/
@Override
public void sync() throws IOException {
if (fileChannel != null) {
fileChannel.force(true);
}
} | 3.68 |
open-banking-gateway_PsuLoginService_loginInPsuScopeAndAssociateAuthSession | /**
 * Used for the cases when the PSU should be identified, i.e. for consent sharing, so that the PSU can manage associated entities.
*/
public CompletableFuture<Outcome> loginInPsuScopeAndAssociateAuthSession(String psuLogin, String psuPassword, UUID authorizationId, String authorizationPassword) {
var exchange = oper.execute(callback -> {
AuthSession session = authRepository.findById(authorizationId)
.orElseThrow(() -> new IllegalStateException("Missing authorization session: " + authorizationId));
session.setPsu(psus.findByLogin(psuLogin).orElseThrow(() -> new IllegalStateException("No PSU found: " + psuLogin)));
associationService.sharePsuAspspSecretKeyWithFintech(psuPassword, session);
FintechConsentSpecSecureStorage.FinTechUserInboxData inbox = associationService.readInboxFromFinTech(session, authorizationPassword);
session.setStatus(SessionStatus.STARTED);
authRepository.save(session);
return new SessionAndInbox(session.getRedirectCode(), inbox);
});
return executeOnLoginAndMap(exchange.getInbox(), authorizationId, exchange.getRedirectCode());
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithGreaterThanOrEqualToClause | /**
 * Tests a select with a greater than or equal to clause.
*/
@Test
public void testSelectWithGreaterThanOrEqualToClause() {
SelectStatement stmt = new SelectStatement()
.from(new TableReference(TEST_TABLE))
.where(greaterThanOrEqualTo(new FieldReference(INT_FIELD), 20090101));
String expectedSql = "SELECT * FROM " + tableName(TEST_TABLE) + " WHERE (intField >= 20090101)";
assertEquals("Select with greater than or equal to clause", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
zxing_IntentResult_getOrientation | /**
* @return rotation of the image, in degrees, which resulted in a successful scan. May be null.
*/
public Integer getOrientation() {
return orientation;
} | 3.68 |
framework_TreeGridElement_expandWithClick | /**
* Expands the row at the given index in the grid with the given
* hierarchical column index.
*
* @param rowIndex
* 0-based row index to expand
* @param hierarchyColumnIndex
* 0-based index of the hierarchy column
*/
public void expandWithClick(int rowIndex, int hierarchyColumnIndex) {
if (isRowExpanded(rowIndex, hierarchyColumnIndex)) {
throw new IllegalStateException(
"The element at row " + rowIndex + " was expanded already");
}
getExpandElement(rowIndex, hierarchyColumnIndex).click();
} | 3.68 |
flink_ResourceCounter_containsResource | /**
* Checks whether resourceProfile is contained in this counter.
*
* @param resourceProfile resourceProfile to check whether it is contained
* @return {@code true} if the counter has a positive count for the given resourceProfile;
* otherwise {@code false}
*/
public boolean containsResource(ResourceProfile resourceProfile) {
return resources.containsKey(resourceProfile);
} | 3.68 |
hbase_CatalogFamilyFormat_getRegionLocations | /**
* Returns an HRegionLocationList extracted from the result.
* @return an HRegionLocationList containing all locations for the region range or null if we
* can't deserialize the result.
*/
@Nullable
public static RegionLocations getRegionLocations(final Result r) {
if (r == null) {
return null;
}
RegionInfo regionInfo = getRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
if (regionInfo == null) {
return null;
}
List<HRegionLocation> locations = new ArrayList<>(1);
NavigableMap<byte[], NavigableMap<byte[], byte[]>> familyMap = r.getNoVersionMap();
locations.add(getRegionLocation(r, regionInfo, 0));
NavigableMap<byte[], byte[]> infoMap = familyMap.get(HConstants.CATALOG_FAMILY);
if (infoMap == null) {
return new RegionLocations(locations);
}
// iterate until all serverName columns are seen
int replicaId = 0;
byte[] serverColumn = getServerColumn(replicaId);
SortedMap<byte[], byte[]> serverMap;
serverMap = infoMap.tailMap(serverColumn, false);
if (serverMap.isEmpty()) {
return new RegionLocations(locations);
}
for (Map.Entry<byte[], byte[]> entry : serverMap.entrySet()) {
replicaId = parseReplicaIdFromServerColumn(entry.getKey());
if (replicaId < 0) {
break;
}
HRegionLocation location = getRegionLocation(r, regionInfo, replicaId);
// In case the region replica is newly created, its location might be null. We usually do not
// have HRL's in RegionLocations object with null ServerName. They are handled as null HRLs.
if (location.getServerName() == null) {
locations.add(null);
} else {
locations.add(location);
}
}
return new RegionLocations(locations);
} | 3.68 |
hadoop_YarnConfigurationStore_getUpdates | /**
* Get key-value configuration updates.
* @return map of configuration updates
*/
public Map<String, String> getUpdates() {
return updates;
} | 3.68 |
framework_Profiler_isEnabled | /**
* Checks whether the profiling gathering is enabled.
*
* @return <code>true</code> if the profiling is enabled, else
* <code>false</code>
*/
public static boolean isEnabled() {
// This will be fully inlined by the compiler
Profiler create = GWT.create(Profiler.class);
return create.isImplEnabled();
} | 3.68 |
flink_TableFunction_finish | /**
* This method is called at the end of data processing. After this method is called, no more
* records can be produced for the downstream operators.
*
* <p><b>NOTE:</b>This method does not need to close any resources. You should release external
* resources in the {@link #close()} method. More details can see {@link
* StreamOperator#finish()}.
*
 * <p><b>Important:</b> Emitting records in the {@link #close()} method has been impossible since
 * Flink 1.14; if you need to emit records at the end of data processing, do so in the {@link
* #finish()} method.
*/
public void finish() throws Exception {
// do nothing
} | 3.68 |
hbase_FileArchiverNotifierImpl_persistSnapshotSizes | /**
* Writes the snapshot sizes to the provided {@code table}.
*/
void persistSnapshotSizes(Table table, List<SnapshotWithSize> snapshotSizes) throws IOException {
// Convert each entry in the map to a Put and write them to the quota table
table.put(snapshotSizes.stream()
.map(sws -> QuotaTableUtil.createPutForSnapshotSize(tn, sws.getName(), sws.getSize()))
.collect(Collectors.toList()));
} | 3.68 |
flink_Rowtime_timestampsFromSource | /**
* Sets a built-in timestamp extractor that converts the assigned timestamps from a DataStream
* API record into the rowtime attribute and thus preserves the assigned timestamps from the
* source.
*
* <p>Note: This extractor only works in streaming environments.
*/
public Rowtime timestampsFromSource() {
internalProperties.putString(
ROWTIME_TIMESTAMPS_TYPE, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_SOURCE);
return this;
} | 3.68 |
framework_VaadinPortletSession_addPortletListener | /**
* Adds a listener for various types of portlet requests.
*
* @param listener
* to add
* @since 8.0
*/
public Registration addPortletListener(PortletListener listener) {
portletListeners.add(listener);
return () -> portletListeners.remove(listener);
} | 3.68 |
flink_StreamExecutionEnvironment_getStreamTimeCharacteristic | /**
* Gets the time characteristic.
*
* @deprecated See {@link #setStreamTimeCharacteristic(TimeCharacteristic)} for deprecation
* notice.
*/
@PublicEvolving
@Deprecated
public TimeCharacteristic getStreamTimeCharacteristic() {
return timeCharacteristic;
} | 3.68 |
hbase_ConnectionCache_updateConnectionAccessTime | /**
* Updates the access time for the current connection. Used to keep Connections alive for
* long-lived scanners.
* @return whether we successfully updated the last access time
*/
public boolean updateConnectionAccessTime() {
String userName = getEffectiveUser();
ConnectionInfo connInfo = connections.get(userName);
if (connInfo != null) {
return connInfo.updateAccessTime();
}
return false;
} | 3.68 |
flink_CollectionUtil_newHashMapWithExpectedSize | /**
* Creates a new {@link HashMap} of the expected size, i.e. a hash map that will not rehash if
* expectedSize many keys are inserted, considering the load factor.
*
* @param expectedSize the expected size of the created hash map.
* @return a new hash map instance with enough capacity for the expected size.
* @param <K> the type of keys maintained by this map.
* @param <V> the type of mapped values.
*/
public static <K, V> HashMap<K, V> newHashMapWithExpectedSize(int expectedSize) {
return new HashMap<>(
computeRequiredCapacity(expectedSize, HASH_MAP_DEFAULT_LOAD_FACTOR),
HASH_MAP_DEFAULT_LOAD_FACTOR);
} | 3.68 |
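A hedged sketch of the capacity computation the javadoc implies; the real computeRequiredCapacity is not shown in this snippet, so the formula below is an assumption: with the default load factor of 0.75, sizing the map to ceil(expectedSize / loadFactor) avoids rehashing for expectedSize insertions.

// Assumed sketch, not the actual Flink implementation of computeRequiredCapacity.
static int requiredCapacitySketch(int expectedSize, float loadFactor) {
    return (int) Math.ceil(expectedSize / (double) loadFactor);
}
// e.g. requiredCapacitySketch(12, 0.75f) == 16, so new HashMap<>(16, 0.75f)
// can hold 12 entries without triggering a resize.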
open-banking-gateway_ExpirableDataConfig_protocolCacheBuilder | /**
 * @param flowableProperties contains the 'expire-after-write' property: the duration for which a record stays alive;
 *                           the record is removed when this time frame passes.
* @return Builder to build expirable maps.
*/
@Bean(PROTOCOL_CACHE_BUILDER)
CacheBuilder protocolCacheBuilder(FlowableProperties flowableProperties) {
Duration expireAfterWrite = flowableProperties.getExpirable().getExpireAfterWrite();
if (expireAfterWrite.getSeconds() < MIN_EXPIRE_SECONDS) {
throw new IllegalArgumentException("It is not recommended to have short transient data expiration time, "
+ "it must be at least equal to request timeout");
}
return newBuilder()
.expireAfterWrite(expireAfterWrite)
.maximumSize(Integer.MAX_VALUE);
} | 3.68 |
framework_DateTimeField_getAssistiveText | /**
* Get the description that explains the usage of the Widget for users of
* assistive devices.
*
* @return String with the description
*/
public String getAssistiveText() {
return getState(false).descriptionForAssistiveDevices;
} | 3.68 |
flink_DelimitedInputFormat_getCharset | /**
* Get the character set used for the row delimiter. This is also used by subclasses to
* interpret field delimiters, comment strings, and for configuring {@link FieldParser}s.
*
* @return the charset
*/
@PublicEvolving
public Charset getCharset() {
if (this.charset == null) {
this.charset = Charset.forName(charsetName);
}
return this.charset;
} | 3.68 |
flink_ExtractionUtils_validateStructuredSelfReference | /**
* Validates if a given type is not already contained in the type hierarchy of a structured
* type.
*
* <p>Otherwise this would lead to infinite data type extraction cycles.
*/
static void validateStructuredSelfReference(Type t, List<Type> typeHierarchy) {
final Class<?> clazz = toClass(t);
if (clazz != null
&& !clazz.isInterface()
&& clazz != Object.class
&& typeHierarchy.contains(t)) {
throw extractionError(
"Cyclic reference detected for class '%s'. Attributes of structured types must not "
+ "(transitively) reference the structured type itself.",
clazz.getName());
}
} | 3.68 |
hudi_HoodieMergeHandleFactory_create | /**
* Creates a merge handle for compaction path.
*/
public static <T, I, K, O> HoodieMergeHandle<T, I, K, O> create(
HoodieWriteConfig writeConfig,
String instantTime,
HoodieTable<T, I, K, O> table,
Map<String, HoodieRecord<T>> keyToNewRecords,
String partitionPath,
String fileId,
HoodieBaseFile dataFileToBeMerged,
TaskContextSupplier taskContextSupplier,
Option<BaseKeyGenerator> keyGeneratorOpt) {
LOG.info("Get updateHandle for fileId {} and partitionPath {} at commit {}", fileId, partitionPath, instantTime);
if (table.requireSortedRecords()) {
return new HoodieSortedMergeHandle<>(writeConfig, instantTime, table, keyToNewRecords, partitionPath, fileId,
dataFileToBeMerged, taskContextSupplier, keyGeneratorOpt);
} else {
return new HoodieMergeHandle<>(writeConfig, instantTime, table, keyToNewRecords, partitionPath, fileId,
dataFileToBeMerged, taskContextSupplier, keyGeneratorOpt);
}
} | 3.68 |
flink_CrossOperator_projectTuple17 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>
ProjectCross<
I1,
I2,
Tuple17<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16>>
projectTuple17() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<
Tuple17<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16>>
tType =
new TupleTypeInfo<
Tuple17<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16>>(fTypes);
return new ProjectCross<
I1,
I2,
Tuple17<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
hbase_MiniZooKeeperCluster_startup | /**
* @param baseDir the base directory to use
* @param numZooKeeperServers the number of ZooKeeper servers
   * @return the client port the server is bound to, or -1 if there was a binding problem and we
   *         couldn't pick another port.
   * @throws IOException if an operation fails during the startup
   * @throws InterruptedException if the startup is interrupted
*/
public int startup(File baseDir, int numZooKeeperServers)
throws IOException, InterruptedException {
if (numZooKeeperServers <= 0) {
return -1;
}
setupTestEnv();
shutdown();
int tentativePort = -1; // the seed port
int currentClientPort;
// running all the ZK servers
for (int i = 0; i < numZooKeeperServers; i++) {
File dir = new File(baseDir, "zookeeper_" + i).getAbsoluteFile();
createDir(dir);
int tickTimeToUse;
if (this.tickTime > 0) {
tickTimeToUse = this.tickTime;
} else {
tickTimeToUse = TICK_TIME;
}
// Set up client port - if we have already had a list of valid ports, use it.
if (hasValidClientPortInList(i)) {
currentClientPort = clientPortList.get(i);
} else {
tentativePort = selectClientPort(tentativePort); // update the seed
currentClientPort = tentativePort;
}
ZooKeeperServer server = new ZooKeeperServer(dir, dir, tickTimeToUse);
// Setting {min,max}SessionTimeout defaults to be the same as in Zookeeper
server.setMinSessionTimeout(
configuration.getInt("hbase.zookeeper.property.minSessionTimeout", -1));
server.setMaxSessionTimeout(
configuration.getInt("hbase.zookeeper.property.maxSessionTimeout", -1));
NIOServerCnxnFactory standaloneServerFactory;
while (true) {
try {
standaloneServerFactory = new NIOServerCnxnFactory();
String bindAddr =
configuration.get("hbase.zookeeper.property.clientPortAddress", LOOPBACK_HOST);
standaloneServerFactory.configure(new InetSocketAddress(bindAddr, currentClientPort),
configuration.getInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS,
HConstants.DEFAULT_ZOOKEEPER_MAX_CLIENT_CNXNS));
} catch (BindException e) {
LOG.debug("Failed binding ZK Server to client port: " + currentClientPort, e);
// We're told to use some port but it's occupied, fail
if (hasValidClientPortInList(i)) {
return -1;
}
// This port is already in use, try to use another.
tentativePort = selectClientPort(tentativePort);
currentClientPort = tentativePort;
continue;
}
break;
}
// Start up this ZK server. Dump its stats.
standaloneServerFactory.startup(server);
LOG.info("Started connectionTimeout={}, dir={}, {}", connectionTimeout, dir,
getServerConfigurationOnOneLine(server));
// Runs a 'stat' against the servers.
if (!waitForServerUp(currentClientPort, connectionTimeout)) {
Threads.printThreadInfo(System.out, "Why is zk standalone server not coming up?");
throw new IOException(
"Waiting for startup of standalone server; " + "server isRunning=" + server.isRunning());
}
// We have selected a port as a client port. Update clientPortList if necessary.
if (clientPortList.size() <= i) { // it is not in the list, add the port
clientPortList.add(currentClientPort);
} else if (clientPortList.get(i) <= 0) { // the list has invalid port, update with valid port
clientPortList.remove(i);
clientPortList.add(i, currentClientPort);
}
standaloneServerFactoryList.add(standaloneServerFactory);
zooKeeperServers.add(server);
}
// set the first one to be active ZK; Others are backups
activeZKServerIndex = 0;
started = true;
int clientPort = clientPortList.get(activeZKServerIndex);
LOG.info("Started MiniZooKeeperCluster and ran 'stat' on client port={}", clientPort);
return clientPort;
} | 3.68 |
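A hedged usage sketch of the startup contract above; the constructor, base directory, and server count are assumptions, and only the startup/shutdown calls visible in this snippet are relied on:

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;

public class MiniZkStartupSketch {
    public static void main(String[] args) throws Exception {
        MiniZooKeeperCluster cluster = new MiniZooKeeperCluster(new Configuration());
        int clientPort = cluster.startup(new File("/tmp/minizk"), 3); // three backing ZK servers
        try {
            System.out.println("Active ZooKeeper client port: " + clientPort);
        } finally {
            cluster.shutdown();
        }
    }
}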
hadoop_StorageStatistics_getValue | /**
* @return The value of this statistic.
*/
public long getValue() {
return value;
} | 3.68 |
flink_MetricConfig_getBoolean | /**
* Searches for the property with the specified key in this property list. If the key is not
* found in this property list, the default property list, and its defaults, recursively, are
* then checked. The method returns the default value argument if the property is not found.
*
* @param key the hashtable key.
* @param defaultValue a default value.
* @return the value in this property list with the specified key value parsed as a boolean.
*/
public boolean getBoolean(String key, boolean defaultValue) {
String argument = getProperty(key, null);
return argument == null ? defaultValue : Boolean.parseBoolean(argument);
} | 3.68 |
morf_DatabaseDataSetProducer_isTableEmpty | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#isTableEmpty(java.lang.String)
*/
@Override
public boolean isTableEmpty(String tableName) {
SelectStatement countQuery = new SelectStatement().from(new TableReference(tableName));
String sql = sqlDialect.convertStatementToSQL(countQuery);
if (connection == null) {
throw new IllegalStateException("Dataset has not been opened");
}
try (Statement statement = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
ResultSet resultSet = statement.executeQuery(sql)) {
// the table is empty if there are no rows returned.
return !resultSet.next();
} catch (SQLException sqlException) {
throw new RuntimeSqlException("Failed to execute count of rows in table [" + tableName + "]: [" + sql + "]", sqlException);
}
} | 3.68 |
hbase_IncrementCoalescer_dynamicallySetCoreSize | /**
* This method samples the incoming requests and, if selected, will check if the corePoolSize
* should be changed.
* @param countersMapSize the size of the counters map
*/
private void dynamicallySetCoreSize(int countersMapSize) {
    // Here we use countersMapSize as a cheap source of randomness so that only roughly one call
    // in ten is sampled; a Random object could be used instead.
if (countersMapSize % 10 != 0) {
return;
}
double currentRatio = (double) countersMapSize / (double) maxQueueSize;
int newValue;
if (currentRatio < 0.1) {
newValue = 1;
} else if (currentRatio < 0.3) {
newValue = 2;
} else if (currentRatio < 0.5) {
newValue = 4;
} else if (currentRatio < 0.7) {
newValue = 8;
} else if (currentRatio < 0.9) {
newValue = 14;
} else {
newValue = 22;
}
if (pool.getCorePoolSize() != newValue) {
pool.setCorePoolSize(newValue);
}
} | 3.68 |
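A worked illustration of the sizing heuristic above, with the thresholds copied from the snippet; the wrapper class exists only for demonstration:

public class CoalescerSizingSketch {
    // Mirrors the ratio-to-core-pool-size mapping used in dynamicallySetCoreSize.
    static int coreSizeFor(int countersMapSize, int maxQueueSize) {
        double ratio = (double) countersMapSize / (double) maxQueueSize;
        if (ratio < 0.1) return 1;
        if (ratio < 0.3) return 2;
        if (ratio < 0.5) return 4;
        if (ratio < 0.7) return 8;
        if (ratio < 0.9) return 14;
        return 22;
    }

    public static void main(String[] args) {
        System.out.println(coreSizeFor(50, 1000));  // occupancy 0.05 -> 1 core thread
        System.out.println(coreSizeFor(400, 1000)); // occupancy 0.40 -> 4 core threads
        System.out.println(coreSizeFor(950, 1000)); // occupancy 0.95 -> 22 core threads
    }
}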
flink_RemoteInputChannel_retriggerSubpartitionRequest | /** Retriggers a remote subpartition request. */
void retriggerSubpartitionRequest() throws IOException {
checkPartitionRequestQueueInitialized();
if (increaseBackoff()) {
partitionRequestClient.requestSubpartition(
partitionId, consumedSubpartitionIndex, this, 0);
} else {
failPartitionRequest();
}
} | 3.68 |
flink_ProcessingTimeServiceUtil_getProcessingTimeDelay | /**
* Returns the remaining delay of the processing time specified by {@code processingTimestamp}.
* This delay guarantees that the timer will be fired at least 1ms after the time it's
* registered for.
*
* @param processingTimestamp the processing time in milliseconds
* @param currentTimestamp the current processing timestamp; it usually uses {@link
* ProcessingTimeService#getCurrentProcessingTime()} to get
* @return the remaining delay of the processing time
*/
public static long getProcessingTimeDelay(long processingTimestamp, long currentTimestamp) {
// Two cases of timers here:
// (1) future/now timers(processingTimestamp >= currentTimestamp): delay the firing of the
// timer by 1 ms to align the semantics with watermark. A watermark T says we
        // won't see elements in the future with a timestamp smaller than or equal to T. Without this
// 1ms delay, if we had fired the timer for T at the timestamp T, it would be possible
// that we would process another record for timestamp == T in the same millisecond, but
        // after the timer for the timestamp T has already been fired.
// (2) past timers(processingTimestamp < currentTimestamp): do not need to delay the firing
        // because currentTimestamp is larger than processingTimestamp plus the 1ms offset.
// TODO. The processing timers' performance can be further improved.
// see FLINK-23690 and https://github.com/apache/flink/pull/16744
if (processingTimestamp >= currentTimestamp) {
return processingTimestamp - currentTimestamp + 1;
} else {
return 0;
}
} | 3.68 |
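A few worked values for the contract above; the helper below restates the two branches for illustration rather than calling the Flink class directly:

public class ProcessingTimeDelaySketch {
    // Same semantics as getProcessingTimeDelay: +1 ms for future/now timers, 0 for overdue ones.
    static long delay(long processingTimestamp, long currentTimestamp) {
        return processingTimestamp >= currentTimestamp
                ? processingTimestamp - currentTimestamp + 1
                : 0;
    }

    public static void main(String[] args) {
        long now = 1_000L;
        System.out.println(delay(1_000L, now)); // 1   -> a timer for "now" still waits 1 ms
        System.out.println(delay(1_500L, now)); // 501 -> future timer, shifted by the 1 ms alignment
        System.out.println(delay(900L, now));   // 0   -> overdue timer fires immediately
    }
}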
MagicPlugin_ActionFactory_getActionResolvers | /**
* @return An unmodifiable list of action resolvers.
*/
public static List<ActionResolver> getActionResolvers() {
return Collections.unmodifiableList(resolvers);
} | 3.68 |
hbase_SnapshotDescriptionUtils_getSnapshotsDir | /**
* @param rootDir hbase root directory
   * @return the directory for all completed snapshots
*/
public static final Path getSnapshotsDir(Path rootDir) {
return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
} | 3.68 |
flink_NettyPartitionRequestClient_sendTaskEvent | /**
* Sends a task event backwards to an intermediate result partition producer.
*
* <p>Backwards task events flow between readers and writers and therefore will only work when
* both are running at the same time, which is only guaranteed to be the case when both the
* respective producer and consumer task run pipelined.
*/
@Override
public void sendTaskEvent(
ResultPartitionID partitionId, TaskEvent event, final RemoteInputChannel inputChannel)
throws IOException {
checkNotClosed();
tcpChannel
.writeAndFlush(
new TaskEventRequest(event, partitionId, inputChannel.getInputChannelId()))
.addListener(
(ChannelFutureListener)
future -> {
if (!future.isSuccess()) {
inputChannel.onError(
new LocalTransportException(
String.format(
"Sending the task event to '%s [%s] (#%d)' failed.",
connectionId.getAddress(),
connectionId
.getResourceID()
.getStringWithMetadata(),
connectionId.getConnectionIndex()),
future.channel().localAddress(),
future.cause()));
sendToChannel(
new ConnectionErrorMessage(
future.cause() == null
? new RuntimeException(
"Cannot send task event.")
: future.cause()));
}
});
} | 3.68 |
hadoop_PoolAlignmentContext_receiveResponseState | /**
   * The router updates a globally shared value using the response from
   * namenodes.
*/
@Override
public void receiveResponseState(RpcHeaderProtos.RpcResponseHeaderProto header) {
sharedGlobalStateId.accumulate(header.getStateId());
} | 3.68 |
hbase_MetricsConnection_incrNormalRunners | /** Increment the number of normal runner counts. */
public void incrNormalRunners() {
this.runnerStats.incrNormalRunners();
} | 3.68 |
hbase_HFileCorruptionChecker_getMissedMobFiles | /**
   * @return the set of paths that were missing, likely due to table deletion or deletion/moves from
* compaction.
*/
public Collection<Path> getMissedMobFiles() {
return new HashSet<>(missedMobFiles);
} | 3.68 |
pulsar_ManagedLedgerConfig_setMetadataWriteQuorumSize | /**
* @param metadataWriteQuorumSize
* the metadataWriteQuorumSize to set
*/
public ManagedLedgerConfig setMetadataWriteQuorumSize(int metadataWriteQuorumSize) {
this.metadataWriteQuorumSize = metadataWriteQuorumSize;
return this;
} | 3.68 |
flink_Task_triggerCheckpointBarrier | /**
* Calls the invokable to trigger a checkpoint.
*
* @param checkpointID The ID identifying the checkpoint.
* @param checkpointTimestamp The timestamp associated with the checkpoint.
* @param checkpointOptions Options for performing this checkpoint.
*/
public void triggerCheckpointBarrier(
final long checkpointID,
final long checkpointTimestamp,
final CheckpointOptions checkpointOptions) {
final TaskInvokable invokable = this.invokable;
final CheckpointMetaData checkpointMetaData =
new CheckpointMetaData(
checkpointID, checkpointTimestamp, System.currentTimeMillis());
if (executionState == ExecutionState.RUNNING) {
checkState(invokable instanceof CheckpointableTask, "invokable is not checkpointable");
try {
((CheckpointableTask) invokable)
.triggerCheckpointAsync(checkpointMetaData, checkpointOptions)
.handle(
(triggerResult, exception) -> {
if (exception != null || !triggerResult) {
declineCheckpoint(
checkpointID,
CheckpointFailureReason.TASK_FAILURE,
exception);
return false;
}
return true;
});
} catch (RejectedExecutionException ex) {
// This may happen if the mailbox is closed. It means that the task is shutting
// down, so we just ignore it.
LOG.debug(
"Triggering checkpoint {} for {} ({}) was rejected by the mailbox",
checkpointID,
taskNameWithSubtask,
executionId);
declineCheckpoint(
checkpointID, CheckpointFailureReason.CHECKPOINT_DECLINED_TASK_CLOSING);
} catch (Throwable t) {
if (getExecutionState() == ExecutionState.RUNNING) {
failExternally(
new Exception(
"Error while triggering checkpoint "
+ checkpointID
+ " for "
+ taskNameWithSubtask,
t));
} else {
LOG.debug(
"Encountered error while triggering checkpoint {} for "
+ "{} ({}) while being not in state running.",
checkpointID,
taskNameWithSubtask,
executionId,
t);
}
}
} else {
LOG.debug(
"Declining checkpoint request for non-running task {} ({}).",
taskNameWithSubtask,
executionId);
// send back a message that we did not do the checkpoint
declineCheckpoint(
checkpointID, CheckpointFailureReason.CHECKPOINT_DECLINED_TASK_NOT_READY);
}
} | 3.68 |
druid_IbatisUtils_getResource | /**
     * Obtains the resource via reflection, so it is compatible with both 2.3.0 and 2.3.4.
     *
     * @return the resource of the given statement, or {@code null} if it cannot be obtained
*/
protected static String getResource(Object statement) {
try {
if (methodGetResource == null) {
methodGetResource = statement.getClass().getMethod("getResource");
}
return (String) methodGetResource.invoke(statement);
} catch (Exception ex) {
return null;
}
} | 3.68 |
flink_MapStateDescriptor_getValueSerializer | /**
* Gets the serializer for the values in the state.
*
* @return The serializer for the values in the state.
*/
public TypeSerializer<UV> getValueSerializer() {
final TypeSerializer<Map<UK, UV>> rawSerializer = getSerializer();
if (!(rawSerializer instanceof MapSerializer)) {
throw new IllegalStateException("Unexpected serializer type.");
}
return ((MapSerializer<UK, UV>) rawSerializer).getValueSerializer();
} | 3.68 |
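A hedged usage sketch: constructing the descriptor with explicit key/value serializers (standard Flink serializer singletons, assumed to be on the classpath) so that getValueSerializer() can be called right away:

import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.api.common.typeutils.base.StringSerializer;

public class MapStateDescriptorSketch {
    public static void main(String[] args) {
        MapStateDescriptor<String, Long> descriptor =
                new MapStateDescriptor<>("counts", StringSerializer.INSTANCE, LongSerializer.INSTANCE);

        TypeSerializer<Long> valueSerializer = descriptor.getValueSerializer();
        System.out.println(valueSerializer.getClass().getSimpleName()); // LongSerializer
    }
}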
hbase_RegionServerSnapshotManager_submitTask | /**
   * Submits a task to the pool. NOTE: all tasks must be submitted before you can safely call
   * {@link #waitForOutstandingTasks()}. This version does not support issuing tasks from multiple
   * concurrent table snapshot requests.
*/
void submitTask(final Callable<Void> task) {
Future<Void> f = this.taskPool.submit(task);
futures.add(f);
} | 3.68 |