name | code_snippet | score
---|---|---|
pulsar_AuthorizationProvider_allowTenantOperationAsync | /**
* Check if a given <tt>role</tt> is allowed to execute a given <tt>operation</tt> on the tenant.
*
* @param tenantName tenant name
* @param role role name
* @param operation tenant operation
* @param authData authenticated data of the role
* @return a completable future representing the check result
*/
default CompletableFuture<Boolean> allowTenantOperationAsync(String tenantName, String role,
TenantOperation operation,
AuthenticationDataSource authData) {
return FutureUtil.failedFuture(new IllegalStateException(
String.format("allowTenantOperation(%s) on tenant %s is not supported by the Authorization"
+ " provider you are using.",
operation.toString(), tenantName)));
} | 3.68 |
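The default above simply returns an already-failed future so that older provider implementations stay source-compatible while callers still receive the error asynchronously. A minimal standalone sketch of that idiom follows; the WidgetProvider interface and method names are illustrative, not Pulsar API.

```java
import java.util.concurrent.CompletableFuture;

// Illustrative names only; this is not the Pulsar API.
interface WidgetProvider {
    default CompletableFuture<Boolean> allowWidgetOperationAsync(String widget, String role) {
        // Return an already-failed future: implementers that predate the method keep compiling,
        // and callers still receive the failure through the asynchronous result.
        CompletableFuture<Boolean> failed = new CompletableFuture<>();
        failed.completeExceptionally(new IllegalStateException(
                "allowWidgetOperation(" + widget + ") is not supported by this provider."));
        return failed;
    }
}

public class DefaultFailedFutureDemo {
    public static void main(String[] args) {
        WidgetProvider provider = new WidgetProvider() { };
        provider.allowWidgetOperationAsync("w1", "admin")
                .whenComplete((allowed, ex) ->
                        System.out.println(ex != null ? "rejected: " + ex : "allowed: " + allowed));
    }
}
```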
framework_MultiSelectionModelConnector_updateRowSelected | /**
* Marks the given row to be selected or deselected. The row data is only
* modified if the selection state actually changed.
* <p>
* Note: If selection model is in batch select state, the row will be pinned
* on select.
*
* @param row
* row handle
* @param selected
* {@code true} if row should be selected; {@code false} if not
*/
protected void updateRowSelected(RowHandle<JsonObject> row,
boolean selected) {
boolean itemWasMarkedSelected = SelectionModel
.isItemSelected(row.getRow());
if (selected && !itemWasMarkedSelected) {
row.getRow().put(DataCommunicatorConstants.SELECTED, true);
} else if (!selected && itemWasMarkedSelected) {
row.getRow().remove(DataCommunicatorConstants.SELECTED);
}
row.updateRow();
} | 3.68 |
framework_Table_setRowHeaderMode | /**
* Sets the row header mode.
* <p>
* The mode can be one of the following ones:
* <ul>
* <li>{@link #ROW_HEADER_MODE_HIDDEN}: The row captions are hidden.</li>
* <li>{@link #ROW_HEADER_MODE_ID}: Items Id-objects <code>toString()</code>
* is used as row caption.
* <li>{@link #ROW_HEADER_MODE_ITEM}: Item-objects <code>toString()</code>
* is used as row caption.
* <li>{@link #ROW_HEADER_MODE_PROPERTY}: Property set with
* {@link #setItemCaptionPropertyId(Object)} is used as row header.
* <li>{@link #ROW_HEADER_MODE_EXPLICIT_DEFAULTS_ID}: Items Id-objects
* <code>toString()</code> is used as row header. If caption is explicitly
* specified, it overrides the id-caption.
* <li>{@link #ROW_HEADER_MODE_EXPLICIT}: The row headers must be explicitly
* specified.</li>
* <li>{@link #ROW_HEADER_MODE_INDEX}: The index of the item is used as row
* caption. The index mode can only be used with the containers implementing
* <code>Container.Indexed</code> interface.</li>
* </ul>
* The default value is {@link #ROW_HEADER_MODE_HIDDEN}
* </p>
*
* @param mode
* the One of the modes listed above.
*/
public void setRowHeaderMode(RowHeaderMode mode) {
if (mode != null) {
rowHeaderMode = mode;
if (mode != RowHeaderMode.HIDDEN) {
setItemCaptionMode(mode.getItemCaptionMode());
}
// Assures the visual refresh. No need to reset the page buffer beforehand,
// as the content has not changed, only the alignments.
refreshRenderedCells();
}
} | 3.68 |
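A brief usage sketch for the method above, assuming the Vaadin 7 Table (com.vaadin.ui.Table) and its nested RowHeaderMode enum; in Vaadin 8 the compatibility class lives under com.vaadin.v7.ui instead.

```java
import com.vaadin.ui.Table;

public class RowHeaderDemo {
    public static void main(String[] args) {
        // Show the item index as the row header; per the javadoc above, INDEX mode
        // requires a Container.Indexed container (Table's default IndexedContainer qualifies).
        Table table = new Table("People");
        table.setRowHeaderMode(Table.RowHeaderMode.INDEX);

        // Switch back to the default: no row headers.
        table.setRowHeaderMode(Table.RowHeaderMode.HIDDEN);
    }
}
```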
flink_Tuple25_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple25<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22,
T23,
T24>
copy() {
return new Tuple25<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16,
this.f17, this.f18, this.f19, this.f20, this.f21, this.f22, this.f23, this.f24);
} | 3.68 |
framework_VCalendarPanel_setSubmitListener | /**
* The submit listener is called when the user selects a value from the
* calendar, either by clicking the day or by selecting it with the keyboard.
*
* @param submitListener
* The listener to trigger
*/
public void setSubmitListener(SubmitListener submitListener) {
this.submitListener = submitListener;
} | 3.68 |
hbase_ActivePolicyEnforcement_getLocallyCachedPolicies | /**
* Returns an unmodifiable version of the policy enforcements that were cached because they are
* not in violation of their quota.
*/
Map<TableName, SpaceViolationPolicyEnforcement> getLocallyCachedPolicies() {
return Collections.unmodifiableMap(locallyCachedPolicies);
} | 3.68 |
hbase_HMaster_createNamespace | /**
* Create a new Namespace.
* @param namespaceDescriptor descriptor for new Namespace
* @param nonceGroup Identifier for the source of the request, a client or process.
* @param nonce A unique identifier for this operation from the client or process
* identified by <code>nonceGroup</code> (the source must ensure each
* operation gets a unique id).
* @return procedure id
*/
long createNamespace(final NamespaceDescriptor namespaceDescriptor, final long nonceGroup,
final long nonce) throws IOException {
checkInitialized();
TableName.isLegalNamespaceName(Bytes.toBytes(namespaceDescriptor.getName()));
return MasterProcedureUtil
.submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
@Override
protected void run() throws IOException {
getMaster().getMasterCoprocessorHost().preCreateNamespace(namespaceDescriptor);
// We need to wait for the procedure to potentially fail due to "prepare" sanity
// checks. This will block only the beginning of the procedure. See HBASE-19953.
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
LOG.info(getClientIdAuditPrefix() + " creating " + namespaceDescriptor);
// Execute the operation synchronously - wait for the operation to complete before
// continuing.
setProcId(getClusterSchema().createNamespace(namespaceDescriptor, getNonceKey(), latch));
latch.await();
getMaster().getMasterCoprocessorHost().postCreateNamespace(namespaceDescriptor);
}
@Override
protected String getDescription() {
return "CreateNamespaceProcedure";
}
});
} | 3.68 |
flink_SubsequenceInputTypeStrategy_finishWithVarying | /**
* Defines a common {@link InputTypeStrategy} for the next arguments. The given input strategy
* must expect a varying number of arguments, i.e. its maximum number of arguments must not be
* defined.
*/
public InputTypeStrategy finishWithVarying(InputTypeStrategy inputTypeStrategy) {
final ArgumentCount strategyArgumentCount = inputTypeStrategy.getArgumentCount();
strategyArgumentCount
.getMaxCount()
.ifPresent(
c -> {
throw new IllegalArgumentException(
"The maximum number of arguments must not be defined.");
});
argumentsSplits.add(new ArgumentsSplit(currentPos, null, inputTypeStrategy));
final int minCount = currentPos + strategyArgumentCount.getMinCount().orElse(0);
return new SubsequenceInputTypeStrategy(
argumentsSplits, ConstantArgumentCount.from(minCount));
} | 3.68 |
hadoop_QueueResourceQuotas_getConfiguredMaxResource | /*
* Configured Maximum Resource
*/
public Resource getConfiguredMaxResource() {
return getConfiguredMaxResource(NL);
} | 3.68 |
flink_BinarySegmentUtils_setBoolean | /**
* Sets a boolean value in the segments at the given offset.
*
* @param segments target segments.
* @param offset value offset.
* @param value the boolean value to write.
*/
public static void setBoolean(MemorySegment[] segments, int offset, boolean value) {
if (inFirstSegment(segments, offset, 1)) {
segments[0].putBoolean(offset, value);
} else {
setBooleanMultiSegments(segments, offset, value);
}
} | 3.68 |
hadoop_ExitUtil_disableSystemExit | /**
* Disable the use of System.exit for testing.
*/
public static void disableSystemExit() {
systemExitDisabled = true;
} | 3.68 |
hadoop_AllocationTags_getTags | /**
* @return the allocation tags.
*/
public Set<String> getTags() {
return this.tags;
} | 3.68 |
flink_RetractableTopNFunction_stateStaledErrorHandle | /**
* Handles a stale-state error according to the configured lenient option. If the option is true,
* only a warning is logged; otherwise a {@link RuntimeException} is thrown.
*/
private void stateStaledErrorHandle() {
// Skip the data if its state has been cleared because of state TTL.
if (lenient) {
LOG.warn(STATE_CLEARED_WARN_MSG);
} else {
throw new RuntimeException(STATE_CLEARED_WARN_MSG);
}
} | 3.68 |
hadoop_AzureNativeFileSystemStore_createAzureStorageSession | /**
* Establish a session with Azure blob storage based on the target URI. The
* method determines whether or not the URI target contains an explicit
* account or an implicit default cluster-wide account.
*
* @throws AzureException
* @throws IOException
*/
private void createAzureStorageSession()
throws AzureException, IOException {
// Make sure this object was properly initialized with references to
// the sessionUri and sessionConfiguration.
if (null == sessionUri || null == sessionConfiguration) {
throw new AzureException("Filesystem object not initialized properly."
+ "Unable to start session with Azure Storage server.");
}
// File system object initialized, attempt to establish a session
// with the Azure storage service for the target URI string.
try {
// Inspect the URI authority to determine the account and use the account
// to start an Azure blob client session using an account key for the
// account or anonymously.
// For all URI's do the following checks in order:
// 1. Validate that <account> can be used with the current Hadoop
// cluster by checking it exists in the list of configured accounts
// for the cluster.
// 2. Look up the AccountKey in the list of configured accounts for the
// cluster.
// 3. If there is no AccountKey, assume anonymous public blob access
// when accessing the blob.
//
// If the URI does not specify a container use the default root container
// under the account name.
// Assertion: Container name on the session Uri should be non-null.
if (getContainerFromAuthority(sessionUri) == null) {
throw new AssertionError(String.format(
"Non-null container expected from session URI: %s.",
sessionUri.toString()));
}
// Get the account name.
String accountName = getAccountFromAuthority(sessionUri);
if (null == accountName) {
// Account name is not specified as part of the URI. Throw indicating
// an invalid account name.
final String errMsg = String.format(
"Cannot load WASB file system account name not"
+ " specified in URI: %s.", sessionUri.toString());
throw new AzureException(errMsg);
}
instrumentation.setAccountName(accountName);
String containerName = getContainerFromAuthority(sessionUri);
instrumentation.setContainerName(containerName);
// Check whether this is a storage emulator account.
if (isStorageEmulatorAccount(accountName)) {
// It is an emulator account, connect to it with no credentials.
connectUsingCredentials(accountName, null, containerName);
return;
}
// If the securemode flag is set, WASB uses SecureStorageInterfaceImpl instance
// to communicate with Azure storage. In SecureStorageInterfaceImpl SAS keys
// are used to communicate with Azure storage, so connectToAzureStorageInSecureMode
// instantiates the default container using a SAS Key.
if (useSecureMode) {
connectToAzureStorageInSecureMode(accountName, containerName, sessionUri);
return;
}
// Check whether we have a shared access signature for that container.
String propertyValue = sessionConfiguration.get(KEY_ACCOUNT_SAS_PREFIX
+ containerName + "." + accountName);
if (propertyValue != null) {
// SAS was found. Connect using that.
connectUsingSASCredentials(accountName, containerName, propertyValue);
return;
}
// Check whether the account is configured with an account key.
propertyValue = getAccountKeyFromConfiguration(accountName,
sessionConfiguration);
if (StringUtils.isNotEmpty(propertyValue)) {
// Account key was found.
// Create the Azure storage session using the account key and container.
connectUsingConnectionStringCredentials(
getAccountFromAuthority(sessionUri),
getContainerFromAuthority(sessionUri), propertyValue);
} else {
LOG.debug("The account access key is not configured for {}. "
+ "Now try anonymous access.", sessionUri);
connectUsingAnonymousCredentials(sessionUri);
}
} catch (Exception e) {
// Caught exception while attempting to initialize the Azure File
// System store, re-throw the exception.
throw new AzureException(e);
}
} | 3.68 |
pulsar_StickyKeyConsumerSelector_select | /**
* Select a consumer by sticky key.
*
* @param stickyKey sticky key
* @return consumer
*/
default Consumer select(byte[] stickyKey) {
return select(makeStickyKeyHash(stickyKey));
} | 3.68 |
shardingsphere-elasticjob_BlockUtils_waitingShortTime | /**
* Waits for a short time.
*/
public static void waitingShortTime() {
try {
Thread.sleep(SLEEP_INTERVAL_MILLIS);
} catch (final InterruptedException ex) {
Thread.currentThread().interrupt();
}
} | 3.68 |
framework_ValueContext_getComponent | /**
* Returns an {@code Optional} for the {@code Component} related to value
* conversion.
*
* @return an {@code Optional} containing the component, or an empty optional if none is set
*/
public Optional<Component> getComponent() {
return Optional.ofNullable(component);
} | 3.68 |
hbase_SplitWALManager_releaseSplitWALWorker | /**
* After the worker has finished the split WAL task, this releases the worker and wakes up all the
* suspended procedures in the ProcedureEvent.
* @param worker the worker to be released
* @param scheduler scheduler which is to wake up the procedure event
*/
public void releaseSplitWALWorker(ServerName worker, MasterProcedureScheduler scheduler) {
LOG.debug("Release split WAL worker={}", worker);
splitWorkerAssigner.release(worker);
splitWorkerAssigner.wake(scheduler);
} | 3.68 |
hbase_WindowMovingAverage_moveForwardMostRecentPosition | /**
* Moves the most recent index forward.
* @return the most recent index
*/
protected int moveForwardMostRecentPosition() {
int index = ++mostRecent;
if (!oneRound && index == getNumberOfStatistics()) {
// Back to the head of the lastN, from now on will
// start to evict oldest value.
oneRound = true;
}
mostRecent = index % getNumberOfStatistics();
return mostRecent;
} | 3.68 |
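A standalone sketch of the circular-cursor advance used above (not HBase code; the initial cursor value of -1 is an assumption for illustration): the index wraps with a modulo, and a flag records once the window has been filled so that subsequent writes evict the oldest sample.

```java
public class RingCursorDemo {
    public static void main(String[] args) {
        int size = 5;
        boolean filledOnce = false;
        int mostRecent = -1; // cursor starts before the first slot (illustrative initial value)
        for (int step = 0; step < 7; step++) {
            int index = ++mostRecent;
            if (!filledOnce && index == size) {
                filledOnce = true; // from now on the oldest sample gets evicted
            }
            mostRecent = index % size;
            System.out.println("step " + step + " -> slot " + mostRecent + ", filled=" + filledOnce);
        }
    }
}
```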
hadoop_HttpReferrerAuditHeader_withFilter | /**
* Declare the fields to filter.
* @param fields iterable of field names.
* @return the builder
*/
public Builder withFilter(final Collection<String> fields) {
this.filter = new HashSet<>(fields);
return this;
} | 3.68 |
dubbo_Bytes_hex2bytes | /**
* Converts a hex string to a byte array.
*
* @param str hex string.
* @param off offset.
* @param len length.
* @return byte array.
*/
public static byte[] hex2bytes(final String str, final int off, int len) {
if ((len & 1) == 1) {
throw new IllegalArgumentException("hex2bytes: ( len & 1 ) == 1.");
}
if (off < 0) {
throw new IndexOutOfBoundsException("hex2bytes: offset < 0, offset is " + off);
}
if (len < 0) {
throw new IndexOutOfBoundsException("hex2bytes: length < 0, length is " + len);
}
if (off + len > str.length()) {
throw new IndexOutOfBoundsException("hex2bytes: offset + length > array length.");
}
int num = len / 2, r = off, w = 0;
byte[] b = new byte[num];
for (int i = 0; i < num; i++) {
b[w++] = (byte) (hex(str.charAt(r++)) << 4 | hex(str.charAt(r++)));
}
return b;
} | 3.68 |
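A short usage sketch for the decoder above; the import of the Bytes class is omitted because its package depends on the Dubbo version, and the literal values are chosen only for illustration.

```java
import java.util.Arrays;

public class HexDemo {
    public static void main(String[] args) {
        // Offset 0 and length 6 cover the whole string; each pair of hex digits becomes one byte.
        byte[] decoded = Bytes.hex2bytes("0a1bff", 0, 6);
        System.out.println(Arrays.toString(decoded)); // [10, 27, -1]
    }
}
```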
framework_Payload_setValueType | /**
* Sets the value type of this payload.
*
* @param valueType
* type of the payload value
*/
public void setValueType(ValueType valueType) {
this.valueType = valueType;
} | 3.68 |
hadoop_Exec_addEnvironment | /**
* Add environment variables to a ProcessBuilder.
*
* @param pb The ProcessBuilder
* @param env A map of environment variable names to values.
*/
public static void addEnvironment(ProcessBuilder pb,
Map<String, String> env) {
if (env == null) {
return;
}
Map<String, String> processEnv = pb.environment();
for (Map.Entry<String, String> entry : env.entrySet()) {
String val = entry.getValue();
if (val == null) {
val = "";
}
processEnv.put(entry.getKey(), val);
}
} | 3.68 |
hbase_ZKSplitLogManagerCoordination_rescan | /**
* signal the workers that a task was resubmitted by creating the RESCAN node.
*/
private void rescan(long retries) {
// The RESCAN node will be deleted almost immediately by the
// SplitLogManager as soon as it is created because it is being
// created in the DONE state. This behavior prevents a buildup
// of RESCAN nodes. But there is also a chance that a SplitLogWorker
// might miss the watch-trigger that creation of RESCAN node provides.
// Since the TimeoutMonitor will keep resubmitting UNASSIGNED tasks
// therefore this behavior is safe.
SplitLogTask slt = new SplitLogTask.Done(this.details.getServerName());
this.watcher.getRecoverableZooKeeper().getZooKeeper().create(ZKSplitLog.getRescanNode(watcher),
slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL,
new CreateRescanAsyncCallback(), Long.valueOf(retries));
} | 3.68 |
pulsar_Schema_requireFetchingSchemaInfo | /**
* Check if this schema requires fetching schema info to configure the schema.
*
* @return true if the schema requires fetching schema info to configure the schema,
* otherwise false.
*/
default boolean requireFetchingSchemaInfo() {
return false;
} | 3.68 |
hbase_VersionInfo_getVersionComponents | /**
* Returns the version components as String objects. Examples: "1.4.3" returns ["1", "4", "3"],
* "4.5.6-SNAPSHOT" returns ["4", "5", "6", "-1"], "4.5.6-beta" returns ["4", "5", "6", "-2"],
* "4.5.6-alpha" returns ["4", "5", "6", "-3"], "4.5.6-UNKNOWN" returns ["4", "5", "6", "-4"].
* @return the components of the version string
*/
private static String[] getVersionComponents(final String version) {
assert (version != null);
List<String> list = Splitter.onPattern("[\\.-]").splitToList(version);
String[] strComps = list.toArray(new String[list.size()]);
assert (strComps.length > 0);
String[] comps = new String[strComps.length];
for (int i = 0; i < strComps.length; ++i) {
if (StringUtils.isNumeric(strComps[i])) {
comps[i] = strComps[i];
} else if (StringUtils.isEmpty(strComps[i])) {
comps[i] = String.valueOf(VERY_LARGE_NUMBER);
} else {
if ("SNAPSHOT".equals(strComps[i])) {
comps[i] = "-1";
} else if ("beta".equals(strComps[i])) {
comps[i] = "-2";
} else if ("alpha".equals(strComps[i])) {
comps[i] = "-3";
} else {
comps[i] = "-4";
}
}
}
return comps;
} | 3.68 |
flink_InstantiationUtil_resolveClassByName | /**
* Loads a class by name from the given input stream and reflectively instantiates it.
*
* <p>This method will use {@link DataInputView#readUTF()} to read the class name, and then
* attempt to load the class from the given ClassLoader.
*
* <p>The resolved class is checked to be equal to or a subtype of the given supertype class.
*
* @param in The stream to read the class name from.
* @param cl The class loader to resolve the class.
* @param supertype A class that the resolved class must extend.
* @throws IOException Thrown, if the class name could not be read, the class could not be
* found, or the class is not a subtype of the given supertype class.
*/
public static <T> Class<T> resolveClassByName(
DataInputView in, ClassLoader cl, Class<? super T> supertype) throws IOException {
final String className = in.readUTF();
final Class<?> rawClazz;
try {
rawClazz = Class.forName(className, false, cl);
} catch (ClassNotFoundException e) {
String error = "Could not find class '" + className + "' in classpath.";
if (className.contains("SerializerConfig")) {
error +=
" TypeSerializerConfigSnapshot and it's subclasses are not supported since Flink 1.17."
+ " If you are using built-in serializers, please first migrate to Flink 1.16."
+ " If you are using custom serializers, please migrate them to"
+ " TypeSerializerSnapshot using Flink 1.16.";
}
throw new IOException(error, e);
}
if (!supertype.isAssignableFrom(rawClazz)) {
throw new IOException(
"The class " + className + " is not a subclass of " + supertype.getName());
}
@SuppressWarnings("unchecked")
Class<T> clazz = (Class<T>) rawClazz;
return clazz;
} | 3.68 |
flink_HiveParserQB_setTabAlias | /**
* Maintain table alias -> (originTableName, qualifiedName).
*
* @param alias table alias
* @param originTableName the table name as actually specified, which may be "table", "db.table",
* or "catalog.db.table"
* @param qualifiedName the table name with full path, which is always "catalog.db.table"
*/
public void setTabAlias(String alias, String originTableName, String qualifiedName) {
aliasToTabsOriginName.put(alias.toLowerCase(), originTableName.toLowerCase());
aliasToTabs.put(alias.toLowerCase(), qualifiedName);
} | 3.68 |
morf_SqlDialect_usesNVARCHARforStrings | /**
* Indicates whether the dialect uses NVARCHAR or VARCHAR to store string values.
*
* @return true if NVARCHAR is used, false if VARCHAR is used.
*/
public boolean usesNVARCHARforStrings() {
return false;
} | 3.68 |
pulsar_ClientConfiguration_setStatsInterval | /**
* Sets the interval between each stats update <i>(default: 60 seconds)</i>. Stats are activated
* with a positive statsInterval, which should be set to at least 1 second.
*
* @param statsInterval
* the interval between each stat info
* @param unit
* time unit for {@code statsInterval}
*/
public void setStatsInterval(long statsInterval, TimeUnit unit) {
confData.setStatsIntervalSeconds(unit.toSeconds(statsInterval));
} | 3.68 |
hbase_FileMmapIOEngine_write | /**
* Transfers data from the given byte buffer to file
* @param srcBuffer the given byte buffer from which bytes are to be read
* @param offset the offset in the file where the first byte is to be written
*/
@Override
public void write(ByteBuffer srcBuffer, long offset) throws IOException {
bufferArray.write(offset, ByteBuff.wrap(srcBuffer));
} | 3.68 |
flink_ContextResolvedTable_getCatalog | /** Returns empty if {@link #isPermanent()} is false. */
public Optional<Catalog> getCatalog() {
return Optional.ofNullable(catalog);
} | 3.68 |
druid_Lexer_integerValue | // QS_TODO: negative numbers are not visible to the lexer
public final Number integerValue() {
long result = 0;
boolean negative = false;
int i = mark, max = mark + bufPos;
long limit;
long multmin;
int digit;
if (charAt(mark) == '-') {
negative = true;
limit = Long.MIN_VALUE;
i++;
} else {
limit = -Long.MAX_VALUE;
}
multmin = negative ? MULTMIN_RADIX_TEN : N_MULTMAX_RADIX_TEN;
if (i < max) {
digit = charAt(i++) - '0';
result = -digit;
}
while (i < max) {
// Accumulating negatively avoids surprises near MAX_VALUE
digit = charAt(i++) - '0';
if (result < multmin) {
return new BigInteger(
numberString());
}
result *= 10;
if (result < limit + digit) {
return new BigInteger(numberString());
}
result -= digit;
}
if (negative) {
if (i > mark + 1) {
if (result >= Integer.MIN_VALUE) {
return (int) result;
}
return result;
} else { /* Only got "-" */
throw new NumberFormatException(numberString());
}
} else {
result = -result;
if (result <= Integer.MAX_VALUE) {
return (int) result;
}
return result;
}
} | 3.68 |
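The lexer above accumulates the magnitude as a negative number, the same trick used by Long.parseLong: since |Long.MIN_VALUE| is one larger than Long.MAX_VALUE, negative accumulation can represent the minimum value without overflowing on the last digit. A minimal standalone sketch of that technique (digit validation omitted):

```java
public class NegativeAccumulationDemo {
    static long parseDecimal(String s) {
        boolean negative = s.charAt(0) == '-';
        int i = negative ? 1 : 0;
        long limit = negative ? Long.MIN_VALUE : -Long.MAX_VALUE;
        long result = 0;
        for (; i < s.length(); i++) {
            int digit = s.charAt(i) - '0'; // assumes the input contains only decimal digits
            if (result < limit / 10 || result * 10 < limit + digit) {
                throw new NumberFormatException(s); // the next step would overflow
            }
            result = result * 10 - digit; // accumulate negatively
        }
        return negative ? result : -result;
    }

    public static void main(String[] args) {
        System.out.println(parseDecimal("-9223372036854775808")); // Long.MIN_VALUE, no overflow
        System.out.println(parseDecimal("42"));                   // 42
    }
}
```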
hadoop_SharedKeyCredentials_getHeaderValues | /**
* Gets all the values for the given header in the one to many map,
* performs a trimStart() on each return value.
*
* @param headers a one to many map of key / values representing the header values for the connection.
* @param headerName the name of the header to lookup
* @return an ArrayList<String> of all trimmed values corresponding to the requested headerName. This may be empty
* if the header is not found.
*/
private static ArrayList<String> getHeaderValues(
final Map<String, List<String>> headers,
final String headerName) {
final ArrayList<String> arrayOfValues = new ArrayList<String>();
List<String> values = null;
for (final Entry<String, List<String>> entry : headers.entrySet()) {
if (entry.getKey().toLowerCase(Locale.ROOT).equals(headerName)) {
values = entry.getValue();
break;
}
}
if (values != null) {
for (final String value : values) {
// canonicalization formula requires the string to be left
// trimmed.
arrayOfValues.add(trimStart(value));
}
}
return arrayOfValues;
} | 3.68 |
hadoop_FederationProxyProviderUtil_updateConfForFederation | /**
* Updates the conf for Federation with the given subClusterId.
*
* @param conf configuration
* @param subClusterId subclusterId for the conf
*/
public static void updateConfForFederation(Configuration conf,
String subClusterId) {
conf.set(YarnConfiguration.RM_CLUSTER_ID, subClusterId);
/*
* In a Federation setting, we will connect to not just the local cluster RM
* but also multiple external RMs. The membership information of all the RMs
* that are currently participating in Federation is available in the
* central FederationStateStore. So we will: 1. obtain the RM service
* addresses from FederationStateStore using the
* FederationRMFailoverProxyProvider. 2. disable traditional HA as that
* depends on local configuration lookup for RMs using indexes. 3. we will
* enable federation failover IF traditional HA is enabled so that the
* appropriate failover RetryPolicy is initialized.
*/
conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true);
conf.setClass(YarnConfiguration.CLIENT_FAILOVER_PROXY_PROVIDER,
FederationRMFailoverProxyProvider.class, RMFailoverProxyProvider.class);
if (HAUtil.isHAEnabled(conf)) {
conf.setBoolean(YarnConfiguration.FEDERATION_FAILOVER_ENABLED, true);
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, false);
}
} | 3.68 |
graphhopper_GpxConversions_calcAzimuth | /**
* Returns the azimuth in degrees based on the first track segment of this instruction. If this
* instruction contains fewer than two points, the given next instruction is used instead when
* possible (e.g. for the finish instruction); otherwise NaN is returned.
*/
public static double calcAzimuth(Instruction instruction, Instruction nextI) {
double nextLat;
double nextLon;
if (instruction.getPoints().size() >= 2) {
nextLat = instruction.getPoints().getLat(1);
nextLon = instruction.getPoints().getLon(1);
} else if (nextI != null && instruction.getPoints().size() == 1) {
nextLat = nextI.getPoints().getLat(0);
nextLon = nextI.getPoints().getLon(0);
} else {
return Double.NaN;
}
double lat = instruction.getPoints().getLat(0);
double lon = instruction.getPoints().getLon(0);
return AC.calcAzimuth(lat, lon, nextLat, nextLon);
} | 3.68 |
pulsar_ManagedLedgerFactoryImpl_deleteManagedLedger | /**
* Delete all managed ledger resources and metadata.
*/
void deleteManagedLedger(String managedLedgerName, CompletableFuture<ManagedLedgerConfig> mlConfigFuture,
DeleteLedgerCallback callback, Object ctx) {
// Read the managed ledger metadata from store
asyncGetManagedLedgerInfo(managedLedgerName, new ManagedLedgerInfoCallback() {
@Override
public void getInfoComplete(ManagedLedgerInfo info, Object ctx) {
BookKeeper bkc = getBookKeeper();
// First delete all cursor resources
List<CompletableFuture<Void>> futures = info.cursors.entrySet().stream()
.map(e -> deleteCursor(bkc, managedLedgerName, e.getKey(), e.getValue()))
.collect(Collectors.toList());
Futures.waitForAll(futures).thenRun(() -> {
deleteManagedLedgerData(bkc, managedLedgerName, info, mlConfigFuture, callback, ctx);
}).exceptionally(ex -> {
callback.deleteLedgerFailed(new ManagedLedgerException(ex), ctx);
return null;
});
}
@Override
public void getInfoFailed(ManagedLedgerException exception, Object ctx) {
callback.deleteLedgerFailed(exception, ctx);
}
}, ctx);
} | 3.68 |
hadoop_AppCollectorData_isStamped | /**
* Returns if the collector data has been stamped by the RM with a RM cluster
* timestamp and a version number.
*
* @return true if RM has already assigned a timestamp for this collector.
* Otherwise, it means the RM has not recognized the existence of this
* collector.
*/
public boolean isStamped() {
return (getRMIdentifier() != DEFAULT_TIMESTAMP_VALUE)
|| (getVersion() != DEFAULT_TIMESTAMP_VALUE);
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_putValue | /**
* Update or insert the value for the given node.
*
* @param currentNode the node to put value for.
* @param value the value to put.
* @param returnOldState whether to return the old state.
* @return the old state if it exists and {@code returnOldState} is true, or else null.
*/
private S putValue(long currentNode, byte[] value, boolean returnOldState) {
int version = SkipListUtils.helpGetNodeLatestVersion(currentNode, spaceAllocator);
boolean needCopyOnWrite = version < highestRequiredSnapshotVersionPlusOne;
long oldValuePointer;
if (needCopyOnWrite) {
oldValuePointer = updateValueWithCopyOnWrite(currentNode, value);
} else {
oldValuePointer = updateValueWithReplace(currentNode, value);
}
NodeStatus oldStatus = helpSetNodeStatus(currentNode, NodeStatus.PUT);
if (oldStatus == NodeStatus.REMOVE) {
logicallyRemovedNodes.remove(currentNode);
}
S oldState = null;
if (returnOldState) {
oldState = helpGetState(oldValuePointer);
}
// for the replace, old value space need to free
if (!needCopyOnWrite) {
spaceAllocator.free(oldValuePointer);
}
return oldState;
} | 3.68 |
framework_ReflectTools_convertPrimitiveType | /**
* Converts a primitive type to its corresponding boxed (wrapper) type. Non-primitive types are
* returned unchanged.
*
* @since 7.4
*/
public static Class<?> convertPrimitiveType(Class<?> type) {
// Box the primitive type into its wrapper class
if (type.isPrimitive()) {
if (type.equals(Boolean.TYPE)) {
type = Boolean.class;
} else if (type.equals(Integer.TYPE)) {
type = Integer.class;
} else if (type.equals(Float.TYPE)) {
type = Float.class;
} else if (type.equals(Double.TYPE)) {
type = Double.class;
} else if (type.equals(Byte.TYPE)) {
type = Byte.class;
} else if (type.equals(Character.TYPE)) {
type = Character.class;
} else if (type.equals(Short.TYPE)) {
type = Short.class;
} else if (type.equals(Long.TYPE)) {
type = Long.class;
}
}
return type;
} | 3.68 |
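A quick illustration of the boxing helper above; it assumes the surrounding ReflectTools class is on the classpath.

```java
public class BoxingDemo {
    public static void main(String[] args) {
        System.out.println(ReflectTools.convertPrimitiveType(int.class));     // class java.lang.Integer
        System.out.println(ReflectTools.convertPrimitiveType(boolean.class)); // class java.lang.Boolean
        System.out.println(ReflectTools.convertPrimitiveType(String.class));  // class java.lang.String (unchanged)
    }
}
```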
flink_AllWindowedStream_maxBy | /**
* Applies an aggregation that gives the maximum element of the pojo data stream by the given
* field expression for every window. A field expression is either the name of a public field or
* a getter method with parentheses of the {@link DataStream}'s underlying type. A dot can be
* used to drill down into objects, as in {@code "field1.getInnerField2()" }.
*
* @param field The field expression based on which the aggregation will be applied.
* @param first If True then in case of field equality the first object will be returned
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> maxBy(String field, boolean first) {
return aggregate(
new ComparableAggregator<>(
field,
input.getType(),
AggregationFunction.AggregationType.MAXBY,
first,
input.getExecutionConfig()));
} | 3.68 |
hadoop_MappingRuleResult_getResult | /**
* Returns the type of the result.
* @return the type of the result.
*/
public MappingRuleResultType getResult() {
return result;
} | 3.68 |
hbase_SnapshotDescriptionUtils_isExpiredSnapshot | /**
* Method to check whether TTL has expired for specified snapshot creation time and snapshot ttl.
* NOTE: For backward compatibility (after the patch deployment on HMaster), any snapshot with a
* TTL of 0 is considered a snapshot to keep FOREVER. The default TTL value is specified by
* {@link HConstants#DEFAULT_SNAPSHOT_TTL}.
* @return true if the TTL has expired, false otherwise
*/
public static boolean isExpiredSnapshot(long snapshotTtl, long snapshotCreatedTime,
long currentTime) {
return snapshotCreatedTime > 0 && snapshotTtl > HConstants.DEFAULT_SNAPSHOT_TTL
&& snapshotTtl < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)
&& (snapshotCreatedTime + TimeUnit.SECONDS.toMillis(snapshotTtl)) < currentTime;
} | 3.68 |
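A small worked example of the TTL arithmetic above. It assumes HConstants.DEFAULT_SNAPSHOT_TTL is 0 (as the "keep forever" note implies) and that SnapshotDescriptionUtils is on the classpath; the timestamps are illustrative.

```java
public class SnapshotTtlDemo {
    public static void main(String[] args) {
        long createdTime = 1_700_000_000_000L;            // snapshot creation time (ms)
        long ttlSeconds  = 86_400;                        // keep for one day
        long now         = createdTime + 2L * 86_400_000; // two days later (ms)

        // true: createdTime + 86,400,000 ms lies before 'now'
        System.out.println(SnapshotDescriptionUtils.isExpiredSnapshot(ttlSeconds, createdTime, now));

        // false: a TTL of 0 means "keep forever"
        System.out.println(SnapshotDescriptionUtils.isExpiredSnapshot(0, createdTime, now));
    }
}
```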
hadoop_SystemErasureCodingPolicies_getByName | /**
* Get a policy by policy name.
* @return ecPolicy, or null if not found
*/
public static ErasureCodingPolicy getByName(String name) {
return SYSTEM_POLICIES_BY_NAME.get(name);
} | 3.68 |
hbase_MultiByteBuff_reset | /**
* Similar to {@link ByteBuffer}.reset(), ensures that this MBB is reset back to the last marked
* position.
* @return This MBB
*/
@Override
public MultiByteBuff reset() {
checkRefCount();
// when the buffer is moved to the next one.. the reset should happen on the previous marked
// item and the new one should be taken as the base
if (this.markedItemIndex < 0) throw new InvalidMarkException();
ByteBuffer markedItem = this.items[this.markedItemIndex];
markedItem.reset();
this.curItem = markedItem;
// All items after the marked position up to the current item should be reset to 0
for (int i = this.curItemIndex; i > this.markedItemIndex; i--) {
this.items[i].position(0);
}
this.curItemIndex = this.markedItemIndex;
return this;
} | 3.68 |
hadoop_AbfsConfiguration_getClientCorrelationId | /**
* Gets client correlation ID provided in config.
* @return Client Correlation ID config
*/
public String getClientCorrelationId() {
return clientCorrelationId;
} | 3.68 |
hbase_UserProvider_setUserProviderForTesting | /**
* Set the {@link UserProvider} in the given configuration that should be instantiated
* @param conf to update
* @param provider class of the provider to set
*/
public static void setUserProviderForTesting(Configuration conf,
Class<? extends UserProvider> provider) {
conf.set(USER_PROVIDER_CONF_KEY, provider.getName());
} | 3.68 |
flink_TableConnectorUtils_generateRuntimeName | /** Returns the table connector name used for logging and web UI. */
public static String generateRuntimeName(Class<?> clazz, String[] fields) {
String className = clazz.getSimpleName();
if (null == fields) {
return className + "(*)";
} else {
return className + "(" + String.join(", ", fields) + ")";
}
} | 3.68 |
hbase_SingleColumnValueExcludeFilter_filterRowCells | // Here we remove from the row all key values belonging to the tested column
@Override
public void filterRowCells(List<Cell> kvs) {
Iterator<? extends Cell> it = kvs.iterator();
while (it.hasNext()) {
// If the current cell belongs to the tested column,
// we remove it from the results.
if (CellUtil.matchingColumn(it.next(), this.columnFamily, this.columnQualifier)) {
it.remove();
}
}
} | 3.68 |
hudi_SchemaChangeUtils_applyTableChange2Type | /**
* Applies all the DDL update operations to the given type to produce a new internalSchema.
* Do not call this method directly; it is exposed only for unit tests.
*
* @param type the original type.
* @param updates a wrapper class for all the DDL update operations.
* @return a new internalSchema.
*/
private static Type applyTableChange2Type(Type type, TableChanges.ColumnUpdateChange updates) {
switch (type.typeId()) {
case RECORD:
Types.RecordType record = (Types.RecordType) type;
List<Type> newTypes = new ArrayList<>();
for (Types.Field f : record.fields()) {
Type newType = applyTableChange2Type(f.type(), updates);
newTypes.add(updates.applyUpdates(f, newType));
}
List<Types.Field> newFields = new ArrayList<>();
for (int i = 0; i < newTypes.size(); i++) {
Type newType = newTypes.get(i);
Types.Field oldField = record.fields().get(i);
Types.Field updateField = updates.getUpdates().get(oldField.fieldId());
if (updateField != null) {
newFields.add(Types.Field.get(oldField.fieldId(), updateField.isOptional(), updateField.name(), newType, updateField.doc()));
} else if (!oldField.type().equals(newType)) {
newFields.add(Types.Field.get(oldField.fieldId(), oldField.isOptional(), oldField.name(), newType, oldField.doc()));
} else {
newFields.add(oldField);
}
}
return Types.RecordType.get(newFields, record.name());
case ARRAY:
Types.ArrayType array = (Types.ArrayType) type;
Type newElementType;
Types.Field elementField = array.fields().get(0);
newElementType = applyTableChange2Type(array.elementType(), updates);
newElementType = updates.applyUpdates(elementField, newElementType);
Types.Field elementUpdate = updates.getUpdates().get(elementField.fieldId());
boolean optional = elementUpdate == null ? array.isElementOptional() : elementUpdate.isOptional();
if (optional == elementField.isOptional() && array.elementType() == newElementType) {
return array;
}
return Types.ArrayType.get(array.elementId(), optional, newElementType);
case MAP:
Types.MapType map = (Types.MapType) type;
Types.Field valueFiled = map.fields().get(1);
Type newValueType;
newValueType = applyTableChange2Type(map.valueType(), updates);
newValueType = updates.applyUpdates(valueFiled, newValueType);
Types.Field valueUpdate = updates.getUpdates().get(valueFiled.fieldId());
boolean valueOptional = valueUpdate == null ? map.isValueOptional() : valueUpdate.isOptional();
if (valueOptional == map.isValueOptional() && map.valueType() == newValueType) {
return map;
}
return Types.MapType.get(map.keyId(), map.valueId(), map.keyType(), newValueType, valueOptional);
default:
return type;
}
} | 3.68 |
framework_BrowserWindowOpener_extend | /**
* Add this extension to the {@code EventTrigger}.
*
* @param eventTrigger
* the trigger to attach this extension to
*
* @since 8.4
*/
public void extend(EventTrigger eventTrigger) {
super.extend(eventTrigger.getConnector());
getState().partInformation = eventTrigger.getPartInformation();
} | 3.68 |
flink_CopyOnWriteStateMap_selectActiveTable | /**
* Select the sub-table which is responsible for entries with the given hash code.
*
* @param hashCode the hash code which we use to decide about the table that is responsible.
* @return the index of the sub-table that is responsible for the entry with the given hash
* code.
*/
private StateMapEntry<K, N, S>[] selectActiveTable(int hashCode) {
return (hashCode & (primaryTable.length - 1)) >= rehashIndex
? primaryTable
: incrementalRehashTable;
} | 3.68 |
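A minimal standalone sketch of the bit-mask bucketing idiom used above (not Flink code): when the table length is a power of two, hash & (length - 1) selects the same bucket as hash % length for non-negative hashes, without a division.

```java
public class MaskDemo {
    public static void main(String[] args) {
        int length = 16;                               // must be a power of two
        int hash = "some-key".hashCode() & 0x7FFFFFFF; // force non-negative for the comparison
        System.out.println(hash & (length - 1));       // bucket index via masking
        System.out.println(hash % length);             // same value via modulo
    }
}
```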
hbase_HttpServer_isAlive | /**
* Test for the availability of the web server
* @return true if the web server is started, false otherwise
*/
public boolean isAlive() {
return webServer != null && webServer.isStarted();
} | 3.68 |
framework_UidlWriter_writePerformanceData | /**
* Adds the performance timing data (used by TestBench 3) to the UIDL
* response.
*
* @throws IOException
*/
private void writePerformanceData(UI ui, Writer writer) throws IOException {
if (!ui.getSession().getService().getDeploymentConfiguration()
.isProductionMode()) {
writer.write(String.format(", \"timings\":[%d, %d]",
ui.getSession().getCumulativeRequestDuration(),
ui.getSession().getLastRequestDuration()));
}
} | 3.68 |
framework_DesignContext_getShouldWriteDataDelegate | /**
* Gets the delegate that determines whether the container data of a
* component should be written out.
*
* @since 7.5.0
* @see #setShouldWriteDataDelegate(ShouldWriteDataDelegate)
* @see #shouldWriteChildren(Component, Component)
* @return the currently used delegate
*/
public ShouldWriteDataDelegate getShouldWriteDataDelegate() {
return shouldWriteDataDelegate;
} | 3.68 |
flink_RestfulGateway_stopWithSavepoint | /**
* Stops the job with a savepoint, returning a future that completes when the operation is
* started.
*
* @param operationKey key of the operation, for deduplication
* @param targetDirectory Target directory for the savepoint.
* @param formatType Binary format of the savepoint.
* @param savepointMode context of the savepoint operation
* @param timeout for the rpc call
* @return Future which is completed once the operation is triggered successfully
*/
default CompletableFuture<Acknowledge> stopWithSavepoint(
final AsynchronousJobOperationKey operationKey,
final String targetDirectory,
SavepointFormatType formatType,
final TriggerSavepointMode savepointMode,
@RpcTimeout final Time timeout) {
throw new UnsupportedOperationException();
} | 3.68 |
hadoop_ContainerServiceRecordProcessor_createAAAAInfo | /**
* Creates a container AAAA (IPv6) record descriptor.
* @param record the service record
* @throws Exception if the descriptor creation yields an issue.
*/
protected void createAAAAInfo(ServiceRecord record)
throws Exception {
AAAAContainerRecordDescriptor
recordInfo = new AAAAContainerRecordDescriptor(
getPath(), record);
registerRecordDescriptor(Type.AAAA, recordInfo);
} | 3.68 |
querydsl_AbstractMySQLQuery_forceIndex | /**
* You can use FORCE INDEX, which acts like USE INDEX (index_list) but with the addition that a
* table scan is assumed to be very expensive. In other words, a table scan is used only if there
* is no way to use one of the given indexes to find rows in the table.
*
* @param indexes index names
* @return the current object
*/
public C forceIndex(String... indexes) {
return addJoinFlag(" force index (" + String.join(", ", indexes) + ")", JoinFlag.Position.END);
} | 3.68 |
hbase_RowCountEndpoint_getRowCount | /**
* Returns a count of the rows in the region where this coprocessor is loaded.
*/
@Override
public void getRowCount(RpcController controller, CountRequest request,
RpcCallback<CountResponse> done) {
Scan scan = new Scan();
scan.setFilter(new FirstKeyOnlyFilter());
CountResponse response = null;
InternalScanner scanner = null;
try {
scanner = env.getRegion().getScanner(scan);
List<Cell> results = new ArrayList<>();
boolean hasMore = false;
byte[] lastRow = null;
long count = 0;
do {
hasMore = scanner.next(results);
for (Cell kv : results) {
byte[] currentRow = CellUtil.cloneRow(kv);
if (lastRow == null || !Bytes.equals(lastRow, currentRow)) {
lastRow = currentRow;
count++;
}
}
results.clear();
} while (hasMore);
response = CountResponse.newBuilder().setCount(count).build();
} catch (IOException ioe) {
CoprocessorRpcUtils.setControllerException(controller, ioe);
} finally {
if (scanner != null) {
IOUtils.closeQuietly(scanner);
}
}
done.run(response);
} | 3.68 |
hbase_VersionResource_getVersionResource | /**
* Dispatch <tt>/version/rest</tt> to self.
*/
@Path("rest")
public VersionResource getVersionResource() {
return this;
} | 3.68 |
flink_KubernetesUtils_createConfigMapIfItDoesNotExist | /**
* Creates a config map with the given name if it does not exist.
*
* @param flinkKubeClient to use for creating the config map
* @param configMapName name of the config map
* @param clusterId clusterId to which the map belongs
* @throws FlinkException if the config map could not be created
*/
public static void createConfigMapIfItDoesNotExist(
FlinkKubeClient flinkKubeClient, String configMapName, String clusterId)
throws FlinkException {
int attempt = 0;
CompletionException lastException = null;
final int maxAttempts = 10;
final KubernetesConfigMap configMap =
new KubernetesConfigMap(
new ConfigMapBuilder()
.withNewMetadata()
.withName(configMapName)
.withLabels(
getConfigMapLabels(
clusterId, LABEL_CONFIGMAP_TYPE_HIGH_AVAILABILITY))
.endMetadata()
.build());
while (!flinkKubeClient.getConfigMap(configMapName).isPresent() && attempt < maxAttempts) {
try {
flinkKubeClient.createConfigMap(configMap).join();
} catch (CompletionException e) {
// retrying
lastException = ExceptionUtils.firstOrSuppressed(e, lastException);
}
attempt++;
}
if (attempt >= maxAttempts && lastException != null) {
throw new FlinkException(
String.format("Could not create the config map %s.", configMapName),
lastException);
}
} | 3.68 |
framework_VCalendar_updateWeekView | /**
* Re-renders the whole week view.
*
* @param scroll
* The amount of pixels to scroll the week view
* @param today
* Today's date
* @param daysInMonth
* How many days are there in the month
* @param firstDayOfWeek
* The first day of the week
* @param events
* The events to render
*/
public void updateWeekView(int scroll, Date today, int daysInMonth,
int firstDayOfWeek, Collection<CalendarEvent> events,
List<CalendarDay> days) {
while (outer.getWidgetCount() > 0) {
outer.remove(0);
}
monthGrid = null;
String[] realDayNames = new String[getDayNames().length];
int j = 0;
if (firstDayOfWeek == 2) {
for (int i = 1; i < getDayNames().length; i++) {
realDayNames[j++] = getDayNames()[i];
}
realDayNames[j] = getDayNames()[0];
} else {
for (int i = 0; i < getDayNames().length; i++) {
realDayNames[j++] = getDayNames()[i];
}
}
weeklyLongEvents = new WeeklyLongEvents(this);
if (weekGrid == null) {
weekGrid = new WeekGrid(this, is24HFormat());
}
updateWeekGrid(daysInMonth, days, today, realDayNames);
updateEventsToWeekGrid(sortEvents(events));
outer.add(dayToolbar, DockPanel.NORTH);
outer.add(weeklyLongEvents, DockPanel.NORTH);
outer.add(weekGrid, DockPanel.SOUTH);
weekGrid.setVerticalScrollPosition(scroll);
} | 3.68 |
hbase_HBaseTestingUtility_createMockRegionServerService | /**
* Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
* TestOpenRegionHandler
*/
public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
rss.setFileSystem(getTestFileSystem());
return rss;
} | 3.68 |
hbase_StoreFileWriter_withFilePath | /**
* Use either this method or {@link #withOutputDir}, but not both.
* @param filePath the StoreFile path to write
* @return this (for chained invocation)
*/
public Builder withFilePath(Path filePath) {
Preconditions.checkNotNull(filePath);
this.filePath = filePath;
return this;
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_directoryCreated | /**
* Indicate that we just created a directory through WASB.
*/
public void directoryCreated() {
numberOfDirectoriesCreated.incr();
} | 3.68 |
flink_HashPartition_getPartitionNumber | /**
* Gets the partition number of this partition.
*
* @return This partition's number.
*/
public int getPartitionNumber() {
return this.partitionNumber;
} | 3.68 |
hibernate-validator_TokenIterator_hasMoreInterpolationTerms | /**
* Called to advance to the next interpolation term of the message descriptor. This method can be called multiple times.
* Once it returns {@code false} all interpolation terms have been processed and {@link #getInterpolatedMessage()}
* can be called.
*
* @return Returns {@code true} in case there are more message parameters, {@code false} otherwise.
*
* @throws MessageDescriptorFormatException in case the message descriptor is invalid
*/
public boolean hasMoreInterpolationTerms() throws MessageDescriptorFormatException {
while ( currentPosition < tokenList.size() ) {
currentToken = tokenList.get( currentPosition );
currentPosition++;
if ( currentToken.isParameter() ) {
currentTokenAvailable = true;
return true;
}
}
allInterpolationTermsProcessed = true;
return false;
} | 3.68 |
pulsar_FieldParser_stringToDouble | /**
* Converts String to Double.
*
* @param val
* The String to be converted.
* @return The converted Double value.
*/
public static Double stringToDouble(String val) {
String v = trim(val);
if (io.netty.util.internal.StringUtil.isNullOrEmpty(v)) {
return null;
} else {
return Double.valueOf(v);
}
} | 3.68 |
hbase_SpaceQuotaSnapshot_toSpaceQuotaSnapshot | // ProtobufUtil is in hbase-client, and this doesn't need to be public.
public static SpaceQuotaSnapshot toSpaceQuotaSnapshot(QuotaProtos.SpaceQuotaSnapshot proto) {
return new SpaceQuotaSnapshot(SpaceQuotaStatus.toStatus(proto.getQuotaStatus()),
proto.getQuotaUsage(), proto.getQuotaLimit());
} | 3.68 |
rocketmq-connect_ConnectUtil_initDefaultLitePullConsumer | /**
* Initializes a default lite pull consumer.
*
* @param connectConfig the worker configuration providing the nameserver address and credentials
* @param autoCommit whether offsets are committed automatically
* @return the configured consumer
*/
public static DefaultLitePullConsumer initDefaultLitePullConsumer(WorkerConfig connectConfig, boolean autoCommit) {
DefaultLitePullConsumer consumer;
if (StringUtils.isBlank(connectConfig.getAccessKey()) && StringUtils.isBlank(connectConfig.getSecretKey())) {
consumer = new DefaultLitePullConsumer();
} else {
consumer = new DefaultLitePullConsumer(getAclRPCHook(connectConfig.getAccessKey(), connectConfig.getSecretKey()));
}
consumer.setNamesrvAddr(connectConfig.getNamesrvAddr());
String uniqueName = Thread.currentThread().getName() + "-" + System.currentTimeMillis() % 1000;
consumer.setInstanceName(uniqueName);
consumer.setUnitName(uniqueName);
consumer.setAutoCommit(autoCommit);
return consumer;
} | 3.68 |
streampipes_ExtractorBase_getText | /**
* Extracts text from the given {@link TextDocument} object.
*
* @param doc The {@link TextDocument}.
* @return The extracted text.
* @throws BoilerpipeProcessingException
*/
public String getText(TextDocument doc) throws BoilerpipeProcessingException {
process(doc);
return doc.getContent();
} | 3.68 |
flink_OneInputTransformation_getOperatorFactory | /** Returns the {@code StreamOperatorFactory} of this Transformation. */
public StreamOperatorFactory<OUT> getOperatorFactory() {
return operatorFactory;
} | 3.68 |
flink_HadoopDataInputStream_getHadoopInputStream | /**
* Gets the wrapped Hadoop input stream.
*
* @return The wrapped Hadoop input stream.
*/
public org.apache.hadoop.fs.FSDataInputStream getHadoopInputStream() {
return fsDataInputStream;
} | 3.68 |
framework_EditorConnector_getRowKey | /**
* Returns the key of the given data row.
*
* @param row
* the row
* @return the row key
*/
protected static String getRowKey(JsonObject row) {
return row.getString(DataCommunicatorConstants.KEY);
} | 3.68 |
framework_CheckBoxElement_isChecked | /**
* Checks if the checkbox is checked.
*
* @return <code>true</code> if the checkbox is checked, <code>false</code>
* otherwise.
*/
public boolean isChecked() {
return getInputElement().isSelected();
} | 3.68 |
hbase_ShadedAccessControlUtil_toUserTablePermissions | /**
* Convert a ListMultimap<String, TablePermission> where key is username to a shaded
* protobuf UserPermission
* @param perm the list of user and table permissions
* @return the protobuf UserTablePermissions
*/
public static AccessControlProtos.UsersAndPermissions
toUserTablePermissions(ListMultimap<String, UserPermission> perm) {
AccessControlProtos.UsersAndPermissions.Builder builder =
AccessControlProtos.UsersAndPermissions.newBuilder();
for (Map.Entry<String, Collection<UserPermission>> entry : perm.asMap().entrySet()) {
AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder =
AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder();
userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey()));
for (UserPermission userPerm : entry.getValue()) {
userPermBuilder.addPermissions(toPermission(userPerm.getPermission()));
}
builder.addUserPermissions(userPermBuilder.build());
}
return builder.build();
} | 3.68 |
flink_SSLUtils_createRestServerSSLEngineFactory | /**
* Creates a {@link SSLHandlerFactory} to be used by the REST Servers.
*
* @param config The application configuration.
*/
public static SSLHandlerFactory createRestServerSSLEngineFactory(final Configuration config)
throws Exception {
ClientAuth clientAuth =
SecurityOptions.isRestSSLAuthenticationEnabled(config)
? ClientAuth.REQUIRE
: ClientAuth.NONE;
SslContext sslContext = createRestNettySSLContext(config, false, clientAuth);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled for REST endpoints.");
}
return new SSLHandlerFactory(sslContext, -1, -1);
} | 3.68 |
rocketmq-connect_ConnectUtil_flatOffsetTopics | /** Flattens the per-topic offsets into a single map of message queue to topic offset. */
public static Map<MessageQueue, TopicOffset> flatOffsetTopics(
WorkerConfig config, List<String> topics) {
Map<MessageQueue, TopicOffset> messageQueueTopicOffsets = Maps.newConcurrentMap();
offsetTopics(config, topics).values()
.forEach(
offsetTopic -> {
messageQueueTopicOffsets.putAll(offsetTopic);
});
return messageQueueTopicOffsets;
} | 3.68 |
flink_RpcEndpoint_getRpcService | /**
* Gets the endpoint's RPC service.
*
* @return The endpoint's RPC service
*/
public RpcService getRpcService() {
return rpcService;
} | 3.68 |
framework_ColorUtil_getRGBAPatternColor | /**
* Parses {@link Color} from matched RGBA {@link Matcher}.
*
* @param matcher
* {@link Matcher} matching RGBA pattern with named regex groups
* {@code red}, {@code green}, {@code blue}, and {@code alpha}
* @return {@link Color} parsed from {@link Matcher}
*/
public static Color getRGBAPatternColor(Matcher matcher) {
Color c = getRGBPatternColor(matcher);
c.setAlpha((int) (Double.parseDouble(matcher.group("alpha")) * 255d));
return c;
} | 3.68 |
dubbo_NettyChannel_getOrAddChannel | /**
* Gets the dubbo channel for a netty channel from the channel cache.
* Puts the netty channel into the cache if the dubbo channel doesn't exist there yet.
*
* @param ch netty channel
* @param url the url associated with the channel
* @param handler dubbo handler that contains netty's handler
* @return the cached (or newly created) dubbo channel, or null if the netty channel is null
*/
static NettyChannel getOrAddChannel(Channel ch, URL url, ChannelHandler handler) {
if (ch == null) {
return null;
}
NettyChannel ret = CHANNEL_MAP.get(ch);
if (ret == null) {
NettyChannel nettyChannel = new NettyChannel(ch, url, handler);
if (ch.isActive()) {
nettyChannel.markActive(true);
ret = CHANNEL_MAP.putIfAbsent(ch, nettyChannel);
}
if (ret == null) {
ret = nettyChannel;
}
} else {
ret.markActive(true);
}
return ret;
} | 3.68 |
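A standalone sketch of the cache idiom used above (not Dubbo code): build a candidate value, try to publish it with putIfAbsent, and fall back to whichever instance won the race. The StringBuilder value type is just a stand-in.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PutIfAbsentDemo {
    private static final ConcurrentMap<String, StringBuilder> CACHE = new ConcurrentHashMap<>();

    static StringBuilder getOrAdd(String key) {
        StringBuilder existing = CACHE.get(key);
        if (existing != null) {
            return existing;
        }
        StringBuilder candidate = new StringBuilder(key);
        StringBuilder raced = CACHE.putIfAbsent(key, candidate);
        return raced != null ? raced : candidate; // keep whichever instance is actually cached
    }

    public static void main(String[] args) {
        System.out.println(getOrAdd("ch-1") == getOrAdd("ch-1")); // true: same cached instance
    }
}
```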
flink_ColumnReferenceFinder_findReferencedColumn | /**
* Finds the referenced column names from which the given computed column is derived.
*
* @param columnName the name of the column
* @param schema the schema contains the computed column definition
* @return the referenced column names
*/
public static Set<String> findReferencedColumn(String columnName, ResolvedSchema schema) {
Column column =
schema.getColumn(columnName)
.orElseThrow(
() ->
new ValidationException(
String.format(
"The input column %s doesn't exist in the schema.",
columnName)));
if (!(column instanceof Column.ComputedColumn)) {
return Collections.emptySet();
}
ColumnReferenceVisitor visitor =
new ColumnReferenceVisitor(
// the input ref index is based on a projection of non-computed columns
schema.getColumns().stream()
.filter(c -> !(c instanceof Column.ComputedColumn))
.map(Column::getName)
.collect(Collectors.toList()));
return visitor.visit(((Column.ComputedColumn) column).getExpression());
} | 3.68 |
streampipes_AbstractProcessingElementBuilder_supportedProtocols | /**
* Assigns the supported communication/transport protocols that the pipeline element can handle
* at runtime (e.g., Kafka or JMS).
*
* @param protocols A list of supported {@link org.apache.streampipes.model.grounding.TransportProtocol}s.
* Use {@link org.apache.streampipes.sdk.helpers.SupportedProtocols} to assign protocols
* from some pre-defined ones or create your own by following the developer guide.
* @return this
*/
public K supportedProtocols(List<TransportProtocol> protocols) {
this.supportedGrounding.setTransportProtocols(protocols);
return me();
} | 3.68 |
flink_MemorySegment_getDoubleLittleEndian | /**
* Reads a double-precision floating point value (64bit, 8 bytes) from the given position, in
* little endian byte order. This method's speed depends on the system's native byte order, and
* it is possibly slower than {@link #getDouble(int)}. For most cases (such as transient storage
* in memory or serialization for I/O and network), it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read, and {@link
* #getDouble(int)} is the preferable choice.
*
* @param index The position from which the value will be read.
* @return The double value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 8.
*/
public double getDoubleLittleEndian(int index) {
return Double.longBitsToDouble(getLongLittleEndian(index));
} | 3.68 |
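A standalone illustration of the little-endian decode performed above (not Flink code): the eight bytes are assembled least-significant-first into a long and then reinterpreted as a double, which is what Double.longBitsToDouble over a little-endian long read amounts to.

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class LittleEndianDoubleDemo {
    public static void main(String[] args) {
        byte[] bytes = new byte[8];
        ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN).putDouble(3.14159);

        long bits = 0;
        for (int i = 7; i >= 0; i--) {
            bits = (bits << 8) | (bytes[i] & 0xFFL); // byte 0 is least significant
        }
        System.out.println(Double.longBitsToDouble(bits)); // 3.14159
    }
}
```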
hbase_StorageClusterStatusModel_setRegions | /**
* @param regions the total number of regions served by the cluster
*/
public void setRegions(int regions) {
this.regions = regions;
} | 3.68 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_buildMergeClusteringGroup | /**
* Generates clustering groups according to the merge rules.
*
* @param identifier bucket identifier
* @param fileSlices file slice candidates to be built into merge clustering groups
* @param mergeSlot number of buckets allowed to be merged, in order to guarantee the lower bound of the total number of buckets
* @return list of clustering groups, number of buckets merged (removed), and the remaining file slices (that were not merged)
*/
protected Triple<List<HoodieClusteringGroup>, Integer, List<FileSlice>> buildMergeClusteringGroup(
ConsistentBucketIdentifier identifier, List<FileSlice> fileSlices, int mergeSlot) {
if (fileSlices.size() <= 1) {
return Triple.of(Collections.emptyList(), 0, fileSlices);
}
long mergeSize = getMergeSize();
int remainingMergeSlot = mergeSlot;
List<HoodieClusteringGroup> groups = new ArrayList<>();
boolean[] added = new boolean[fileSlices.size()];
fileSlices.sort(Comparator.comparingInt(a -> identifier.getBucketByFileId(a.getFileId()).getValue()));
// In each round, we check if the ith file slice can be merged with its predecessors and successors
for (int i = 0; i < fileSlices.size(); ++i) {
if (added[i] || fileSlices.get(i).getTotalFileSize() > mergeSize) {
continue;
}
// 0: startIdx, 1: endIdx
int[] rangeIdx = {i, i};
long totalSize = fileSlices.get(i).getTotalFileSize();
// Do backward check first (k == 0), and then forward check (k == 1)
for (int k = 0; k < 2; ++k) {
boolean forward = k == 1;
do {
int nextIdx = forward ? (rangeIdx[k] + 1 < fileSlices.size() ? rangeIdx[k] + 1 : 0) : (rangeIdx[k] >= 1 ? rangeIdx[k] - 1 : fileSlices.size() - 1);
ConsistentHashingNode bucketOfNextFile = identifier.getBucketByFileId(fileSlices.get(nextIdx).getFileId());
ConsistentHashingNode nextBucket = forward ? identifier.getLatterBucket(fileSlices.get(rangeIdx[k]).getFileId()) : identifier.getFormerBucket(fileSlices.get(rangeIdx[k]).getFileId());
boolean isNeighbour = bucketOfNextFile == nextBucket;
          /**
           * Merge conditions:
           * 1. there is still a slot left to merge buckets
           * 2. the adjacent file slice has not been merged yet
           * 3. the adjacent file slice and the current file slice are neighbours in the hash ring
           * 4. both the total file size so far and the adjacent file slice size are smaller than the merge size threshold
           */
if (remainingMergeSlot == 0 || added[nextIdx] || !isNeighbour || totalSize > mergeSize || fileSlices.get(nextIdx).getTotalFileSize() > mergeSize
            || nextIdx == rangeIdx[1 - k] // stop if the start would equal the end after updating the range
) {
break;
}
          // Mark nextIdx as a merge candidate
totalSize += fileSlices.get(nextIdx).getTotalFileSize();
rangeIdx[k] = nextIdx;
remainingMergeSlot--;
} while (rangeIdx[k] != i);
}
int startIdx = rangeIdx[0];
int endIdx = rangeIdx[1];
if (endIdx == i && startIdx == i) {
continue;
}
      // Construct a merge group only if there are at least two file slices
List<FileSlice> fs = new ArrayList<>();
while (true) {
added[startIdx] = true;
fs.add(fileSlices.get(startIdx));
if (startIdx == endIdx) {
break;
}
startIdx = startIdx + 1 < fileSlices.size() ? startIdx + 1 : 0;
}
groups.add(HoodieClusteringGroup.newBuilder()
.setSlices(getFileSliceInfo(fs))
.setNumOutputFileGroups(1)
.setMetrics(buildMetrics(fs))
.setExtraMetadata(
constructExtraMetadata(
fs.get(0).getPartitionPath(),
identifier.mergeBucket(fs.stream().map(FileSlice::getFileId).collect(Collectors.toList())),
identifier.getMetadata().getSeqNo()))
.build());
}
// Collect file slices that are not involved in merge
List<FileSlice> fsUntouched = IntStream.range(0, fileSlices.size()).filter(i -> !added[i])
.mapToObj(fileSlices::get).collect(Collectors.toList());
return Triple.of(groups, mergeSlot - remainingMergeSlot, fsUntouched);
} | 3.68 |
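A much-simplified, hypothetical sketch of the grouping idea above: walk a ring of slice sizes and greedily absorb neighbours while every candidate and the running total stay under a merge-size threshold. Bucket identities, merge slots, and the real FileSlice/ConsistentBucketIdentifier types are deliberately left out, so this shows only the shape of the algorithm, not the actual planner.

import java.util.ArrayList;
import java.util.List;

public class RingMergeSketch {

    /** Greedily group neighbouring sizes on a circular list, keeping each group under maxGroupSize. */
    static List<List<Integer>> mergeNeighbours(long[] sizes, long maxGroupSize) {
        int n = sizes.length;
        boolean[] used = new boolean[n];
        List<List<Integer>> groups = new ArrayList<>();
        for (int i = 0; i < n; i++) {
            if (used[i] || sizes[i] > maxGroupSize) {
                continue;
            }
            List<Integer> group = new ArrayList<>();
            long total = sizes[i];
            used[i] = true;
            group.add(i);
            // Extend forward around the ring while the next neighbour still fits.
            for (int next = (i + 1) % n;
                 next != i && !used[next] && sizes[next] <= maxGroupSize && total + sizes[next] <= maxGroupSize;
                 next = (next + 1) % n) {
                total += sizes[next];
                used[next] = true;
                group.add(next);
            }
            if (group.size() > 1) {     // only keep real merges, mirroring the "at least two slices" rule
                groups.add(group);
            } else {
                used[i] = false;        // no merge formed; leave the slice available for later groups
            }
        }
        return groups;
    }

    public static void main(String[] args) {
        long[] sizes = {10, 20, 200, 15, 30};
        System.out.println(mergeNeighbours(sizes, 64));   // [[0, 1], [3, 4]]
    }
}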
hbase_MemStoreFlusher_flushOneForGlobalPressure | /**
* The memstore across all regions has exceeded the low water mark. Pick one region to flush and
* flush it synchronously (this is called from the flush thread)
* @return true if successful
*/
private boolean flushOneForGlobalPressure(FlushType flushType) {
SortedMap<Long, Collection<HRegion>> regionsBySize = null;
switch (flushType) {
case ABOVE_OFFHEAP_HIGHER_MARK:
case ABOVE_OFFHEAP_LOWER_MARK:
regionsBySize = server.getCopyOfOnlineRegionsSortedByOffHeapSize();
break;
case ABOVE_ONHEAP_HIGHER_MARK:
case ABOVE_ONHEAP_LOWER_MARK:
default:
regionsBySize = server.getCopyOfOnlineRegionsSortedByOnHeapSize();
}
Set<HRegion> excludedRegions = new HashSet<>();
double secondaryMultiplier =
ServerRegionReplicaUtil.getRegionReplicaStoreFileRefreshMultiplier(conf);
boolean flushedOne = false;
while (!flushedOne) {
// Find the biggest region that doesn't have too many storefiles (might be null!)
HRegion bestFlushableRegion = getBiggestMemStoreRegion(regionsBySize, excludedRegions, true);
      // Find the biggest region overall, even if it might have too many store files.
HRegion bestAnyRegion = getBiggestMemStoreRegion(regionsBySize, excludedRegions, false);
// Find the biggest region that is a secondary region
HRegion bestRegionReplica = getBiggestMemStoreOfRegionReplica(regionsBySize, excludedRegions);
if (bestAnyRegion == null) {
        // If bestAnyRegion is null, fall back to the replica. It may be null too; the next step checks for null.
bestAnyRegion = bestRegionReplica;
}
if (bestAnyRegion == null) {
LOG.error("Above memory mark but there are no flushable regions!");
return false;
}
HRegion regionToFlush;
long bestAnyRegionSize;
long bestFlushableRegionSize;
switch (flushType) {
case ABOVE_OFFHEAP_HIGHER_MARK:
case ABOVE_OFFHEAP_LOWER_MARK:
bestAnyRegionSize = bestAnyRegion.getMemStoreOffHeapSize();
bestFlushableRegionSize = getMemStoreOffHeapSize(bestFlushableRegion);
break;
case ABOVE_ONHEAP_HIGHER_MARK:
case ABOVE_ONHEAP_LOWER_MARK:
bestAnyRegionSize = bestAnyRegion.getMemStoreHeapSize();
bestFlushableRegionSize = getMemStoreHeapSize(bestFlushableRegion);
break;
default:
bestAnyRegionSize = bestAnyRegion.getMemStoreDataSize();
bestFlushableRegionSize = getMemStoreDataSize(bestFlushableRegion);
}
if (bestAnyRegionSize > 2 * bestFlushableRegionSize) {
// Even if it's not supposed to be flushed, pick a region if it's more than twice
// as big as the best flushable one - otherwise when we're under pressure we make
// lots of little flushes and cause lots of compactions, etc, which just makes
// life worse!
if (LOG.isDebugEnabled()) {
LOG.debug("Under global heap pressure: " + "Region "
+ bestAnyRegion.getRegionInfo().getRegionNameAsString() + " has too many "
+ "store files, but is " + TraditionalBinaryPrefix.long2String(bestAnyRegionSize, "", 1)
+ " vs best flushable region's "
+ TraditionalBinaryPrefix.long2String(bestFlushableRegionSize, "", 1)
+ ". Choosing the bigger.");
}
regionToFlush = bestAnyRegion;
} else {
if (bestFlushableRegion == null) {
regionToFlush = bestAnyRegion;
} else {
regionToFlush = bestFlushableRegion;
}
}
long regionToFlushSize;
long bestRegionReplicaSize;
switch (flushType) {
case ABOVE_OFFHEAP_HIGHER_MARK:
case ABOVE_OFFHEAP_LOWER_MARK:
regionToFlushSize = regionToFlush.getMemStoreOffHeapSize();
bestRegionReplicaSize = getMemStoreOffHeapSize(bestRegionReplica);
break;
case ABOVE_ONHEAP_HIGHER_MARK:
case ABOVE_ONHEAP_LOWER_MARK:
regionToFlushSize = regionToFlush.getMemStoreHeapSize();
bestRegionReplicaSize = getMemStoreHeapSize(bestRegionReplica);
break;
default:
regionToFlushSize = regionToFlush.getMemStoreDataSize();
bestRegionReplicaSize = getMemStoreDataSize(bestRegionReplica);
}
if ((regionToFlush == null || regionToFlushSize == 0) && bestRegionReplicaSize == 0) {
        // A concurrency issue (such as a region split) may cause an online region returned by the
        // getCopyOfOnlineRegionsSortedByXX() methods to no longer be eligible for
        // getBiggestMemStoreRegion(). In that case we can safely exit the loop.
LOG.debug("Above memory mark but there is no flushable region");
return false;
}
if (
regionToFlush == null || (bestRegionReplica != null
&& ServerRegionReplicaUtil.isRegionReplicaStoreFileRefreshEnabled(conf)
&& (bestRegionReplicaSize > secondaryMultiplier * regionToFlushSize))
) {
LOG.info("Refreshing storefiles of region " + bestRegionReplica
+ " due to global heap pressure. Total memstore off heap size="
+ TraditionalBinaryPrefix
.long2String(server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(), "", 1)
+ " memstore heap size=" + TraditionalBinaryPrefix
.long2String(server.getRegionServerAccounting().getGlobalMemStoreHeapSize(), "", 1));
flushedOne = refreshStoreFilesAndReclaimMemory(bestRegionReplica);
if (!flushedOne) {
LOG.info("Excluding secondary region " + bestRegionReplica
+ " - trying to find a different region to refresh files.");
excludedRegions.add(bestRegionReplica);
}
} else {
LOG.info("Flush of region " + regionToFlush + " due to global heap pressure. "
+ "Flush type=" + flushType.toString() + ", Total Memstore Heap size="
+ TraditionalBinaryPrefix
.long2String(server.getRegionServerAccounting().getGlobalMemStoreHeapSize(), "", 1)
+ ", Total Memstore Off-Heap size="
+ TraditionalBinaryPrefix
.long2String(server.getRegionServerAccounting().getGlobalMemStoreOffHeapSize(), "", 1)
+ ", Region memstore size="
+ TraditionalBinaryPrefix.long2String(regionToFlushSize, "", 1));
flushedOne = flushRegion(regionToFlush, true, null, FlushLifeCycleTracker.DUMMY);
if (!flushedOne) {
LOG.info("Excluding unflushable region " + regionToFlush
+ " - trying to find a different region to flush.");
excludedRegions.add(regionToFlush);
}
}
}
return true;
} | 3.68 |
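A stripped-down, hypothetical sketch of just the selection heuristic described above: prefer the biggest region that is safe to flush, unless the overall biggest region is more than twice as large. Flush types, replicas, retries, and exclusion of failed regions are omitted, and the Region record here is a stand-in, not HBase's HRegion.

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

public class FlushTargetSketch {

    record Region(String name, long memstoreSize, boolean tooManyStoreFiles) {}

    /** Pick the region to flush under global memory pressure. */
    static Optional<Region> chooseRegionToFlush(List<Region> regions) {
        Optional<Region> bestAny = regions.stream()
                .max(Comparator.comparingLong(Region::memstoreSize));
        Optional<Region> bestFlushable = regions.stream()
                .filter(r -> !r.tooManyStoreFiles())
                .max(Comparator.comparingLong(Region::memstoreSize));
        if (bestAny.isEmpty()) {
            return Optional.empty();   // nothing online to flush
        }
        long anySize = bestAny.get().memstoreSize();
        long flushableSize = bestFlushable.map(Region::memstoreSize).orElse(0L);
        // Prefer the "clean" candidate unless the overall biggest region dwarfs it,
        // which would otherwise cause many tiny flushes and extra compactions.
        if (bestFlushable.isEmpty() || anySize > 2 * flushableSize) {
            return bestAny;
        }
        return bestFlushable;
    }

    public static void main(String[] args) {
        List<Region> regions = List.of(
                new Region("a", 900, true),
                new Region("b", 400, false),
                new Region("c", 100, false));
        System.out.println(chooseRegionToFlush(regions));   // picks "a": 900 > 2 * 400
    }
}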
hadoop_HsController_aboutPage | /**
* @return the page about the current server.
*/
protected Class<? extends View> aboutPage() {
return HsAboutPage.class;
} | 3.68 |
framework_Slot_setAlignment | /**
* Sets how the widget is aligned inside the slot.
*
* @param alignment
* The alignment inside the slot
*/
public void setAlignment(AlignmentInfo alignment) {
this.alignment = alignment;
if (alignment != null && alignment.isHorizontalCenter()) {
addStyleName(ALIGN_CLASS_PREFIX + "center");
removeStyleName(ALIGN_CLASS_PREFIX + "right");
} else if (alignment != null && alignment.isRight()) {
addStyleName(ALIGN_CLASS_PREFIX + "right");
removeStyleName(ALIGN_CLASS_PREFIX + "center");
} else {
removeStyleName(ALIGN_CLASS_PREFIX + "right");
removeStyleName(ALIGN_CLASS_PREFIX + "center");
}
if (alignment != null && alignment.isVerticalCenter()) {
addStyleName(ALIGN_CLASS_PREFIX + "middle");
removeStyleName(ALIGN_CLASS_PREFIX + "bottom");
} else if (alignment != null && alignment.isBottom()) {
addStyleName(ALIGN_CLASS_PREFIX + "bottom");
removeStyleName(ALIGN_CLASS_PREFIX + "middle");
} else {
removeStyleName(ALIGN_CLASS_PREFIX + "middle");
removeStyleName(ALIGN_CLASS_PREFIX + "bottom");
}
} | 3.68 |
hbase_FanOutOneBlockAsyncDFSOutputSaslHelper_wrapAndSetPayload | /**
 * Create a ByteString from a byte array without copying (wrap), and then set it as the payload
* for the builder.
* @param builder builder for HDFS DataTransferEncryptorMessage.
* @param payload byte array of payload.
*/
static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder,
byte[] payload) throws IOException {
Object byteStringObject;
try {
// byteStringObject = new LiteralByteString(payload);
byteStringObject = constructor.newInstance(payload);
// builder.setPayload(byteStringObject);
setPayloadMethod.invoke(builder, constructor.getDeclaringClass().cast(byteStringObject));
} catch (IllegalAccessException | InstantiationException e) {
throw new RuntimeException(e);
} catch (InvocationTargetException e) {
Throwables.propagateIfPossible(e.getTargetException(), IOException.class);
throw new RuntimeException(e.getTargetException());
}
} | 3.68 |
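A self-contained sketch of the same reflection pattern: cache a non-accessible constructor and a setter Method once, then invoke them later. The Payload and Builder classes below are hypothetical stand-ins; the real helper targets protobuf's package-private LiteralByteString and the HDFS message builder.

import java.lang.reflect.Constructor;
import java.lang.reflect.Method;

public class ReflectionWrapSketch {

    /** Stand-in for a class whose wrapping constructor is not publicly accessible. */
    static final class Payload {
        private final byte[] bytes;
        private Payload(byte[] bytes) { this.bytes = bytes; }   // wraps without copying
        int size() { return bytes.length; }
    }

    /** Stand-in for the builder whose setter expects the wrapped type. */
    static final class Builder {
        private Payload payload;
        public void setPayload(Payload payload) { this.payload = payload; }
    }

    // Cache the reflective handles once, as the original helper does.
    private static final Constructor<Payload> CONSTRUCTOR;
    private static final Method SET_PAYLOAD;
    static {
        try {
            CONSTRUCTOR = Payload.class.getDeclaredConstructor(byte[].class);
            CONSTRUCTOR.setAccessible(true);
            SET_PAYLOAD = Builder.class.getMethod("setPayload", Payload.class);
        } catch (NoSuchMethodException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    static void wrapAndSetPayload(Builder builder, byte[] bytes) {
        try {
            Object wrapped = CONSTRUCTOR.newInstance((Object) bytes);
            SET_PAYLOAD.invoke(builder, wrapped);
        } catch (ReflectiveOperationException e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        Builder builder = new Builder();
        wrapAndSetPayload(builder, new byte[] {1, 2, 3});
        System.out.println(builder.payload.size());   // 3
    }
}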
hudi_StreamerUtil_getLockConfig | /**
* Get the lockConfig if required, empty {@link Option} otherwise.
*/
public static Option<HoodieLockConfig> getLockConfig(Configuration conf) {
if (OptionsResolver.isLockRequired(conf) && !conf.containsKey(HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key())) {
// configure the fs lock provider by default
return Option.of(HoodieLockConfig.newBuilder()
.fromProperties(FileSystemBasedLockProvider.getLockConfig(conf.getString(FlinkOptions.PATH)))
.withConflictResolutionStrategy(OptionsResolver.getConflictResolutionStrategy(conf))
.build());
}
return Option.empty();
} | 3.68 |
morf_DatabaseSchemaManager_tableCache | /**
* Returns the cached set of tables in the database.
*/
private Map<String, Table> tableCache(ProducerCache producerCache) {
if (!tablesLoaded.get()) {
cacheTables(producerCache.get().getSchema().tables());
}
return tables.get();
} | 3.68 |
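A minimal sketch of the same load-once caching idiom, with a plain Supplier standing in for the ProducerCache/schema lookup (those names and types are Morf internals and are not reproduced here):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;

public class LazyTableCache {
    private final AtomicBoolean loaded = new AtomicBoolean(false);
    private final Map<String, String> tables = new ConcurrentHashMap<>();
    private final Supplier<Map<String, String>> loader;   // stands in for producerCache.get().getSchema()

    LazyTableCache(Supplier<Map<String, String>> loader) {
        this.loader = loader;
    }

    /** Returns the cached tables, loading them from the source on first access only. */
    Map<String, String> tableCache() {
        if (loaded.compareAndSet(false, true)) {
            tables.putAll(loader.get());
        }
        return tables;
    }

    public static void main(String[] args) {
        LazyTableCache cache = new LazyTableCache(() -> {
            System.out.println("loading schema once");
            return Map.of("ACCOUNT", "Table(ACCOUNT)");
        });
        cache.tableCache();
        cache.tableCache();   // second call hits the cache, no reload
    }
}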
hadoop_OBSBlockOutputStream_clearActiveBlock | /**
* Clear the active block.
*/
private synchronized void clearActiveBlock() {
if (activeBlock != null) {
LOG.debug("Clearing active block");
}
activeBlock = null;
} | 3.68 |
dubbo_LoggerFactory_getLogger | /**
 * Get a logger instance named after the given key.
 *
 * @param key the returned logger will be named after this key
 * @return the logger
*/
public static Logger getLogger(String key) {
return ConcurrentHashMapUtils.computeIfAbsent(
LOGGERS, key, k -> new FailsafeLogger(loggerAdapter.getLogger(k)));
} | 3.68 |
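A hedged sketch of the same one-logger-per-key caching idiom using the JDK's ConcurrentHashMap.computeIfAbsent directly; ConcurrentHashMapUtils and FailsafeLogger are Dubbo internals, so a placeholder SimpleLogger is used instead.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class LoggerCacheSketch {

    /** Placeholder for the real logger type. */
    record SimpleLogger(String name) {
        void info(String msg) { System.out.println("[" + name + "] " + msg); }
    }

    private static final Map<String, SimpleLogger> LOGGERS = new ConcurrentHashMap<>();

    /** Returns one logger instance per key, creating it on first use. */
    static SimpleLogger getLogger(String key) {
        return LOGGERS.computeIfAbsent(key, SimpleLogger::new);
    }

    public static void main(String[] args) {
        SimpleLogger a = getLogger("org.example.Service");
        SimpleLogger b = getLogger("org.example.Service");
        a.info("cached: " + (a == b));   // cached: true
    }
}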
framework_VComboBox_getTotalSuggestionsIncludingNullSelectionItem | /**
* Gets the total number of suggestions, including the possible null
* selection item, if it should be visible.
*
* @return total number of suggestions with null selection items
*/
private int getTotalSuggestionsIncludingNullSelectionItem() {
return getTotalSuggestions()
+ (getNullSelectionItemShouldBeVisible() ? 1 : 0);
} | 3.68 |
morf_AbstractSqlDialectTest_expectedDateLiteral | /**
* @return The expected date literal.
*/
protected String expectedDateLiteral() {
return "DATE '2010-01-02'";
} | 3.68 |
framework_VUpload_onSubmitComplete | /**
* Called by JSNI (hooked via {@link #onloadstrategy})
*/
private void onSubmitComplete() {
    /* Needs to be run deferred to avoid various browser issues. */
Scheduler.get().scheduleDeferred(() -> {
if (submitted) {
if (client != null) {
if (t != null) {
t.cancel();
}
getLogger().info("VUpload:Submit complete");
if (isAttached()) {
// no need to call poll() if component is already
// detached #8728
((UploadConnector) ConnectorMap.get(client)
.getConnector(VUpload.this))
.getRpcProxy(UploadServerRpc.class)
.poll();
}
}
rebuildPanel();
submitted = false;
enableUpload();
if (!isAttached()) {
/*
                     * The upload completed after it had already been abandoned.
*/
cleanTargetFrame();
}
}
});
} | 3.68 |
flink_MathUtils_divideRoundUp | /**
 * Divide and round up to the nearest integer. E.g., divideRoundUp(3, 2) returns 2, divideRoundUp(0, 3)
* returns 0. Note that this method does not support negative values.
*
* @param dividend value to be divided by the divisor
* @param divisor value by which the dividend is to be divided
* @return the quotient rounding up to integer
*/
public static int divideRoundUp(int dividend, int divisor) {
Preconditions.checkArgument(dividend >= 0, "Negative dividend is not supported.");
Preconditions.checkArgument(divisor > 0, "Negative or zero divisor is not supported.");
return dividend == 0 ? 0 : (dividend - 1) / divisor + 1;
} | 3.68 |
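A small usage sketch of the (dividend - 1) / divisor + 1 formula, checked against Math.ceil for a couple of values (the demo class name is made up):

public class DivideRoundUpDemo {
    static int divideRoundUp(int dividend, int divisor) {
        if (dividend < 0 || divisor <= 0) {
            throw new IllegalArgumentException("non-negative dividend and positive divisor required");
        }
        return dividend == 0 ? 0 : (dividend - 1) / divisor + 1;
    }

    public static void main(String[] args) {
        System.out.println(divideRoundUp(3, 2));   // 2
        System.out.println(divideRoundUp(6, 3));   // 2  (exact division is not rounded up further)
        System.out.println(divideRoundUp(0, 3));   // 0
        // Matches Math.ceil without the double round-trip:
        System.out.println((int) Math.ceil(3 / 2.0));   // 2
    }
}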
hadoop_TimelineEntity_getRelatedEntities | /**
* Get the related entities
*
* @return the related entities
*/
public Map<String, Set<String>> getRelatedEntities() {
return relatedEntities;
} | 3.68 |
hadoop_ByteBufferDecodingState_convertToByteArrayState | /**
* Convert to a ByteArrayDecodingState when it's backed by on-heap arrays.
*/
ByteArrayDecodingState convertToByteArrayState() {
int[] inputOffsets = new int[inputs.length];
int[] outputOffsets = new int[outputs.length];
byte[][] newInputs = new byte[inputs.length][];
byte[][] newOutputs = new byte[outputs.length][];
ByteBuffer buffer;
for (int i = 0; i < inputs.length; ++i) {
buffer = inputs[i];
if (buffer != null) {
inputOffsets[i] = buffer.arrayOffset() + buffer.position();
newInputs[i] = buffer.array();
}
}
for (int i = 0; i < outputs.length; ++i) {
buffer = outputs[i];
outputOffsets[i] = buffer.arrayOffset() + buffer.position();
newOutputs[i] = buffer.array();
}
ByteArrayDecodingState baeState = new ByteArrayDecodingState(decoder,
decodeLength, erasedIndexes, newInputs,
inputOffsets, newOutputs, outputOffsets);
return baeState;
} | 3.68 |
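A short sketch of why the conversion uses arrayOffset() + position() as the start index when unwrapping a heap ByteBuffer into its backing array; the buffer setup below is hypothetical but shows the offset arithmetic the method relies on.

import java.nio.ByteBuffer;

public class HeapBufferOffsetDemo {
    public static void main(String[] args) {
        byte[] backing = new byte[16];
        for (int i = 0; i < backing.length; i++) {
            backing[i] = (byte) i;
        }

        // Wrap a sub-range and advance the position, as decoding-state buffers often are.
        ByteBuffer buffer = ByteBuffer.wrap(backing, 4, 8).slice();
        buffer.position(2);

        // The slice's array() is still the full backing array, so the usable data
        // starts at arrayOffset() + position(), not at position() alone.
        int start = buffer.arrayOffset() + buffer.position();
        System.out.println(buffer.array() == backing);   // true
        System.out.println(start);                       // 6
        System.out.println(buffer.array()[start]);       // 6
    }
}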