name | code_snippet | score
---|---|---|
framework_VTooltip_getFinalTouchX | /**
* Return the final X-coordinate of the tooltip based on cursor
* position, size of the tooltip, size of the page and necessary
* margins.
*
* @param offsetWidth the width of the tooltip
* @return The final X-coordinate
*/
private int getFinalTouchX(int offsetWidth) {
int x = 0;
int widthNeeded = 10 + offsetWidth;
int roomLeft = currentElement != null
? currentElement.getAbsoluteLeft()
: EVENT_XY_POSITION_OUTSIDE;
int viewPortWidth = Window.getClientWidth();
int roomRight = viewPortWidth - roomLeft;
if (roomRight > widthNeeded) {
x = roomLeft;
} else {
x = roomLeft - offsetWidth;
}
if (x + offsetWidth
- Window.getScrollLeft() > viewPortWidth) {
x = viewPortWidth - offsetWidth
+ Window.getScrollLeft();
}
if (roomLeft != EVENT_XY_POSITION_OUTSIDE) {
// Do not allow x to be zero, for otherwise the tooltip
// does not close when the mouse is moved (see
// isTooltipOpen()). #15129
int minX = Math.max(1, Window.getScrollLeft());
x = Math.max(x, minX);
}
return x;
} | 3.68 |
flink_CrossOperator_projectTuple19 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>
ProjectCross<
I1,
I2,
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>
projectTuple19() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>
tType =
new TupleTypeInfo<
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>(fTypes);
return new ProjectCross<
I1,
I2,
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
framework_ListSelectElement_getValue | /**
* Return value of the list select element.
*
* @return value of the list select element
*/
public String getValue() {
return select.getFirstSelectedOption().getText();
} | 3.68 |
zxing_CameraManager_openDriver | /**
* Opens the camera driver and initializes the hardware parameters.
*
* @param holder The surface object which the camera will draw preview frames into.
* @throws IOException Indicates the camera driver failed to open.
*/
public synchronized void openDriver(SurfaceHolder holder) throws IOException {
OpenCamera theCamera = camera;
if (theCamera == null) {
theCamera = OpenCameraInterface.open(requestedCameraId);
if (theCamera == null) {
throw new IOException("Camera.open() failed to return object from driver");
}
camera = theCamera;
}
if (!initialized) {
initialized = true;
configManager.initFromCameraParameters(theCamera);
if (requestedFramingRectWidth > 0 && requestedFramingRectHeight > 0) {
setManualFramingRect(requestedFramingRectWidth, requestedFramingRectHeight);
requestedFramingRectWidth = 0;
requestedFramingRectHeight = 0;
}
}
Camera cameraObject = theCamera.getCamera();
Camera.Parameters parameters = cameraObject.getParameters();
String parametersFlattened = parameters == null ? null : parameters.flatten(); // Save these, temporarily
try {
configManager.setDesiredCameraParameters(theCamera, false);
} catch (RuntimeException re) {
// Driver failed
Log.w(TAG, "Camera rejected parameters. Setting only minimal safe-mode parameters");
Log.i(TAG, "Resetting to saved camera params: " + parametersFlattened);
// Reset:
if (parametersFlattened != null) {
parameters = cameraObject.getParameters();
parameters.unflatten(parametersFlattened);
try {
cameraObject.setParameters(parameters);
configManager.setDesiredCameraParameters(theCamera, true);
} catch (RuntimeException re2) {
// Well, darn. Give up
Log.w(TAG, "Camera rejected even safe-mode parameters! No configuration");
}
}
}
cameraObject.setPreviewDisplay(holder);
} | 3.68 |
hudi_TableCommand_descTable | /**
* Describes table properties.
*/
@ShellMethod(key = "desc", value = "Describe Hoodie Table properties")
public String descTable() {
HoodieTableMetaClient client = HoodieCLI.getTableMetaClient();
TableHeader header = new TableHeader().addTableHeaderField("Property").addTableHeaderField("Value");
List<Comparable[]> rows = new ArrayList<>();
rows.add(new Comparable[] {"basePath", client.getBasePath()});
rows.add(new Comparable[] {"metaPath", client.getMetaPath()});
rows.add(new Comparable[] {"fileSystem", client.getFs().getScheme()});
client.getTableConfig().propsMap().entrySet().forEach(e -> {
rows.add(new Comparable[] {e.getKey(), e.getValue()});
});
return HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
}
/**
* Refresh table metadata.
*/
@ShellMethod(key = {"refresh", "metadata refresh", "commits refresh", "cleans refresh", "savepoints refresh"} | 3.68 |
hadoop_GangliaConf_getDmax | /**
* @return the dmax
*/
int getDmax() {
return dmax;
} | 3.68 |
flink_ApiSpecGeneratorUtils_findAdditionalFieldType | /**
* Find whether the class contains dynamic fields that need to be documented.
*
* @param clazz class to check
* @return optional that is non-empty if the class is annotated with {@link
* FlinkJsonSchema.AdditionalFields}
*/
public static Optional<Class<?>> findAdditionalFieldType(Class<?> clazz) {
final FlinkJsonSchema.AdditionalFields annotation =
clazz.getAnnotation(FlinkJsonSchema.AdditionalFields.class);
return Optional.ofNullable(annotation).map(FlinkJsonSchema.AdditionalFields::type);
} | 3.68 |
hmily_EtcdClient_getInstance | /**
* get instance of EtcdClient.
*
* @param config etcdConfig
* @return etcd Client
*/
public static EtcdClient getInstance(final EtcdConfig config) {
Client client = Client.builder().endpoints(config.getServer()).build();
EtcdClient etcdClient = new EtcdClient();
etcdClient.setClient(client);
return etcdClient;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_getDirectorySet | /**
* Take a comma-separated list of directories from a configuration variable
* and transform it to a set of directories.
*/
private Set<String> getDirectorySet(final String configVar)
throws AzureException {
String[] rawDirs = sessionConfiguration.getStrings(configVar, new String[0]);
Set<String> directorySet = new HashSet<String>();
for (String currentDir : rawDirs) {
String myDir;
try {
myDir = verifyAndConvertToStandardFormat(currentDir.trim());
} catch (URISyntaxException ex) {
throw new AzureException(String.format(
"The directory %s specified in the configuration entry %s is not"
+ " a valid URI.",
currentDir, configVar));
}
if (myDir != null) {
directorySet.add(myDir);
}
}
return directorySet;
} | 3.68 |
flink_MapView_contains | /**
* Checks if the map view contains a value for a given key.
*
* @param key The key to check.
* @return True if there exists a value for the given key, false otherwise.
* @throws Exception Thrown if the system cannot access the map.
*/
public boolean contains(K key) throws Exception {
return map.containsKey(key);
} | 3.68 |
hudi_LazyIterableIterator_end | /**
* Called once, after all elements are processed.
*/
protected void end() {} | 3.68 |
hadoop_SelectEventStreamPublisher_response | /**
* The response from the SelectObjectContent call.
* @return the response object
*/
public SelectObjectContentResponse response() {
return response;
} | 3.68 |
zxing_PDF417ScanningDecoder_createDecoderResultFromAmbiguousValues | /**
* This method deals with the fact that the decoding process doesn't always yield a single most likely value. The
* current error correction implementation doesn't deal with erasures very well, so it's better to provide a value
* for these ambiguous codewords instead of treating them as erasures. The problem is that we don't know which of
* the ambiguous values to choose. We try to decode using the first value, and if that fails, we use another of the
* ambiguous values and try to decode again. This usually only happens on barcodes that are very hard to read and
* decode, so decoding of normal barcodes is not affected by this.
*
* @param erasureArray contains the indexes of erasures
* @param ambiguousIndexes array with the indexes that have more than one most likely value
* @param ambiguousIndexValues two dimensional array that contains the ambiguous values. The first dimension must
* be the same length as the ambiguousIndexes array
*/
private static DecoderResult createDecoderResultFromAmbiguousValues(int ecLevel,
int[] codewords,
int[] erasureArray,
int[] ambiguousIndexes,
int[][] ambiguousIndexValues)
throws FormatException, ChecksumException {
int[] ambiguousIndexCount = new int[ambiguousIndexes.length];
int tries = 100;
while (tries-- > 0) {
for (int i = 0; i < ambiguousIndexCount.length; i++) {
codewords[ambiguousIndexes[i]] = ambiguousIndexValues[i][ambiguousIndexCount[i]];
}
try {
return decodeCodewords(codewords, ecLevel, erasureArray);
} catch (ChecksumException ignored) {
//
}
if (ambiguousIndexCount.length == 0) {
throw ChecksumException.getChecksumInstance();
}
for (int i = 0; i < ambiguousIndexCount.length; i++) {
if (ambiguousIndexCount[i] < ambiguousIndexValues[i].length - 1) {
ambiguousIndexCount[i]++;
break;
} else {
ambiguousIndexCount[i] = 0;
if (i == ambiguousIndexCount.length - 1) {
throw ChecksumException.getChecksumInstance();
}
}
}
}
throw ChecksumException.getChecksumInstance();
} | 3.68 |
hudi_HoodieRowDataCreation_create | /**
* Creates an {@link AbstractHoodieRowData} instance based on the given configuration.
*/
public static AbstractHoodieRowData create(
String commitTime,
String commitSeqNumber,
String recordKey,
String partitionPath,
String fileName,
RowData row,
boolean withOperation,
boolean withMetaFields) {
return withMetaFields
? new HoodieRowDataWithMetaFields(commitTime, commitSeqNumber, recordKey, partitionPath, fileName, row, withOperation)
: new HoodieRowData(commitTime, commitSeqNumber, recordKey, partitionPath, fileName, row, withOperation);
} | 3.68 |
flink_Execution_setInitialState | /**
* Sets the initial state for the execution. The serialized state is then shipped via the {@link
* TaskDeploymentDescriptor} to the TaskManagers.
*
* @param taskRestore information to restore the state
*/
public void setInitialState(JobManagerTaskRestore taskRestore) {
this.taskRestore = taskRestore;
} | 3.68 |
flink_ResolvedSchema_getColumns | /** Returns all {@link Column}s of this schema. */
public List<Column> getColumns() {
return columns;
} | 3.68 |
hbase_WALSplitUtil_archive | /**
* Moves processed logs to the oldLogDir after successful processing. Moves corrupted logs (any log
* that couldn't be successfully parsed) to corruptDir (.corrupt) for later investigation.
*/
static void archive(final Path wal, final boolean corrupt, final Path oldWALDir,
final FileSystem walFS, final Configuration conf) throws IOException {
Path dir;
Path target;
if (corrupt) {
dir = new Path(CommonFSUtils.getWALRootDir(conf), HConstants.CORRUPT_DIR_NAME);
if (conf.get("hbase.regionserver.hlog.splitlog.corrupt.dir") != null) {
LOG.warn("hbase.regionserver.hlog.splitlog.corrupt.dir is deprecated. Default to {}", dir);
}
target = new Path(dir, wal.getName());
} else {
dir = oldWALDir;
target = AbstractFSWAL.getWALArchivePath(oldWALDir, wal);
}
mkdir(walFS, dir);
moveWAL(walFS, wal, target);
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_doPhysicalRemove | /**
* Removes the node physically, and free all space used by the key and value.
*
* @param node node to remove.
* @param prevNode previous node at the level 0.
* @param nextNode next node at the level 0.
*/
private void doPhysicalRemove(long node, long prevNode, long nextNode) {
// free space used by key and level index
long valuePointer = deleteNodeMeta(node, prevNode, nextNode);
// free space used by value
SkipListUtils.removeAllValues(valuePointer, spaceAllocator);
} | 3.68 |
flink_DataStream_getExecutionEnvironment | /**
* Returns the {@link StreamExecutionEnvironment} that was used to create this {@link
* DataStream}.
*
* @return The Execution Environment
*/
public StreamExecutionEnvironment getExecutionEnvironment() {
return environment;
} | 3.68 |
hbase_BackupAdminImpl_cleanupBackupDir | /**
* Clean up the data at target directory
* @throws IOException if cleaning up the backup directory fails
*/
private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf)
throws IOException {
try {
// clean up the data at target directory
String targetDir = backupInfo.getBackupRootDir();
if (targetDir == null) {
LOG.warn("No target directory specified for " + backupInfo.getBackupId());
return;
}
FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
Path targetDirPath = new Path(BackupUtils.getTableBackupDir(backupInfo.getBackupRootDir(),
backupInfo.getBackupId(), table));
if (outputFs.delete(targetDirPath, true)) {
LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
} else {
LOG.info("No data has been found in " + targetDirPath.toString() + ".");
}
} catch (IOException e1) {
LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table
+ "at " + backupInfo.getBackupRootDir() + " failed due to " + e1.getMessage() + ".");
throw e1;
}
} | 3.68 |
pulsar_SchemasImpl_convertSchemaDataToStringLegacy | // the util function exists for backward compatibility concern
static String convertSchemaDataToStringLegacy(SchemaInfo schemaInfo) throws IOException {
byte[] schemaData = schemaInfo.getSchema();
if (null == schemaInfo.getSchema()) {
return "";
}
if (schemaInfo.getType() == SchemaType.KEY_VALUE) {
return DefaultImplementation.getDefaultImplementation().convertKeyValueSchemaInfoDataToString(
DefaultImplementation.getDefaultImplementation().decodeKeyValueSchemaInfo(schemaInfo));
}
return new String(schemaData, UTF_8);
} | 3.68 |
dubbo_AbstractConfig_convert | /**
* @param parameters the raw parameters
* @param prefix the prefix
* @return the parameters with the prefix applied; for compatible keys, "-" in the raw key is also replaced with "."
* @revised 2.7.8 "private" to be "protected"
*/
protected static Map<String, String> convert(Map<String, String> parameters, String prefix) {
if (parameters == null || parameters.isEmpty()) {
return new HashMap<>();
}
Map<String, String> result = new HashMap<>();
String pre = (StringUtils.isNotEmpty(prefix) ? prefix + "." : "");
for (Map.Entry<String, String> entry : parameters.entrySet()) {
String key = entry.getKey();
String value = entry.getValue();
result.put(pre + key, value);
// For compatibility, key like "registry-type" will have a duplicate key "registry.type"
if (Arrays.binarySearch(Constants.DOT_COMPATIBLE_KEYS, key) >= 0) {
result.put(pre + key.replace('-', '.'), value);
}
}
return result;
} | 3.68 |
flink_PekkoUtils_getRemoteConfig | /**
* Creates a Pekko config for a remote actor system listening on port on the network interface
* identified by bindAddress.
*
* @param configuration instance containing the user provided configuration values
* @param bindAddress of the network interface to bind on
* @param port to bind to or if 0 then Pekko picks a free port automatically
* @param externalHostname The host name to expect for Pekko messages
* @param externalPort The port to expect for Pekko messages
* @return Flink's Pekko configuration for remote actor systems
*/
private static Config getRemoteConfig(
Configuration configuration,
String bindAddress,
int port,
String externalHostname,
int externalPort) {
final ConfigBuilder builder = new ConfigBuilder();
addBaseRemoteConfig(builder, configuration, port, externalPort);
addHostnameRemoteConfig(builder, bindAddress, externalHostname);
addSslRemoteConfig(builder, configuration);
addRemoteForkJoinExecutorConfig(
builder,
ActorSystemBootstrapTools.getRemoteForkJoinExecutorConfiguration(configuration));
return builder.build();
} | 3.68 |
open-banking-gateway_DatasafeConfigurer_psuDatasafeServices | /**
* PSU/FinTech user Datasafe storage.
* @param psuReadStorePass Datasafe password to open keystore.
* @param serde Serialization/Deserialization handler
* @return PSU/FinTech user Datasafe storage
*/
@Bean
public PsuSecureStorage psuDatasafeServices(
@Value(ENCRYPTION_DATASAFE_READ_KEYSTORE_PREFIX + ".psu} | 3.68 |
hbase_AccessController_checkForReservedTagPresence | // Checks whether incoming cells contain any tag with type as ACL_TAG_TYPE. This tag
// type is reserved and should not be explicitly set by user.
private void checkForReservedTagPresence(User user, Mutation m) throws IOException {
// No need to check if we're not going to throw
if (!authorizationEnabled) {
m.setAttribute(TAG_CHECK_PASSED, TRUE);
return;
}
// Superusers are allowed to store cells unconditionally.
if (Superusers.isSuperUser(user)) {
m.setAttribute(TAG_CHECK_PASSED, TRUE);
return;
}
// We already checked (prePut vs preBatchMutation)
if (m.getAttribute(TAG_CHECK_PASSED) != null) {
return;
}
for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) {
Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(cellScanner.current());
while (tagsItr.hasNext()) {
if (tagsItr.next().getType() == PermissionStorage.ACL_TAG_TYPE) {
throw new AccessDeniedException("Mutation contains cell with reserved type tag");
}
}
}
m.setAttribute(TAG_CHECK_PASSED, TRUE);
} | 3.68 |
hmily_HmilyConsistentHashLoadBalance_refresh | /**
* Refresh local invoker.
*
* @param invokers invokers
*/
@Override
public void refresh(final Collection<Invoker<T>> invokers) {
LOGGER.info(config.getSimpleObjectName() + " try to refresh ConsistentHashLoadBalance's invoker cache, size=" + (invokers == null || invokers.isEmpty() ? 0 : invokers.size()));
if (CollectionUtils.isEmpty(invokers)) {
sortedInvokersCache = null;
conHashInvokersCache = null;
return;
}
List<Invoker<T>> sortedInvokersTmp = new ArrayList<>(invokers);
sortedInvokersTmp.sort(comparator);
sortedInvokersCache = sortedInvokersTmp;
ConcurrentSkipListMap<Long, Invoker<T>> concurrentSkipListMap = new ConcurrentSkipListMap<Long, Invoker<T>>();
LoadBalanceHelper.buildConsistentHashCircle(sortedInvokersTmp, config).forEach(concurrentSkipListMap::put);
conHashInvokersCache = concurrentSkipListMap;
LOGGER.info(config.getSimpleObjectName() + " refresh ConsistentHashLoadBalance's invoker cache done, conHashInvokersCache size="
+ (conHashInvokersCache == null || conHashInvokersCache.isEmpty() ? 0 : conHashInvokersCache.size())
+ ", sortedInvokersCache size=" + (sortedInvokersCache == null || sortedInvokersCache.isEmpty() ? 0 : sortedInvokersCache.size()));
} | 3.68 |
graphhopper_GraphHopper_close | /**
* Releases all associated resources like memory or files. But it does not remove them. To
* remove the files created in graphhopperLocation you have to call clean().
*/
public void close() {
if (baseGraph != null)
baseGraph.close();
if (properties != null)
properties.close();
chGraphs.values().forEach(RoutingCHGraph::close);
landmarks.values().forEach(LandmarkStorage::close);
if (locationIndex != null)
locationIndex.close();
try {
lockFactory.forceRemove(fileLockName, true);
} catch (Exception ex) {
// silently fail e.g. on Windows where we cannot remove an unreleased native lock
}
} | 3.68 |
graphhopper_VectorTile_setFeatures | /**
* <pre>
* The actual features in this tile.
* </pre>
*
* <code>repeated .vector_tile.Tile.Feature features = 2;</code>
*/
public Builder setFeatures(
int index, vector_tile.VectorTile.Tile.Feature.Builder builderForValue) {
if (featuresBuilder_ == null) {
ensureFeaturesIsMutable();
features_.set(index, builderForValue.build());
onChanged();
} else {
featuresBuilder_.setMessage(index, builderForValue.build());
}
return this;
} | 3.68 |
hadoop_AbfsConfiguration_getEnum | /**
* Returns the account-specific enum value if it exists, then
* looks for an account-agnostic value.
* @param name Account-agnostic configuration key
* @param defaultValue Value returned if none is configured
* @param <T> Enum type
* @return enum value if one is configured, else the default value
*/
public <T extends Enum<T>> T getEnum(String name, T defaultValue) {
return rawConfig.getEnum(accountConf(name),
rawConfig.getEnum(name, defaultValue));
} | 3.68 |
open-banking-gateway_FintechUserAuthSessionTuple_toDatasafePathWithoutParent | /**
* Computes current tuples' Datasafe storage path.
* @return Datasafe path corresponding to current tuple
*/
public String toDatasafePathWithoutParent() {
return this.authSessionId.toString();
} | 3.68 |
framework_VAbstractCalendarPanel_onTabOut | /**
* True should be returned if the panel will not be used after this event.
*
* @param event
* dom event
* @return {@code true} if the panel will not be used after this event,
* {@code false} otherwise
*/
protected boolean onTabOut(DomEvent<?> event) {
if (focusOutListener != null) {
return focusOutListener.onFocusOut(event);
}
return false;
} | 3.68 |
hbase_HRegion_replayWALEntry | /**
* Replay remote wal entry sent by primary replica.
* <p/>
* Should only call this method on secondary replicas.
*/
void replayWALEntry(WALEntry entry, CellScanner cells) throws IOException {
long timeout = -1L;
Optional<RpcCall> call = RpcServer.getCurrentCall();
if (call.isPresent()) {
long deadline = call.get().getDeadline();
if (deadline < Long.MAX_VALUE) {
timeout = deadline - EnvironmentEdgeManager.currentTime();
if (timeout <= 0) {
throw new TimeoutIOException("Timeout while replaying edits for " + getRegionInfo());
}
}
}
if (timeout > 0) {
try {
if (!replayLock.tryLock(timeout, TimeUnit.MILLISECONDS)) {
throw new TimeoutIOException(
"Timeout while waiting for lock when replaying edits for " + getRegionInfo());
}
} catch (InterruptedException e) {
throw throwOnInterrupt(e);
}
} else {
replayLock.lock();
}
try {
int count = entry.getAssociatedCellCount();
long sequenceId = entry.getKey().getLogSequenceNumber();
if (lastReplayedSequenceId >= sequenceId) {
// we have already replayed this edit, skip
// remember to advance the CellScanner, as we may have multiple WALEntries, we may still
// need apply later WALEntries
for (int i = 0; i < count; i++) {
// Throw index out of bounds if our cell count is off
if (!cells.advance()) {
throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
}
}
return;
}
Map<byte[], List<Cell>> family2Cells = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for (int i = 0; i < count; i++) {
// Throw index out of bounds if our cell count is off
if (!cells.advance()) {
throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
}
Cell cell = cells.current();
if (WALEdit.isMetaEditFamily(cell)) {
// If there is meta edit, i.e, we have done flush/compaction/open, then we need to apply
// the previous cells first, and then replay the special meta edit. The meta edit is like
// a barrier, We need to keep the order. For example, the flush marker will contain a
// flush sequence number, which makes it possible for us to drop memstore content, but if we
// apply some edits which have greater sequence id first, then we can not drop the
// memstore content when replaying the flush marker, which is not good as we could run out
// of memory.
// And usually, a meta edit will have a special WALEntry for it, so this is just a safe
// guard logic to make sure we do not break things in the worst case.
if (!family2Cells.isEmpty()) {
replayWALBatchMutate(family2Cells);
family2Cells.clear();
}
replayWALMetaEdit(cell);
} else {
family2Cells.computeIfAbsent(CellUtil.cloneFamily(cell), k -> new ArrayList<>())
.add(cell);
}
}
// do not forget to apply the remaining cells
if (!family2Cells.isEmpty()) {
replayWALBatchMutate(family2Cells);
}
mvcc.advanceTo(sequenceId);
lastReplayedSequenceId = sequenceId;
} finally {
replayLock.unlock();
}
} | 3.68 |
hbase_HFileLink_getReferencedRegionName | /**
* Get the Region name of the referenced link
* @param fileName HFileLink file name
* @return the name of the referenced Region
*/
public static String getReferencedRegionName(final String fileName) {
Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName);
if (!m.matches()) {
throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!");
}
return (m.group(3));
} | 3.68 |
hadoop_AMRMTokenSecretManager_getMasterKey | // If nextMasterKey is not Null, then return nextMasterKey
// otherwise return currentMasterKey
@VisibleForTesting
public MasterKeyData getMasterKey() {
this.readLock.lock();
try {
return nextMasterKey == null ? currentMasterKey : nextMasterKey;
} finally {
this.readLock.unlock();
}
} | 3.68 |
morf_AbstractSqlDialectTest_verifyPostInsertStatementsInsertingUnderAutonumLimit | /**
* Verify on the expected SQL statements to be run after insert for the test database table.
* @param sqlScriptExecutor The script executor to use
* @param connection The connection to use
*/
@SuppressWarnings("unused")
protected void verifyPostInsertStatementsInsertingUnderAutonumLimit(SqlScriptExecutor sqlScriptExecutor,Connection connection) {
verifyNoMoreInteractions(sqlScriptExecutor);
} | 3.68 |
graphhopper_LocationIndex_query | /**
* This method explores the LocationIndex with the specified Visitor. It visits only the stored edges (and only once),
* limited by the queryBBox. A few more edges slightly outside of queryBBox could also be
* returned, which you can avoid by doing an explicit BBox check of the coordinates.
*/
default void query(BBox queryBBox, Visitor function) {
query(createBBoxTileFilter(queryBBox), function);
} | 3.68 |
hbase_HFileWriterImpl_newBlock | /**
* Ready a new block for writing.
*/
protected void newBlock() throws IOException {
// This is where the next block begins.
blockWriter.startWriting(BlockType.DATA);
firstCellInBlock = null;
if (lastCell != null) {
lastCellOfPreviousBlock = lastCell;
}
} | 3.68 |
rocketmq-connect_Serdes_Short | /**
* A serde for nullable {@code Short} type.
*/
static public Serde<Short> Short() {
return new ShortSerde();
} | 3.68 |
querydsl_StringExpression_concat | /**
* Create a {@code concat(this, str)} expression
*
* <p>Get the concatenation of this and str</p>
*
* @param str string to append
* @return this + str
*/
public StringExpression concat(String str) {
return append(str);
} | 3.68 |
flink_ChangelogKeyedStateBackend_initMaterialization | /**
* Initialize state materialization so that materialized data can be persisted durably and
* included into the checkpoint.
*
* <p>This method is not thread safe. It should be called either under a lock or through task
* mailbox executor.
*
* @return a tuple of - future snapshot result from the underlying state backend - a {@link
* SequenceNumber} identifying the latest change in the changelog
*/
@Override
public Optional<MaterializationRunnable> initMaterialization() throws Exception {
if (lastConfirmedMaterializationId < materializedId - 1
&& lastFailedMaterializationId < materializedId - 1) {
// SharedStateRegistry potentially requires that the checkpoint's dependency on the
// shared file be continuous; this would be broken if we triggered a new materialization
// before the previous one has either been confirmed or failed. See discussion in
// https://github.com/apache/flink/pull/22669#issuecomment-1593370772 .
LOG.info(
"materialization:{} not confirmed or failed or cancelled, skip trigger new one.",
materializedId - 1);
return Optional.empty();
}
SequenceNumber upTo = stateChangelogWriter.nextSequenceNumber();
SequenceNumber lastMaterializedTo = changelogSnapshotState.lastMaterializedTo();
LOG.info(
"Initialize Materialization. Current changelog writers last append to sequence number {}",
upTo);
if (upTo.compareTo(lastMaterializedTo) > 0) {
LOG.info("Starting materialization from {} : {}", lastMaterializedTo, upTo);
// This ID is not needed for materialization itself, but since we are re-using the
// streamFactory that is designed for state backend snapshots, which requires a unique
// checkpoint ID, a faked materialization ID is provided here.
long materializationID = materializedId++;
MaterializationRunnable materializationRunnable =
new MaterializationRunnable(
keyedStateBackend.snapshot(
materializationID,
System.currentTimeMillis(),
// TODO: implement its own streamFactory.
streamFactory,
CHECKPOINT_OPTIONS),
materializationID,
upTo);
// log metadata after materialization is triggered
changelogStateFactory.resetAllWritingMetaFlags();
return Optional.of(materializationRunnable);
} else {
LOG.debug(
"Skip materialization, last materialized to {} : last log to {}",
lastMaterializedTo,
upTo);
return Optional.empty();
}
} | 3.68 |
morf_SchemaUtils_toUpperCase | /**
* Convert all the strings in a list to upper case.
*
* @param listOfStrings A list of strings
* @return A new list of strings, with each string converted to upper case
*/
public static List<String> toUpperCase(List<String> listOfStrings) {
return listOfStrings.stream().map(String::toUpperCase).collect(Collectors.toList());
} | 3.68 |
framework_FlyweightCell_setColSpan | /**
* Set the colspan attribute for the element of the cell.
*
* @param numberOfCells
* spanned cell count, must be at least 1
*/
public void setColSpan(final int numberOfCells) {
if (numberOfCells < 1) {
throw new IllegalArgumentException(
"Number of cells should be more than 0");
}
/*-
* This will default to 1 if unset, as per DOM specifications:
* http://www.w3.org/TR/html5/tabular-data.html#attributes-common-to-td-and-th-elements
*/
final int prevColSpan = getElement().getPropertyInt(COLSPAN_ATTR);
if (numberOfCells == 1 && prevColSpan == 1) {
return;
}
getElement().setPropertyInt(COLSPAN_ATTR, numberOfCells);
adjustCellWidthForSpan(numberOfCells);
hideOrRevealAdjacentCellElements(numberOfCells, prevColSpan);
currentIterator.setSkipNext(numberOfCells - 1);
} | 3.68 |
hbase_DoubleArrayCost_applyCostsChange | /**
* We do not want to introduce a getCosts method to let the upper layer get the cost array directly,
* so here we introduce this method which takes a {@link Consumer} as parameter, where we will pass
* the actual cost array in, so you can change the elements of the cost array in the
* {@link Consumer} implementation.
* <p/>
* Usually, in the prepare method, you need to fill all the elements of the cost array, while in
* the regionMoved method, you just need to update the elements for the affected region servers.
*/
void applyCostsChange(Consumer<double[]> consumer) {
consumer.accept(costs);
costsChanged = true;
} | 3.68 |
hudi_HoodieRecordPayload_getMetadata | /**
* This method can be used to extract some metadata from HoodieRecordPayload. The metadata is passed to {@code WriteStatus.markSuccess()} and
* {@code WriteStatus.markFailure()} in order to compute some aggregate metrics using the metadata in the context of a write success or failure.
* @return the metadata in the form of Map<String, String> if any.
*/
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
default Option<Map<String, String>> getMetadata() {
return Option.empty();
} | 3.68 |
dubbo_NacosRegistry_notifySubscriber | /**
* Notify the enabled {@link Instance instances} to the subscriber.
*
* @param url {@link URL}
* @param listener {@link NotifyListener}
* @param instances all {@link Instance instances}
*/
private void notifySubscriber(
URL url, String serviceName, NacosAggregateListener listener, Collection<Instance> instances) {
List<Instance> enabledInstances = new LinkedList<>(instances);
if (enabledInstances.size() > 0) {
// Instances
filterEnabledInstances(enabledInstances);
}
List<URL> aggregatedUrls =
toUrlWithEmpty(url, listener.saveAndAggregateAllInstances(serviceName, enabledInstances));
NacosRegistry.this.notify(url, listener.getNotifyListener(), aggregatedUrls);
} | 3.68 |
zilla_ManyToOneRingBuffer_consumerPosition | /**
* {@inheritDoc}
*/
public long consumerPosition()
{
return buffer.getLongVolatile(headPositionIndex);
} | 3.68 |
flink_FileSystemCheckpointStorage_getCheckpointPath | /**
* Gets the base directory where all the checkpoints are stored. The job-specific checkpoint
* directory is created inside this directory.
*
* @return The base directory for checkpoints.
*/
@Nonnull
public Path getCheckpointPath() {
// we know that this can never be null by the way of constructor checks
//noinspection ConstantConditions
return location.getBaseCheckpointPath();
} | 3.68 |
flink_LongMaximum_add | /** Consider using {@link #add(long)} instead for primitive long values */
@Override
public void add(Long value) {
this.max = Math.max(this.max, value);
} | 3.68 |
flink_RocksDBProperty_getConfigKey | /**
* @return key for enabling metric using {@link org.apache.flink.configuration.Configuration}.
*/
public String getConfigKey() {
return String.format(CONFIG_KEY_FORMAT, property);
} | 3.68 |
framework_ColorPickerTestUI_getStream | /* Must implement this method that returns the resource as a stream. */
@Override
public InputStream getStream() {
/* Create an image and draw something on it. */
BufferedImage image = new BufferedImage(270, 270,
BufferedImage.TYPE_INT_RGB);
Graphics drawable = image.getGraphics();
drawable.setColor(bgColor);
drawable.fillRect(0, 0, 270, 270);
drawable.setColor(fgColor);
drawable.fillOval(25, 25, 220, 220);
drawable.setColor(java.awt.Color.blue);
drawable.drawRect(0, 0, 269, 269);
drawable.setColor(java.awt.Color.black);
drawable.drawString("r=" + String.valueOf(fgColor.getRed()) + ",g="
+ String.valueOf(fgColor.getGreen()) + ",b="
+ String.valueOf(fgColor.getBlue()), 50, 100);
drawable.drawString("r=" + String.valueOf(bgColor.getRed()) + ",g="
+ String.valueOf(bgColor.getGreen()) + ",b="
+ String.valueOf(bgColor.getBlue()), 5, 15);
try {
/* Write the image to a buffer. */
imagebuffer = new ByteArrayOutputStream();
ImageIO.write(image, "png", imagebuffer);
/* Return a stream from the buffer. */
return new ByteArrayInputStream(imagebuffer.toByteArray());
} catch (IOException e) {
return null;
}
} | 3.68 |
hbase_KeyOnlyFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
FilterProtos.KeyOnlyFilter.Builder builder = FilterProtos.KeyOnlyFilter.newBuilder();
builder.setLenAsVal(this.lenAsVal);
return builder.build().toByteArray();
} | 3.68 |
flink_InternalWindowProcessFunction_isWindowLate | /**
* Returns {@code true} if the watermark is after the end timestamp plus the allowed lateness of
* the given window.
*/
protected boolean isWindowLate(W window) {
return (windowAssigner.isEventTime()
&& (toEpochMillsForTimer(cleanupTime(window), ctx.getShiftTimeZone())
<= ctx.currentWatermark()));
} | 3.68 |
hudi_BaseTableMetadata_getAllPartitionPaths | /**
* Return the list of partitions in the dataset.
* <p>
* If the Metadata Table is enabled, the listing is retrieved from the stored metadata. Otherwise, the list of
* partitions is retrieved directly from the underlying {@code FileSystem}.
* <p>
* On any errors retrieving the listing from the metadata, defaults to using the file system listings.
*/
@Override
public List<String> getAllPartitionPaths() throws IOException {
ValidationUtils.checkArgument(isMetadataTableInitialized);
try {
return fetchAllPartitionPaths();
} catch (Exception e) {
throw new HoodieMetadataException("Failed to retrieve list of partition from metadata", e);
}
} | 3.68 |
flink_UserDefinedFunctionHelper_validateImplementationMethods | /**
* Validates the implementation methods such as {@link #SCALAR_EVAL} or {@link
* #AGGREGATE_ACCUMULATE} depending on the {@link UserDefinedFunction} subclass.
*
* <p>This method must be kept in sync with the code generation requirements and the individual
* docs of each function.
*/
private static void validateImplementationMethods(
Class<? extends UserDefinedFunction> functionClass) {
if (ScalarFunction.class.isAssignableFrom(functionClass)) {
validateImplementationMethod(functionClass, false, false, SCALAR_EVAL);
} else if (TableFunction.class.isAssignableFrom(functionClass)) {
validateImplementationMethod(functionClass, true, false, TABLE_EVAL);
} else if (AsyncTableFunction.class.isAssignableFrom(functionClass)) {
validateImplementationMethod(functionClass, true, false, ASYNC_TABLE_EVAL);
} else if (AggregateFunction.class.isAssignableFrom(functionClass)) {
validateImplementationMethod(functionClass, true, false, AGGREGATE_ACCUMULATE);
validateImplementationMethod(functionClass, true, true, AGGREGATE_RETRACT);
validateImplementationMethod(functionClass, true, true, AGGREGATE_MERGE);
} else if (TableAggregateFunction.class.isAssignableFrom(functionClass)) {
validateImplementationMethod(functionClass, true, false, TABLE_AGGREGATE_ACCUMULATE);
validateImplementationMethod(functionClass, true, true, TABLE_AGGREGATE_RETRACT);
validateImplementationMethod(functionClass, true, true, TABLE_AGGREGATE_MERGE);
validateImplementationMethod(
functionClass, true, false, TABLE_AGGREGATE_EMIT, TABLE_AGGREGATE_EMIT_RETRACT);
}
} | 3.68 |
flink_ProcessPythonEnvironmentManager_getBootLog | /** Returns the boot log of the Python Environment. */
public String getBootLog() throws Exception {
File bootLogFile =
new File(resource.baseDirectory + File.separator + "flink-python-udf-boot.log");
String msg = "Failed to create stage bundle factory!";
if (bootLogFile.exists()) {
byte[] output = Files.readAllBytes(bootLogFile.toPath());
msg += String.format(" %s", new String(output, Charset.defaultCharset()));
}
return msg;
} | 3.68 |
flink_TypeInferenceExtractor_forAggregateFunction | /** Extracts a type inference from a {@link AggregateFunction}. */
public static TypeInference forAggregateFunction(
DataTypeFactory typeFactory, Class<? extends AggregateFunction<?, ?>> function) {
final FunctionMappingExtractor mappingExtractor =
new FunctionMappingExtractor(
typeFactory,
function,
UserDefinedFunctionHelper.AGGREGATE_ACCUMULATE,
createParameterSignatureExtraction(1),
createGenericResultExtraction(AggregateFunction.class, 1, false),
createGenericResultExtraction(AggregateFunction.class, 0, true),
createParameterWithAccumulatorVerification());
return extractTypeInference(mappingExtractor);
} | 3.68 |
hudi_InternalSchemaUtils_pruneInternalSchemaByID | /**
* Creates a projected internalSchema.
* Supports nested projection.
*
* @param schema an internal schema.
* @param fieldIds field ids of the projected columns.
* @param topParentFieldIds field ids of the top-level parent columns, used to keep the top-level field order.
* @return a projected internalSchema.
*/
public static InternalSchema pruneInternalSchemaByID(InternalSchema schema, List<Integer> fieldIds, List<Integer> topParentFieldIds) {
Types.RecordType recordType = (Types.RecordType)pruneType(schema.getRecord(), fieldIds);
// reorder top parent fields, since the recordType.fields() produced by pruneType maybe out of order.
List<Types.Field> newFields = new ArrayList<>();
if (topParentFieldIds != null && !topParentFieldIds.isEmpty()) {
for (int id : topParentFieldIds) {
Types.Field f = recordType.field(id);
if (f != null) {
newFields.add(f);
} else {
throw new HoodieSchemaException(String.format("cannot find pruned id %s in currentSchema %s", id, schema.toString()));
}
}
}
return new InternalSchema(newFields.isEmpty() ? recordType : Types.RecordType.get(newFields));
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSqlForMathOperations11 | /**
* @return expected SQL for math operation 11
*/
protected String expectedSqlForMathOperations11() {
return "(a / 100 + 1) / b + 100";
} | 3.68 |
dubbo_ServiceDeployer_getExtensions | /**
* Get extensions by type.
*
* @param extensionClass the type to filter extensions by
* @param <T> the extension type
* @return all registered extensions assignable to the given type
*/
// TODO add javax.annotation.Priority sort
public <T> List<T> getExtensions(Class<T> extensionClass) {
ArrayList<T> exts = new ArrayList<>();
if (extensions.isEmpty()) {
return exts;
}
for (Object extension : extensions) {
if (extensionClass.isAssignableFrom(extension.getClass())) {
exts.add((T) extension);
}
}
return exts;
} | 3.68 |
hudi_AvroInternalSchemaConverter_fixNullOrdering | /**
* Converting from Avro -> internal schema -> Avro
* causes null to always be first in unions.
* If we compare a schema that has not been converted to internal schema
* at any stage, the difference in ordering can cause issues. To resolve this,
* we order null to be first for any Avro schema that enters Hudi.
* AvroSchemaUtils.isProjectionOfInternal uses index-based comparison for unions.
* Spark and Flink don't support complex unions, so this would not be an issue,
* but for the metadata table, HoodieMetadata.avsc uses a trick where we have a bunch of
* different types wrapped in a record for col stats.
*
* @param schema an Avro schema.
* @return an Avro schema where null is first.
*/
public static Schema fixNullOrdering(Schema schema) {
if (schema.getType() == Schema.Type.NULL) {
return schema;
}
return convert(convert(schema), schema.getFullName());
} | 3.68 |
hbase_HRegion_getRegionWALFileSystem | /** Returns the WAL {@link HRegionFileSystem} used by this region */
HRegionWALFileSystem getRegionWALFileSystem() throws IOException {
return new HRegionWALFileSystem(conf, getWalFileSystem(),
CommonFSUtils.getWALTableDir(conf, htableDescriptor.getTableName()), fs.getRegionInfo());
} | 3.68 |
flink_PrioritizedDeque_size | /** Returns the number of priority and non-priority elements. */
public int size() {
return deque.size();
} | 3.68 |
pulsar_ManagedLedgerConfig_setMetadataEnsembleSize | /**
* @param metadataEnsembleSize
* the metadataEnsembleSize to set
*/
public ManagedLedgerConfig setMetadataEnsembleSize(int metadataEnsembleSize) {
this.metadataEnsembleSize = metadataEnsembleSize;
return this;
} | 3.68 |
morf_AbstractSqlDialectTest_provideCustomHint | /**
* This method can be overridden in specific dialects to test providing custom hints in each dialect
* @return a mock CustomHint or an overridden, more specific, CustomHint
*/
@SuppressWarnings("deprecation")
protected CustomHint provideCustomHint() {
return mock(CustomHint.class);
} | 3.68 |
pulsar_AuthenticationDataKeyStoreTls_hasDataForTls | /*
* TLS
*/
@Override
public boolean hasDataForTls() {
return true;
} | 3.68 |
flink_KvStateInfo_duplicate | /**
* Creates a deep copy of the current {@link KvStateInfo} by duplicating all the included
* serializers.
*
* <p>This method assumes correct implementation of the {@link TypeSerializer#duplicate()}
* method of the included serializers.
*/
public KvStateInfo<K, N, V> duplicate() {
final TypeSerializer<K> dupKeySerializer = keySerializer.duplicate();
final TypeSerializer<N> dupNamespaceSerializer = namespaceSerializer.duplicate();
final TypeSerializer<V> dupSVSerializer = stateValueSerializer.duplicate();
if (dupKeySerializer == keySerializer
&& dupNamespaceSerializer == namespaceSerializer
&& dupSVSerializer == stateValueSerializer) {
return this;
}
return new KvStateInfo<>(dupKeySerializer, dupNamespaceSerializer, dupSVSerializer);
} | 3.68 |
morf_SQLEntityNameValidationService_isReservedWord | /**
* Method to establish if a given string is an SQL Reserved Word.
*
* @param word the string to check for being an SQL Reserved Word
* @return true if it is an SQL Reserved Word, otherwise false.
*/
public boolean isReservedWord(String word) {
return schemaValidator.isSQLReservedWord(word);
} | 3.68 |
morf_AbstractSqlDialectTest_windowFunctions | /**
* The window functions to test
*/
private FluentIterable<AliasedField> windowFunctions(){
return FluentIterable.from(Lists.newArrayList(
windowFunction(count()).build(),
windowFunction(count()).partitionBy(field("field1")).build(),
windowFunction(sum(field("field1"))).partitionBy(field("field2"),field("field3")).orderBy(field("field4")).build(),
windowFunction(max(field("field1"))).partitionBy(field("field2"),field("field3")).orderBy(field("field4").asc()).build(),
windowFunction(min(field("field1"))).partitionBy(field("field2"),field("field3")).orderBy(field("field4").desc(),field("field5")).build(),
windowFunction(min(field("field1"))).orderBy(field("field2")).build(),
windowFunction(rowNumber()).partitionBy(field("field2"),field("field3")).orderBy(field("field4")).build(),
windowFunction(rowNumber()).orderBy(field("field2")).build(),
select( windowFunction(min(field("field1"))).orderBy(field("field2")).build().as("window")).from(tableRef("srcTable")).asField()
));
} | 3.68 |
hbase_KeyValue_getBuffer | /**
* To be used only in tests where the Cells are clearly assumed to be of type KeyValue and that we
* need access to the backing array to do some test case related assertions.
* @return The byte array backing this KeyValue.
*/
public byte[] getBuffer() {
return this.bytes;
} | 3.68 |
framework_ScrollbarBundle_setScrollPosByDelta | /**
* Modifies the scroll position of this scrollbar by a number of pixels.
* <p>
* <em>Note:</em> Even though {@code double} values are used, they are
* currently only used as integers as large {@code int} (or small but fast
* {@code long}). This means, all values are truncated to zero decimal
* places.
*
* @param delta
* the delta in pixels to change the scroll position by
*/
public final void setScrollPosByDelta(double delta) {
if (delta != 0) {
setScrollPos(getScrollPos() + delta);
}
} | 3.68 |
hadoop_BlockStorageMovementNeeded_decrementPendingWorkCount | /**
* Decrements the pending work count for a directory once one track info is
* completed.
*/
public synchronized void decrementPendingWorkCount() {
this.pendingWorkCount--;
} | 3.68 |
hadoop_TypedBytesInput_readList | /**
* Reads the list following a <code>Type.LIST</code> code.
* @return the obtained list
* @throws IOException
*/
@SuppressWarnings("unchecked")
public List readList() throws IOException {
List list = new ArrayList();
Object obj = read();
while (obj != null) {
list.add(obj);
obj = read();
}
return list;
} | 3.68 |
hadoop_ActiveAuditManagerS3A_modifyHttpResponse | /**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public SdkHttpResponse modifyHttpResponse(Context.ModifyHttpResponse context,
ExecutionAttributes executionAttributes) {
return span.modifyHttpResponse(context, executionAttributes);
} | 3.68 |
hudi_HoodieMergedLogRecordReader_scan | /**
* Scans delta-log files processing blocks
*/
public final void scan() {
scan(false);
} | 3.68 |
hbase_ExecutorService_getName | /**
* Returns the executor name inferred from the type and the servername on which this is running.
*/
public String getName() {
return getExecutorType().getExecutorName(servername);
} | 3.68 |
hbase_MasterObserver_postCreateNamespace | /**
* Called after the createNamespace operation has been requested.
* @param ctx the environment to interact with the framework and master
* @param ns the NamespaceDescriptor for the table
*/
default void postCreateNamespace(final ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException {
} | 3.68 |
hbase_HeterogeneousRegionCountCostFunction_prepare | /**
* Called once per LB invocation to give the cost function a chance to initialize its state, and perform
* any costly calculation.
*/
@Override
void prepare(final BalancerClusterState cluster) {
this.cluster = cluster;
this.loadRules();
} | 3.68 |
flink_BinarySegmentUtils_getBytes | /** Maybe not copied, if want copy, please use copyTo. */
public static byte[] getBytes(MemorySegment[] segments, int baseOffset, int sizeInBytes) {
// avoid copy if `base` is `byte[]`
if (segments.length == 1) {
byte[] heapMemory = segments[0].getHeapMemory();
if (baseOffset == 0 && heapMemory != null && heapMemory.length == sizeInBytes) {
return heapMemory;
} else {
byte[] bytes = new byte[sizeInBytes];
segments[0].get(baseOffset, bytes, 0, sizeInBytes);
return bytes;
}
} else {
byte[] bytes = new byte[sizeInBytes];
copyMultiSegmentsToBytes(segments, baseOffset, bytes, 0, sizeInBytes);
return bytes;
}
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_splitIndividualAny | /**
* Return a projection of this ANY {@link ResourceRequest} that belongs to
* this sub-cluster. This is done based on the "count" of the containers that
* require locality in each sublcuster (if any) or based on the "weights" and
* headroom.
*/
private void splitIndividualAny(ResourceRequest originalResourceRequest,
Set<SubClusterId> targetSubclusters,
AllocationBookkeeper allocationBookkeeper) throws YarnException {
long allocationId = originalResourceRequest.getAllocationRequestId();
int numContainer = originalResourceRequest.getNumContainers();
// If the ANY request has 0 containers to begin with we must forward it to
// any RM we have previously contacted (this might be the user's way
// to cancel a previous request).
if (numContainer == 0) {
for (SubClusterId targetId : headroom.keySet()) {
allocationBookkeeper.addAnyRR(targetId, originalResourceRequest);
}
return;
}
// List preserves iteration order
List<SubClusterId> targetSCs = new ArrayList<>(targetSubclusters);
// Compute the distribution weights
ArrayList<Float> weightsList = new ArrayList<>();
for (SubClusterId targetId : targetSCs) {
// If ANY is associated with localized asks, split based on their ratio
if (allocationBookkeeper.getSubClustersForId(allocationId) != null) {
weightsList.add(getLocalityBasedWeighting(allocationId, targetId,
allocationBookkeeper));
} else {
// split ANY based on load and policy configuration
float headroomWeighting =
getHeadroomWeighting(targetId, allocationBookkeeper);
float policyWeighting =
getPolicyConfigWeighting(targetId, allocationBookkeeper);
// hrAlpha controls how much headroom influencing decision
weightsList
.add(hrAlpha * headroomWeighting + (1 - hrAlpha) * policyWeighting);
}
}
// Compute the integer container counts for each sub-cluster
ArrayList<Integer> containerNums =
computeIntegerAssignment(numContainer, weightsList);
int i = 0;
for (SubClusterId targetId : targetSCs) {
// if the calculated request is non-empty add it to the answer
if (containerNums.get(i) > 0) {
ResourceRequest out = ResourceRequest.clone(originalResourceRequest);
out.setNumContainers(containerNums.get(i));
if (ResourceRequest.isAnyLocation(out.getResourceName())) {
allocationBookkeeper.addAnyRR(targetId, out);
} else {
allocationBookkeeper.addRackRR(targetId, out);
}
}
i++;
}
} | 3.68 |
dubbo_AbstractStateRouter_setNextRouter | /**
* The next Router node state is maintained by AbstractStateRouter and this method is not allowed to be overridden.
* If a specific router wants to control whether routing continues or not,
* please override {@link AbstractStateRouter#supportContinueRoute()}.
*/
@Override
public final void setNextRouter(StateRouter<T> nextRouter) {
this.nextRouter = nextRouter;
} | 3.68 |
framework_MenuBarConnector_updateFromUIDL | /**
* This method must be implemented to update the client-side component from
* UIDL data received from server.
*
* This method is called when the page is loaded for the first time, and
* every time UI changes in the component are received from the server.
*/
@Override
public void updateFromUIDL(UIDL uidl, ApplicationConnection client) {
if (!isRealUpdate(uidl)) {
return;
}
VMenuBar widget = getWidget();
widget.htmlContentAllowed = uidl
.hasAttribute(MenuBarConstants.HTML_CONTENT_ALLOWED);
if (BrowserInfo.get().isAndroid() || BrowserInfo.get().isIOS()) {
// disable the auto-open on hover on devices that don't support
// hover.
// fixes https://github.com/vaadin/framework/issues/5873
widget.openRootOnHover = false;
} else {
widget.openRootOnHover = uidl.getBooleanAttribute(
MenuBarConstants.OPEN_ROOT_MENU_ON_HOWER);
}
widget.enabled = isEnabled();
// For future connections
widget.client = client;
widget.uidlId = uidl.getId();
Timer timer = new Timer() {
@Override
public void run() {
// Empty the menu every time it receives new information
if (!widget.getItems().isEmpty()) {
widget.clearItems();
}
UIDL options = uidl.getChildUIDL(0);
if (null != getState()
&& !ComponentStateUtil.isUndefinedWidth(getState())) {
UIDL moreItemUIDL = options.getChildUIDL(0);
StringBuilder itemHTML = new StringBuilder();
if (moreItemUIDL.hasAttribute("icon")) {
Icon icon = client.getIcon(
moreItemUIDL.getStringAttribute("icon"));
if (icon != null) {
itemHTML.append(icon.getElement().getString());
}
}
String moreItemText = moreItemUIDL
.getStringAttribute("text");
if ("".equals(moreItemText)) {
moreItemText = "►";
}
itemHTML.append(moreItemText);
widget.moreItem = GWT.create(VMenuBar.CustomMenuItem.class);
widget.moreItem.setHTML(itemHTML.toString());
widget.moreItem.setCommand(VMenuBar.emptyCommand);
widget.collapsedRootItems = new VMenuBar(true, widget);
widget.moreItem.setSubMenu(widget.collapsedRootItems);
widget.moreItem.addStyleName(
widget.getStylePrimaryName() + "-more-menuitem");
}
UIDL uidlItems = uidl.getChildUIDL(1);
Iterator<Object> itr = uidlItems.iterator();
Stack<Iterator<Object>> iteratorStack = new Stack<>();
Stack<VMenuBar> menuStack = new Stack<>();
VMenuBar currentMenu = widget;
while (itr.hasNext()) {
UIDL item = (UIDL) itr.next();
VMenuBar.CustomMenuItem currentItem = null;
final int itemId = item.getIntAttribute("id");
boolean itemHasCommand = item.hasAttribute("command");
boolean itemIsCheckable = item
.hasAttribute(MenuBarConstants.ATTRIBUTE_CHECKED);
String itemHTML = widget.buildItemHTML(item);
Command cmd = null;
if (!item.hasAttribute("separator")) {
if (itemHasCommand || itemIsCheckable) {
// Construct a command that fires onMenuClick(int)
// with the
// item's id-number
cmd = () -> widget.hostReference
.onMenuClick(itemId);
}
}
currentItem = currentMenu.addItem(itemHTML, cmd);
currentItem.setId("" + itemId);
currentItem.updateFromUIDL(item, client);
String domId = getState().id;
if (domId != null && !domId.isEmpty()) {
currentItem.getElement().setId(domId + "-" + itemId);
}
if (item.getChildCount() > 0) {
menuStack.push(currentMenu);
iteratorStack.push(itr);
itr = item.iterator();
currentMenu = new VMenuBar(true, currentMenu);
client.getVTooltip()
.connectHandlersToWidget(currentMenu);
// this is the top-level style that also propagates to
// items -
// any item specific styles are set above in
// currentItem.updateFromUIDL(item, client)
if (ComponentStateUtil.hasStyles(getState())) {
for (String style : getState().styles) {
currentMenu.addStyleDependentName(style);
}
}
currentItem.setSubMenu(currentMenu);
}
while (!itr.hasNext() && !iteratorStack.empty()) {
boolean hasCheckableItem = false;
for (VMenuBar.CustomMenuItem menuItem : currentMenu
.getItems()) {
hasCheckableItem = hasCheckableItem
|| menuItem.isCheckable();
}
if (hasCheckableItem) {
currentMenu.addStyleDependentName("check-column");
} else {
currentMenu
.removeStyleDependentName("check-column");
}
itr = iteratorStack.pop();
currentMenu = menuStack.pop();
}
}
}
};
getLayoutManager().setNeedsHorizontalLayout(MenuBarConnector.this);
if (widget.mouseDownPressed) {
timer.schedule(getState().delayMs);
widget.mouseDownPressed = false;
} else {
timer.run();
}
} | 3.68 |
framework_ScrollbarBundle_recalculateMaxScrollPos | /**
* Calculates and sets maximum scroll position based on the current scroll
* size and the scrollbar's length.
*/
public void recalculateMaxScrollPos() {
double scrollSize = getScrollSize();
double offsetSize = getOffsetSize();
maxScrollPos = Math.max(0, scrollSize - offsetSize);
// make sure that the correct max scroll position is maintained.
setScrollPos(scrollPos);
} | 3.68 |
hbase_CellFlatMap_pollFirstEntry | // The following 2 methods (pollFirstEntry, pollLastEntry) are unsupported because these are
// updating methods.
@Override
public Entry<Cell, Cell> pollFirstEntry() {
throw new UnsupportedOperationException();
} | 3.68 |
flink_FromClasspathEntryClassInformationProvider_getJobClassName | /**
* Returns the job class name if it could be derived from the specified classpath or was
* explicitly specified.
*
* @return The job class name or an empty {@code Optional} if none was specified and it couldn't
* be derived from the classpath.
*/
@Override
public Optional<String> getJobClassName() {
return Optional.of(jobClassName);
} | 3.68 |
flink_UnresolvedDataType_toDataType | /**
* Converts this instance to a resolved {@link DataType} possibly enriched with additional
* nullability and conversion class information.
*/
public DataType toDataType(DataTypeFactory factory) {
DataType resolvedDataType = resolutionFactory.apply(factory);
if (isNullable == Boolean.TRUE) {
resolvedDataType = resolvedDataType.nullable();
} else if (isNullable == Boolean.FALSE) {
resolvedDataType = resolvedDataType.notNull();
}
if (conversionClass != null) {
resolvedDataType = resolvedDataType.bridgedTo(conversionClass);
}
return resolvedDataType;
} | 3.68 |
framework_AriaHelper_setVisibleForAssistiveDevicesOnly | /**
* Allows moving an element out of the visible area of the browser window.
*
* This makes it possible to have additional information for an assistive
* device, that is not in the way for visual users.
*
* @param element
* Element to move out of sight
* @param assistiveOnly
* {@code true} when element should only be visible for assistive
* devices, {@code false} to make the element visible for all
*/
public static void setVisibleForAssistiveDevicesOnly(Element element,
boolean assistiveOnly) {
if (assistiveOnly) {
element.addClassName(ASSISTIVE_DEVICE_ONLY_STYLE);
} else {
element.removeClassName(ASSISTIVE_DEVICE_ONLY_STYLE);
}
} | 3.68 |
hadoop_NMClientAsync_onContainerRestart | /**
* Callback for container restart.
*
* @param containerId the Id of the container to restart.
*/
public void onContainerRestart(ContainerId containerId) {} | 3.68 |
dubbo_RouterChain_setInvokers | /**
 * Notifies the router chain of the initial addresses from the registry when they are first received,
 * and again whenever the addresses in the registry change.
*/
public synchronized void setInvokers(BitList<Invoker<T>> invokers, Runnable switchAction) {
try {
            // Lock to prevent the directory from continuing to list invokers
lock.writeLock().lock();
            // Switch to the backup chain. The main chain will be updated first.
currentChain = backupChain;
} finally {
            // Release the lock to minimize the impact on newly created invocations as much as possible.
            // Holding this lock through the main chain update would block routing and cause a long hang.
lock.writeLock().unlock();
}
        // Refresh the main chain.
        // No one can request the main chain now: `currentChain` is the backup chain, so `route` cannot
        // access the main chain.
try {
            // Lock the main chain to wait for all in-flight invocations to end,
            // i.e. until no one is using the main chain.
mainChain.getLock().writeLock().lock();
// refresh
mainChain.setInvokers(invokers);
} catch (Throwable t) {
logger.error(LoggerCodeConstants.INTERNAL_ERROR, "", "", "Error occurred when refreshing router chain.", t);
throw t;
} finally {
// Unlock main chain
mainChain.getLock().writeLock().unlock();
}
        // Store the reference to the new invokers in a temporary variable.
        // Reason: the next step switches the invokers reference in the directory, so `route` has to check
        // the `availableInvokers` argument. If the current invocation already uses the new invokers, the
        // main chain must be used for routing; this prevents routing the new invokers through the backup
        // chain, which can only route the original invokers at this point.
notifyingInvokers.set(invokers);
        // Switch the invokers reference in the directory.
        // This must not happen before the main chain update or after the backup chain update succeeds,
        // otherwise the state would become inconsistent.
switchAction.run();
try {
            // Lock to prevent the directory from continuing to list invokers.
            // The invokers reference in the directory should now be the new one, and the new one should
            // always be used once the lock is released.
lock.writeLock().lock();
            // Switch to the main chain. The backup chain will be updated later.
currentChain = mainChain;
            // Clean up the temporary variable.
            // The `availableInvokers` check is no longer needed, because `route` will no longer receive
            // any `availableInvokers` related to the original invokers. The getter for the invokers
            // reference in the directory is locked now and will return the new invokers once the lock is
            // released.
notifyingInvokers.set(null);
} finally {
            // Release the lock to minimize the impact on newly created invocations as much as possible.
            // The new invokers and the main chain are in use from now on.
lock.writeLock().unlock();
}
        // Refresh the backup chain.
        // No one can request the backup chain now: `currentChain` is the main chain, so `route` cannot
        // access the backup chain.
try {
            // Lock the backup chain to wait for all in-flight invocations to end
backupChain.getLock().writeLock().lock();
// refresh
backupChain.setInvokers(invokers);
} catch (Throwable t) {
logger.error(LoggerCodeConstants.INTERNAL_ERROR, "", "", "Error occurred when refreshing router chain.", t);
throw t;
} finally {
// Unlock backup chain
backupChain.getLock().writeLock().unlock();
}
} | 3.68 |
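The method above implements a double-buffered router chain: traffic is diverted to the backup chain, the main chain is rebuilt under its own lock, the directory reference is switched, traffic moves back to the main chain, and finally the backup chain is brought up to date. A stripped-down sketch of that swap protocol with hypothetical types and no Dubbo APIs (locking granularity simplified):

import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// A stripped-down sketch of the main/backup chain swap above (hypothetical types, no Dubbo APIs).
class DoubleBufferedChain<T> {

    static final class Chain<U> {
        final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        volatile List<U> invokers = List.of();
    }

    private final ReentrantReadWriteLock switchLock = new ReentrantReadWriteLock();
    private final Chain<T> mainChain = new Chain<>();
    private final Chain<T> backupChain = new Chain<>();
    private volatile Chain<T> currentChain = mainChain;

    // Readers briefly take the switch lock, then route on whichever chain is current.
    List<T> route() {
        switchLock.readLock().lock();
        try {
            return currentChain.invokers;
        } finally {
            switchLock.readLock().unlock();
        }
    }

    // Writers refresh the chain that is *not* current, swap, then refresh the other one.
    synchronized void setInvokers(List<T> invokers, Runnable switchAction) {
        switchLock.writeLock().lock();
        try {
            currentChain = backupChain;          // divert traffic while the main chain is rebuilt
        } finally {
            switchLock.writeLock().unlock();     // release early so routing never blocks for long
        }
        mainChain.lock.writeLock().lock();       // wait for in-flight users of the main chain
        try {
            mainChain.invokers = invokers;
        } finally {
            mainChain.lock.writeLock().unlock();
        }
        switchAction.run();                      // e.g. swap the directory's invoker reference
        switchLock.writeLock().lock();
        try {
            currentChain = mainChain;            // traffic goes back to the refreshed main chain
        } finally {
            switchLock.writeLock().unlock();
        }
        backupChain.lock.writeLock().lock();     // finally bring the backup chain up to date too
        try {
            backupChain.invokers = invokers;
        } finally {
            backupChain.lock.writeLock().unlock();
        }
    }
}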
hbase_CatalogReplicaLoadBalanceSelectorFactory_createSelector | /**
 * Create a {@link CatalogReplicaLoadBalanceSelector} based on input config.
 * @param replicaSelectorClass Selector class name.
 * @param tableName System table name.
 * @param conn {@link AsyncConnectionImpl}
 * @param getReplicaCount Supplier of the current replica count for the table.
 * @return {@link CatalogReplicaLoadBalanceSelector}
*/
public static CatalogReplicaLoadBalanceSelector createSelector(String replicaSelectorClass,
TableName tableName, AsyncConnectionImpl conn, IntSupplier getReplicaCount) {
return ReflectionUtils.instantiateWithCustomCtor(replicaSelectorClass,
new Class[] { TableName.class, AsyncConnectionImpl.class, IntSupplier.class },
new Object[] { tableName, conn, getReplicaCount });
} | 3.68 |
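ReflectionUtils.instantiateWithCustomCtor is HBase's helper; with plain JDK reflection the equivalent lookup-and-invoke is roughly the following sketch (hypothetical helper, simplified error handling):

import java.lang.reflect.Constructor;

final class Reflection {
    // Roughly what a custom-constructor instantiation helper does with plain JDK reflection.
    @SuppressWarnings("unchecked")
    static <T> T instantiate(String className, Class<?>[] ctorTypes, Object[] ctorArgs) {
        try {
            Class<?> clazz = Class.forName(className);
            Constructor<?> ctor = clazz.getDeclaredConstructor(ctorTypes);
            ctor.setAccessible(true);
            return (T) ctor.newInstance(ctorArgs);
        } catch (ReflectiveOperationException e) {
            throw new RuntimeException("Unable to instantiate " + className, e);
        }
    }
}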
hadoop_SQLDelegationTokenSecretManager_storeToken | /**
* Persists a TokenIdentifier and its corresponding TokenInformation into
* the SQL database. The TokenIdentifier is expected to be unique and any
* duplicate token attempts will result in an IOException.
* @param ident TokenIdentifier to persist.
 * @param tokenInfo DelegationTokenInformation associated with the TokenIdentifier.
 * @throws IOException if the token cannot be serialized or stored in the SQL database.
 */
@Override
protected void storeToken(TokenIdent ident,
DelegationTokenInformation tokenInfo) throws IOException {
try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(bos)) {
tokenInfo.write(dos);
// Add token to SQL database
insertToken(ident.getSequenceNumber(), ident.getBytes(), bos.toByteArray());
// Add token to local cache
super.storeToken(ident, tokenInfo);
} catch (SQLException e) {
throw new IOException("Failed to store token in SQL secret manager", e);
}
} | 3.68 |
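The serialization step above is the standard Writable-to-byte-array pattern: write into a DataOutputStream backed by a ByteArrayOutputStream inside try-with-resources, then hand the bytes to the store. A small self-contained sketch of just that step (hypothetical Writable interface, not Hadoop's):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class TokenSerialization {

    interface Writable {
        void write(DataOutputStream out) throws IOException;
    }

    // Serialize a Writable into a byte[] suitable for a BLOB/VARBINARY column.
    static byte[] toBytes(Writable value) throws IOException {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
             DataOutputStream dos = new DataOutputStream(bos)) {
            value.write(dos);
            dos.flush();
            return bos.toByteArray();
        }
    }
}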
pulsar_AdditionalServletUtils_getAdditionalServletDefinition | /**
* Retrieve the additional servlet definition from the provided nar package.
*
 * @param narPath the path to the additional servlet NAR package
 * @param narExtractionDirectory the directory into which the NAR package is extracted
 * @return the additional servlet definition
 * @throws IOException when the additional servlet or its definition cannot be loaded
*/
public AdditionalServletDefinition getAdditionalServletDefinition(
String narPath, String narExtractionDirectory) throws IOException {
try (NarClassLoader ncl = NarClassLoaderBuilder.builder()
.narFile(new File(narPath))
.extractionDirectory(narExtractionDirectory)
.build();) {
return getAdditionalServletDefinition(ncl);
}
} | 3.68 |
flink_HyperLogLogPlusPlus_distance | /**
 * Uses the square of the difference between the current estimate and the estimate at the given
 * index as the distance metric.
*/
private double distance(double e, double[] estimates, int i) {
double diff = e - estimates[i];
return diff * diff;
} | 3.68 |
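HyperLogLog++ uses this squared difference to find the raw estimates closest to the current one and average their pre-computed biases. A standalone sketch of that k-nearest-neighbour bias lookup over parallel arrays (hypothetical data, not Flink's tables; assumes the arrays hold at least k entries):

// Sketch: average the biases of the k raw estimates closest (by squared difference) to e.
final class BiasCorrection {
    static double estimateBias(double e, double[] rawEstimates, double[] biases, int k) {
        double[] bestDist = new double[k];
        double[] bestBias = new double[k];
        java.util.Arrays.fill(bestDist, Double.MAX_VALUE);
        for (int i = 0; i < rawEstimates.length; i++) {
            double diff = e - rawEstimates[i];
            double dist = diff * diff;              // the same metric as distance() above
            // Replace the current worst of the k best if this estimate is closer.
            int worst = 0;
            for (int j = 1; j < k; j++) {
                if (bestDist[j] > bestDist[worst]) {
                    worst = j;
                }
            }
            if (dist < bestDist[worst]) {
                bestDist[worst] = dist;
                bestBias[worst] = biases[i];
            }
        }
        double sum = 0;
        for (double b : bestBias) {
            sum += b;
        }
        return sum / k;
    }
}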
hbase_ZKWatcher_filterMetaReplicaNodes | /**
* @param nodes Input list of znodes
* @return Filtered list of znodes from nodes that belong to meta replica(s).
*/
private List<String> filterMetaReplicaNodes(List<String> nodes) {
if (nodes == null || nodes.isEmpty()) {
return new ArrayList<>();
}
List<String> metaReplicaNodes = new ArrayList<>(2);
String pattern = conf.get(ZNodePaths.META_ZNODE_PREFIX_CONF_KEY, ZNodePaths.META_ZNODE_PREFIX);
for (String child : nodes) {
if (child.startsWith(pattern)) {
metaReplicaNodes.add(child);
}
}
return metaReplicaNodes;
} | 3.68 |
flink_AbstractHeapState_getStateTable | /** This should only be used for testing. */
@VisibleForTesting
public StateTable<K, N, SV> getStateTable() {
return stateTable;
} | 3.68 |
hbase_RegionScannerImpl_resetFilters | /**
* Reset both the filter and the old filter.
* @throws IOException in case a filter raises an I/O exception.
*/
protected final void resetFilters() throws IOException {
if (filter != null) {
filter.reset();
}
} | 3.68 |
hadoop_AclEntryType_toStringStable | /**
* Returns a string representation guaranteed to be stable across versions to
* satisfy backward compatibility requirements, such as for shell command
* output or serialization.
*
* @return stable, backward compatible string representation
*/
public String toStringStable() {
// The base implementation uses the enum value names, which are public API
// and therefore stable.
return super.toString();
} | 3.68 |
hadoop_ServiceRecord_addExternalEndpoint | /**
 * Adds an external endpoint.
 * @param endpoint the endpoint to add; must not be null
*/
public void addExternalEndpoint(Endpoint endpoint) {
Preconditions.checkArgument(endpoint != null);
endpoint.validate();
external.add(endpoint);
} | 3.68 |
pulsar_ManagedCursorContainer_cursorUpdated | /**
* Signal that a cursor position has been updated and that the container must re-order the cursor heap
* tracking the slowest reader.
 * Only cursors that were added to the container with the {@link #add(ManagedCursor, Position)}
 * method, with an initial position supplied in the position parameter, are tracked and can be
 * updated.
*
* @param cursor the cursor to update the position for
* @param newPosition the updated position for the cursor
* @return a pair of positions, representing the previous slowest reader and the new slowest reader (after the
* update).
*/
public Pair<PositionImpl, PositionImpl> cursorUpdated(ManagedCursor cursor, Position newPosition) {
requireNonNull(cursor);
long stamp = rwLock.writeLock();
try {
Item item = cursors.get(cursor.getName());
if (item == null || item.idx == -1) {
return null;
}
PositionImpl previousSlowestConsumer = heap.get(0).position;
item.position = (PositionImpl) newPosition;
if (heap.size() == 1) {
return Pair.of(previousSlowestConsumer, item.position);
}
// When the cursor moves forward, we need to push it toward the
// bottom of the tree and push it up if a reset was done
if (item.idx == 0 || getParent(item).position.compareTo(item.position) <= 0) {
siftDown(item);
} else {
siftUp(item);
}
PositionImpl newSlowestConsumer = heap.get(0).position;
return Pair.of(previousSlowestConsumer, newSlowestConsumer);
} finally {
rwLock.unlockWrite(stamp);
}
} | 3.68 |
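The sift direction chosen above is the classic min-heap key-update rule: if the changed key is no smaller than its parent it can only need to move down, otherwise it can only need to move up. A self-contained sketch of that rule on a plain long[] min-heap (hypothetical class, not the Pulsar container):

// A plain long[] min-heap illustrating the sift-direction rule used above (hypothetical class).
final class MinHeap {
    private final long[] heap;
    private int size;

    MinHeap(int capacity) {
        this.heap = new long[capacity];
    }

    void add(long key) {
        heap[size] = key;
        siftUp(size);
        size++;
    }

    // heap[0] is the minimum key -- the "slowest reader" in the cursor-container analogy.
    long min() {
        return heap[0];
    }

    // After changing the key at index i, restore the heap property by moving the entry
    // in exactly one direction, just like the cursor update above.
    void keyChanged(int i) {
        int parent = (i - 1) / 2;
        if (i == 0 || heap[parent] <= heap[i]) {
            siftDown(i);   // key grew (cursor moved forward): push it towards the leaves
        } else {
            siftUp(i);     // key shrank (cursor was rewound): pull it towards the root
        }
    }

    private void siftUp(int i) {
        while (i > 0) {
            int parent = (i - 1) / 2;
            if (heap[parent] <= heap[i]) {
                break;
            }
            swap(i, parent);
            i = parent;
        }
    }

    private void siftDown(int i) {
        while (true) {
            int left = 2 * i + 1;
            int right = 2 * i + 2;
            int smallest = i;
            if (left < size && heap[left] < heap[smallest]) {
                smallest = left;
            }
            if (right < size && heap[right] < heap[smallest]) {
                smallest = right;
            }
            if (smallest == i) {
                break;
            }
            swap(i, smallest);
            i = smallest;
        }
    }

    private void swap(int a, int b) {
        long tmp = heap[a];
        heap[a] = heap[b];
        heap[b] = tmp;
    }
}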
flink_ShuffleMaster_registerJob | /**
* Registers the target job together with the corresponding {@link JobShuffleContext} to this
* shuffle master. Through the shuffle context, one can obtain some basic information like job
* ID, job configuration. It enables ShuffleMaster to notify JobMaster about lost result
* partitions, so that JobMaster can identify and reproduce unavailable partitions earlier.
*
* @param context the corresponding shuffle context of the target job.
*/
default void registerJob(JobShuffleContext context) {} | 3.68 |
morf_DatabaseMetaDataProvider_dataTypeFromSqlType | /**
* Converts a given SQL data type to a {@link DataType}.
*
* @param typeCode JDBC data type.
* @param typeName JDBC type name.
* @param width JDBC column size.
* @return Morf data type.
*/
protected DataType dataTypeFromSqlType(int typeCode, String typeName, int width) {
switch (typeCode) {
case Types.TINYINT:
case Types.SMALLINT:
case Types.INTEGER:
return DataType.INTEGER;
case Types.BIGINT:
return DataType.BIG_INTEGER;
case Types.FLOAT:
case Types.REAL:
case Types.DOUBLE:
case Types.NUMERIC:
case Types.DECIMAL:
return DataType.DECIMAL;
case Types.CHAR:
case Types.VARCHAR:
case Types.LONGVARCHAR:
case Types.LONGNVARCHAR:
case Types.NVARCHAR:
return DataType.STRING;
case Types.BOOLEAN:
case Types.BIT:
return DataType.BOOLEAN;
case Types.DATE:
return DataType.DATE;
case Types.BLOB:
case Types.BINARY:
case Types.VARBINARY:
case Types.LONGVARBINARY:
return DataType.BLOB;
case Types.NCLOB:
case Types.CLOB:
return DataType.CLOB;
default:
throw new UnexpectedDataTypeException("Unsupported data type [" + typeName + "] (type " + typeCode + " width " + width + ")");
}
} | 3.68 |
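The values this mapper consumes (type code, type name, column size) come from JDBC column metadata. A short sketch of how a caller might obtain them with standard java.sql calls before handing them to a mapping like the one above (table name and printing are illustrative):

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

final class ColumnTypeDump {
    // Walks a table's columns via DatabaseMetaData and prints the values a mapper
    // like dataTypeFromSqlType(typeCode, typeName, width) would receive.
    static void dumpColumnTypes(Connection connection, String tableName) throws SQLException {
        DatabaseMetaData metaData = connection.getMetaData();
        try (ResultSet columns = metaData.getColumns(null, null, tableName, null)) {
            while (columns.next()) {
                String columnName = columns.getString("COLUMN_NAME");
                int typeCode = columns.getInt("DATA_TYPE");      // a java.sql.Types constant
                String typeName = columns.getString("TYPE_NAME");
                int width = columns.getInt("COLUMN_SIZE");
                System.out.printf("%s -> code=%d name=%s width=%d%n",
                        columnName, typeCode, typeName, width);
            }
        }
    }
}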