name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hmily_GrpcHmilyContext_removeAfterInvoke | /**
 * Removes the Hmily context after the invocation.
*
*/
public static void removeAfterInvoke() {
GrpcHmilyContext.getHmilyClass().remove();
} | 3.68 |
framework_AbstractComponentContainer_removeComponentAttachListener | /* documented in interface */
@Override
@Deprecated
public void removeComponentAttachListener(
ComponentAttachListener listener) {
removeListener(ComponentAttachEvent.class, listener,
ComponentAttachListener.attachMethod);
} | 3.68 |
hudi_Transient_lazy | /**
 * Creates an instance of {@link Transient} that lazily executes the provided {@code initializer}
 * to instantiate a value of type {@link T}. The same initializer will be used to re-instantiate
 * the value after the original one is dropped during a serialization/deserialization cycle.
*/
public static <T> Transient<T> lazy(SerializableSupplier<T> initializer) {
return new Transient<>(initializer);
} | 3.68 |
hadoop_LocalTempDir_tempPath | /**
* Get a temporary path.
* @param conf configuration to use when creating the allocator
* @param prefix filename prefix
* @param size file size, or -1 if not known
* @return the temp path.
* @throws IOException IO failure
*/
public static Path tempPath(Configuration conf, String prefix, long size)
throws IOException {
return getAllocator(conf, BUFFER_DIR)
.getLocalPathForWrite(prefix, size, conf);
} | 3.68 |
framework_GridElement_save | /**
* Saves the fields of this editor.
* <p>
 * <em>Note:</em> this closes the editor, making this element
 * useless.
*/
public void save() {
findElement(By.className("v-grid-editor-save")).click();
} | 3.68 |
hadoop_S3AReadOpContext_getReadahead | /**
* Get the readahead for this operation.
* @return a value {@literal >=} 0
*/
public long getReadahead() {
return readahead;
} | 3.68 |
querydsl_GenericExporter_setNameSuffix | /**
 * Sets the name suffix.
 *
 * @param suffix the name suffix
*/
public void setNameSuffix(String suffix) {
codegenModule.bind(CodegenModule.SUFFIX, suffix);
} | 3.68 |
flink_ExtractionUtils_hasInvokableConstructor | /**
* Checks for an invokable constructor matching the given arguments.
*
* @see #isInvokable(Executable, Class[])
*/
public static boolean hasInvokableConstructor(Class<?> clazz, Class<?>... classes) {
for (Constructor<?> constructor : clazz.getDeclaredConstructors()) {
if (isInvokable(constructor, classes)) {
return true;
}
}
return false;
} | 3.68 |
hadoop_PendingSet_getCommits | /**
* @return commit list.
*/
public List<SinglePendingCommit> getCommits() {
return commits;
} | 3.68 |
rocketmq-connect_ServiceProviderUtil_getConfigManagementService | /**
 * Gets the config management service by class name.
 *
 * @param configManagementServiceClazz the fully qualified class name of the service implementation; if empty, {@link LocalConfigManagementServiceImpl} is used
 * @return the matching {@link ConfigManagementService} instance
*/
@NotNull
public static ConfigManagementService getConfigManagementService(String configManagementServiceClazz) {
if (StringUtils.isEmpty(configManagementServiceClazz)) {
configManagementServiceClazz = LocalConfigManagementServiceImpl.class.getName();
}
ConfigManagementService configManagementService = null;
ServiceLoader<ConfigManagementService> configManagementServiceServiceLoader = ServiceLoader.load(ConfigManagementService.class);
Iterator<ConfigManagementService> configManagementServiceIterator = configManagementServiceServiceLoader.iterator();
while (configManagementServiceIterator.hasNext()) {
ConfigManagementService currentConfigManagementService = configManagementServiceIterator.next();
if (currentConfigManagementService.getClass().getName().equals(configManagementServiceClazz)) {
configManagementService = currentConfigManagementService;
break;
}
}
if (null == configManagementService) {
throw new ConnectException("ConfigManagementService class " + configManagementServiceClazz + " not " +
"found");
}
return configManagementService;
} | 3.68 |
hudi_FlinkOptions_flatOptions | /**
 * Collects all the config options; the 'properties.' prefix is removed if an option key starts with it.
*/
public static Configuration flatOptions(Configuration conf) {
final Map<String, String> propsMap = new HashMap<>();
conf.toMap().forEach((key, value) -> {
final String subKey = key.startsWith(PROPERTIES_PREFIX)
? key.substring((PROPERTIES_PREFIX).length())
: key;
propsMap.put(subKey, value);
});
return fromMap(propsMap);
} | 3.68 |
hbase_Mutation_checkRow | /**
   * @param row Row to check
   * @param offset the offset into <code>row</code>
   * @param length the length of the row to check
   * @return <code>row</code>
   * @throws IllegalArgumentException Thrown if <code>row</code> is empty or null, or if the length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH}
*/
static byte[] checkRow(final byte[] row, final int offset, final int length) {
if (row == null) {
throw new IllegalArgumentException("Row buffer is null");
}
if (length == 0) {
throw new IllegalArgumentException("Row length is 0");
}
if (length > HConstants.MAX_ROW_LENGTH) {
throw new IllegalArgumentException(
"Row length " + length + " is > " + HConstants.MAX_ROW_LENGTH);
}
return row;
} | 3.68 |
flink_TableDescriptor_forConnector | /**
* Creates a new {@link Builder} for a table using the given connector.
*
* @param connector The factory identifier for the connector.
*/
public static Builder forConnector(String connector) {
Preconditions.checkNotNull(connector, "Table descriptors require a connector identifier.");
final Builder descriptorBuilder = new Builder();
descriptorBuilder.option(FactoryUtil.CONNECTOR, connector);
return descriptorBuilder;
} | 3.68 |
hadoop_RouterDelegationTokenSecretManager_getTokenByRouterStoreToken | /**
* Get RMDelegationTokenIdentifier according to RouterStoreToken.
*
* @param identifier RMDelegationTokenIdentifier
* @return RMDelegationTokenIdentifier
* @throws YarnException An internal conversion error occurred when getting the Token
* @throws IOException IO exception occurred
*/
public RMDelegationTokenIdentifier getTokenByRouterStoreToken(
RMDelegationTokenIdentifier identifier) throws YarnException, IOException {
try {
RouterRMTokenResponse response = federationFacade.getTokenByRouterStoreToken(identifier);
YARNDelegationTokenIdentifier responseIdentifier =
response.getRouterStoreToken().getTokenIdentifier();
return (RMDelegationTokenIdentifier) responseIdentifier;
} catch (Exception ex) {
throw new YarnException(ex);
}
} | 3.68 |
hadoop_SolverPreprocessor_aggregateSkylines | /**
* Aggregate all job's {@link ResourceSkyline}s in the one run of recurring
* pipeline, and return the aggregated {@link ResourceSkyline}s in different
* runs.
*
* @param jobHistory the history {@link ResourceSkyline} of the recurring
* pipeline job.
* @param minJobRuns the minimum number of job runs required to run the
* solver.
* @return the aggregated {@link ResourceSkyline}s in different runs.
   * @throws InvalidInputException if (1) job submission time parsing fails, or
   *           (2) jobHistory has fewer job runs than the minimum requirement.
*/
public final List<ResourceSkyline> aggregateSkylines(
final Map<RecurrenceId, List<ResourceSkyline>> jobHistory,
final int minJobRuns) throws InvalidInputException {
List<ResourceSkyline> resourceSkylines = new ArrayList<ResourceSkyline>();
for (Map.Entry<RecurrenceId, List<ResourceSkyline>> entry : jobHistory
.entrySet()) {
      // TODO: identify different jobs within the same pipeline.
      // Right now, we do prediction at the granularity of the pipeline, i.e., we
      // merge the resource skylines of jobs within the same pipeline into one
      // aggregated resource skyline.
      ResourceSkyline skylineAgg = mergeSkyline(entry.getValue());
resourceSkylines.add(skylineAgg);
}
int numJobs = resourceSkylines.size();
if (numJobs < minJobRuns) {
LOGGER.error(
"Solver requires job resource skyline history for at least {} runs,"
+ " but it only receives history info for {} runs.",
minJobRuns, numJobs);
      throw new InvalidInputException("Job ResourceSkyline history",
          "containing fewer job runs than " + minJobRuns);
}
return resourceSkylines;
} | 3.68 |
hmily_DubboHmilyInventoryApplication_main | /**
* main.
*
* @param args args
*/
public static void main(final String[] args) {
SpringApplication springApplication = new SpringApplication(DubboHmilyInventoryApplication.class);
springApplication.setWebApplicationType(WebApplicationType.NONE);
springApplication.run(args);
} | 3.68 |
hbase_DirectMemoryUtils_destroyDirectByteBuffer | /**
   * DirectByteBuffers are garbage collected by using a phantom reference and a reference queue.
   * Every once in a while, the JVM checks the reference queue and cleans the DirectByteBuffers.
   * However, as this doesn't happen immediately after discarding all references to a
   * DirectByteBuffer, it's easy to run into an OutOfMemoryError when using DirectByteBuffers. This
   * method explicitly calls the Cleaner method of a DirectByteBuffer via reflection.
   * @param toBeDestroyed the DirectByteBuffer that will be "cleaned"
*/
public static void destroyDirectByteBuffer(ByteBuffer toBeDestroyed)
throws IllegalArgumentException, IllegalAccessException, InvocationTargetException,
SecurityException, NoSuchMethodException {
Preconditions.checkArgument(toBeDestroyed.isDirect(), "toBeDestroyed isn't direct!");
Method cleanerMethod = toBeDestroyed.getClass().getMethod("cleaner");
cleanerMethod.setAccessible(true);
Object cleaner = cleanerMethod.invoke(toBeDestroyed);
Method cleanMethod = cleaner.getClass().getMethod("clean");
cleanMethod.setAccessible(true);
cleanMethod.invoke(cleaner);
} | 3.68 |
framework_MenuBarsWithNesting_createFirstMenuBar | /*
* Returns a menu bar with three levels of nesting but no icons.
*/
private MenuBar createFirstMenuBar() {
MenuBar menuBar = new MenuBar();
MenuItem file = menuBar.addItem("File", null);
file.addItem("Open", selectionCommand);
file.addItem("Save", selectionCommand);
file.addItem("Save As..", selectionCommand);
file.addSeparator();
MenuItem export = file.addItem("Export..", null);
export.addItem("As PDF...", selectionCommand);
export.addItem("As Doc...", selectionCommand);
file.addSeparator();
file.addItem("Exit", selectionCommand);
MenuItem edit = menuBar.addItem("Edit", null);
edit.addItem("Copy", selectionCommand);
edit.addItem("Cut", selectionCommand);
edit.addItem("Paste", selectionCommand);
menuBar.addItem("Help", selectionCommand);
MenuItem disabled = menuBar.addItem("Disabled", null);
disabled.setEnabled(false);
disabled.addItem("Can't reach", selectionCommand);
return menuBar;
} | 3.68 |
hudi_MarkerDirState_getPendingMarkerCreationRequests | /**
 * @param shouldClear whether to clear the internal request list
* @return futures of pending marker creation requests.
*/
public List<MarkerCreationFuture> getPendingMarkerCreationRequests(boolean shouldClear) {
List<MarkerCreationFuture> pendingFutures;
synchronized (markerCreationFutures) {
if (markerCreationFutures.isEmpty()) {
return new ArrayList<>();
}
pendingFutures = new ArrayList<>(markerCreationFutures);
if (shouldClear) {
markerCreationFutures.clear();
}
}
return pendingFutures;
} | 3.68 |
hbase_TableInputFormat_addColumn | /**
* Parses a combined family and qualifier and adds either both or just the family in case there is
* no qualifier. This assumes the older colon divided notation, e.g. "family:qualifier".
* @param scan The Scan to update.
* @param familyAndQualifier family and qualifier
* @throws IllegalArgumentException When familyAndQualifier is invalid.
*/
private static void addColumn(Scan scan, byte[] familyAndQualifier) {
byte[][] fq = CellUtil.parseColumn(familyAndQualifier);
if (fq.length == 1) {
scan.addFamily(fq[0]);
} else if (fq.length == 2) {
scan.addColumn(fq[0], fq[1]);
} else {
throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
}
} | 3.68 |
hadoop_XAttrStorage_readINodeXAttrs | /**
* Reads the existing extended attributes of an inode.
* <p>
* Must be called while holding the FSDirectory read lock.
*
* @param inodeAttr INodeAttributes to read.
* @return {@code XAttr} list.
*/
public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) {
XAttrFeature f = inodeAttr.getXAttrFeature();
return f == null ? new ArrayList<XAttr>(0) : f.getXAttrs();
} | 3.68 |
hbase_Scan_getTimeRange | /** Returns the TimeRange. */
public TimeRange getTimeRange() {
return this.tr;
} | 3.68 |
hudi_HoodieFlinkCompactor_start | /**
* Main method to start compaction service.
*/
public void start(boolean serviceMode) throws Exception {
if (serviceMode) {
compactionScheduleService.start(null);
try {
compactionScheduleService.waitForShutdown();
} catch (Exception e) {
throw new HoodieException(e.getMessage(), e);
} finally {
LOG.info("Shut down hoodie flink compactor");
}
} else {
LOG.info("Hoodie Flink Compactor running only single round");
try {
compactionScheduleService.compact();
} catch (ApplicationExecutionException aee) {
if (aee.getMessage().contains(NO_EXECUTE_KEYWORD)) {
LOG.info("Compaction is not performed");
} else {
throw aee;
}
} catch (Exception e) {
LOG.error("Got error running delta sync once. Shutting down", e);
throw e;
} finally {
LOG.info("Shut down hoodie flink compactor");
}
}
} | 3.68 |
flink_ExecutionFailureHandler_getFailureHandlingResult | /**
     * Returns the result of failure handling. This can be a set of task vertices to restart
     * together with a restart delay, or, if the failure is not recoverable, the reason for it.
*
* @param failedExecution is the failed execution
* @param cause of the task failure
* @param timestamp of the task failure
* @return result of the failure handling
*/
public FailureHandlingResult getFailureHandlingResult(
Execution failedExecution, Throwable cause, long timestamp) {
return handleFailure(
failedExecution,
cause,
timestamp,
failoverStrategy.getTasksNeedingRestart(failedExecution.getVertex().getID(), cause),
false);
} | 3.68 |
hibernate-validator_GetDeclaredMethodHandle_andMakeAccessible | /**
* Before using this method on arbitrary classes, you need to check the {@code HibernateValidatorPermission.ACCESS_PRIVATE_MEMBERS}
* permission against the security manager, if the calling class exposes the handle to clients.
*/
public static GetDeclaredMethodHandle andMakeAccessible(Lookup lookup, Class<?> clazz, String methodName, Class<?>... parameterTypes) {
return new GetDeclaredMethodHandle( lookup, clazz, methodName, true, parameterTypes );
} | 3.68 |
druid_IbatisUtils_getId | /**
     * Obtains the id via reflection; compatible with both versions 2.3.0 and 2.3.4.
     *
     * @param statement the statement object to read the id from
     * @return the id, or {@code null} if it cannot be obtained
*/
protected static String getId(Object statement) {
try {
if (methodGetId == null) {
Class<?> clazz = statement.getClass();
methodGetId = clazz.getMethod("getId");
}
Object returnValue = methodGetId.invoke(statement);
if (returnValue == null) {
return null;
}
return returnValue.toString();
} catch (Exception ex) {
LOG.error("createIdError", ex);
return null;
}
} | 3.68 |
hbase_Operation_toString | /**
* Produces a string representation of this Operation. It defaults to a JSON representation, but
* falls back to a string representation of the fingerprint and details in the case of a JSON
* encoding failure.
*/
@Override
public String toString() {
return toString(DEFAULT_MAX_COLS);
} | 3.68 |
hadoop_IncrementalBlockReportManager_putMissing | /**
 * Adds all the given blocks to this IBR unless a block already exists in it.
* @param rdbis list of blocks to add.
* @return the number of missing blocks added.
*/
int putMissing(ReceivedDeletedBlockInfo[] rdbis) {
int count = 0;
for (ReceivedDeletedBlockInfo rdbi : rdbis) {
if (!blocks.containsKey(rdbi.getBlock())) {
put(rdbi);
count++;
}
}
return count;
} | 3.68 |
hadoop_WriteOperationHelper_initiateMultiPartUpload | /**
* {@inheritDoc}
*/
@Retries.RetryTranslated
public String initiateMultiPartUpload(
final String destKey,
final PutObjectOptions options)
throws IOException {
LOG.debug("Initiating Multipart upload to {}", destKey);
try (AuditSpan span = activateAuditSpan()) {
return retry("initiate MultiPartUpload", destKey, true,
() -> {
final CreateMultipartUploadRequest.Builder initiateMPURequestBuilder =
getRequestFactory().newMultipartUploadRequestBuilder(
destKey, options);
return owner.initiateMultipartUpload(initiateMPURequestBuilder.build())
.uploadId();
});
}
} | 3.68 |
flink_FineGrainedSlotManager_checkResourceRequirementsWithDelay | /**
* Depending on the implementation of {@link ResourceAllocationStrategy}, checking resource
* requirements and potentially making a re-allocation can be heavy. In order to cover more
 * changes with each check, and thus reduce the frequency of unnecessary re-allocations, the checks
* are performed with a slight delay.
*/
private void checkResourceRequirementsWithDelay() {
if (requirementsCheckDelay.toMillis() <= 0) {
checkResourceRequirements();
} else {
if (requirementsCheckFuture == null || requirementsCheckFuture.isDone()) {
requirementsCheckFuture = new CompletableFuture<>();
scheduledExecutor.schedule(
() ->
mainThreadExecutor.execute(
() -> {
checkResourceRequirements();
Preconditions.checkNotNull(requirementsCheckFuture)
.complete(null);
}),
requirementsCheckDelay.toMillis(),
TimeUnit.MILLISECONDS);
}
}
} | 3.68 |
hadoop_MagicCommitTracker_aboutToComplete | /**
* Complete operation: generate the final commit data, put it.
* @param uploadId Upload ID
* @param parts list of parts
* @param bytesWritten bytes written
* @param iostatistics nullable IO statistics
* @return false, indicating that the commit must fail.
* @throws IOException any IO problem.
* @throws IllegalArgumentException bad argument
*/
@Override
public boolean aboutToComplete(String uploadId,
List<CompletedPart> parts,
long bytesWritten,
final IOStatistics iostatistics)
throws IOException {
Preconditions.checkArgument(StringUtils.isNotEmpty(uploadId),
"empty/null upload ID: "+ uploadId);
Preconditions.checkArgument(parts != null,
"No uploaded parts list");
Preconditions.checkArgument(!parts.isEmpty(),
"No uploaded parts to save");
// put a 0-byte file with the name of the original under-magic path
// Add the final file length as a header
// this is done before the task commit, so its duration can be
// included in the statistics
Map<String, String> headers = new HashMap<>();
headers.put(X_HEADER_MAGIC_MARKER, Long.toString(bytesWritten));
PutObjectRequest originalDestPut = writer.createPutObjectRequest(
originalDestKey,
0,
new PutObjectOptions(true, null, headers), false);
upload(originalDestPut, new ByteArrayInputStream(EMPTY));
// build the commit summary
SinglePendingCommit commitData = new SinglePendingCommit();
commitData.touch(System.currentTimeMillis());
commitData.setDestinationKey(getDestKey());
commitData.setBucket(bucket);
commitData.setUri(path.toUri().toString());
commitData.setUploadId(uploadId);
commitData.setText("");
commitData.setLength(bytesWritten);
commitData.bindCommitData(parts);
commitData.setIOStatistics(
new IOStatisticsSnapshot(iostatistics));
byte[] bytes = commitData.toBytes(SinglePendingCommit.serializer());
LOG.info("Uncommitted data pending to file {};"
+ " commit metadata for {} parts in {}. size: {} byte(s)",
path.toUri(), parts.size(), pendingPartKey, bytesWritten);
LOG.debug("Closed MPU to {}, saved commit information to {}; data=:\n{}",
path, pendingPartKey, commitData);
PutObjectRequest put = writer.createPutObjectRequest(
pendingPartKey,
bytes.length, null, false);
upload(put, new ByteArrayInputStream(bytes));
return false;
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_overwrite | /**
     * Sets whether to overwrite the configuration in the registry center with the local configuration when the job starts up.
     *
     * <p>
     * If overwrite is enabled, every startup will use the local configuration.
     * </p>
     *
     * @param overwrite whether to overwrite the registry center configuration with the local configuration on job startup
* @return ElasticJob configuration builder
*/
public Builder overwrite(final boolean overwrite) {
this.overwrite = overwrite;
return this;
} | 3.68 |
flink_CoGroupOperator_sortFirstGroup | /**
* Sorts Pojo or {@link org.apache.flink.api.java.tuple.Tuple} elements within a
* group in the first input on the specified field in the specified {@link Order}.
*
* <p>Groups can be sorted by multiple fields by chaining {@link
* #sortFirstGroup(String, Order)} calls.
*
* @param fieldExpression The expression to the field on which the group is to be
* sorted.
* @param order The Order in which the specified Tuple field is sorted.
             * @return This co-group operator with the specified group order applied to the first input.
* @see Order
*/
public CoGroupOperatorWithoutFunction sortFirstGroup(
String fieldExpression, Order order) {
ExpressionKeys<I1> ek = new ExpressionKeys<>(fieldExpression, input1.getType());
int[] groupOrderKeys = ek.computeLogicalKeyPositions();
for (int key : groupOrderKeys) {
this.groupSortKeyOrderFirst.add(new ImmutablePair<>(key, order));
}
return this;
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_prepRecords | /**
* Tag each record with the location in the given partition.
* The record is tagged with respective file slice's location based on its record key.
*/
protected HoodieData<HoodieRecord> prepRecords(Map<MetadataPartitionType,
HoodieData<HoodieRecord>> partitionRecordsMap) {
// The result set
HoodieData<HoodieRecord> allPartitionRecords = engineContext.emptyHoodieData();
HoodieTableFileSystemView fsView = HoodieTableMetadataUtil.getFileSystemView(metadataMetaClient);
for (Map.Entry<MetadataPartitionType, HoodieData<HoodieRecord>> entry : partitionRecordsMap.entrySet()) {
final String partitionName = entry.getKey().getPartitionPath();
HoodieData<HoodieRecord> records = entry.getValue();
List<FileSlice> fileSlices =
HoodieTableMetadataUtil.getPartitionLatestFileSlices(metadataMetaClient, Option.ofNullable(fsView), partitionName);
if (fileSlices.isEmpty()) {
        // scheduling of INDEX only initializes the file group and does not add a commit,
        // so if there are no committed file slices, look for inflight slices
fileSlices = getPartitionLatestFileSlicesIncludingInflight(metadataMetaClient, Option.ofNullable(fsView), partitionName);
}
final int fileGroupCount = fileSlices.size();
ValidationUtils.checkArgument(fileGroupCount > 0, "FileGroup count for MDT partition " + partitionName + " should be >0");
List<FileSlice> finalFileSlices = fileSlices;
HoodieData<HoodieRecord> rddSinglePartitionRecords = records.map(r -> {
FileSlice slice = finalFileSlices.get(HoodieTableMetadataUtil.mapRecordKeyToFileGroupIndex(r.getRecordKey(),
fileGroupCount));
r.unseal();
r.setCurrentLocation(new HoodieRecordLocation(slice.getBaseInstantTime(), slice.getFileId()));
r.seal();
return r;
});
allPartitionRecords = allPartitionRecords.union(rddSinglePartitionRecords);
}
return allPartitionRecords;
} | 3.68 |
flink_RocksDBMemoryControllerUtils_validateArenaBlockSize | /**
* RocksDB starts flushing the active memtable constantly in the case when the arena block size
* is greater than mutable limit (as calculated in {@link #calculateRocksDBMutableLimit(long)}).
*
* <p>This happens because in such a case the check <a
* href="https://github.com/dataArtisans/frocksdb/blob/958f191d3f7276ae59b270f9db8390034d549ee0/include/rocksdb/write_buffer_manager.h#L47">
* here</a> is always true.
*
* <p>This method checks that arena block size is smaller than mutable limit.
*
* @param arenaBlockSize Arena block size
* @param mutableLimit mutable limit
* @return whether arena block size is sensible
*/
@VisibleForTesting
static boolean validateArenaBlockSize(long arenaBlockSize, long mutableLimit) {
return arenaBlockSize <= mutableLimit;
} | 3.68 |
benchmark_DistributedWorkersEnsemble_getNumberOfProducerWorkers | /*
* For driver-jms extra consumers are required. If there is an odd number of workers then allocate the extra
* to consumption.
*/
@VisibleForTesting
static int getNumberOfProducerWorkers(List<Worker> workers, boolean extraConsumerWorkers) {
return extraConsumerWorkers ? (workers.size() + 2) / 3 : workers.size() / 2;
} | 3.68 |
hbase_AccessChecker_requireNamespacePermission | /**
* Checks that the user has the given global or namespace permission.
* @param user Active user to which authorization checks should be applied
* @param request Request type
* @param namespace The given namespace
* @param tableName Table requested
* @param familyMap Column family map requested
* @param permissions Actions being requested
*/
public void requireNamespacePermission(User user, String request, String namespace,
TableName tableName, Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions)
throws IOException {
AuthResult result = null;
for (Action permission : permissions) {
if (authManager.authorizeUserNamespace(user, namespace, permission)) {
result =
AuthResult.allow(request, "Namespace permission granted", user, permission, namespace);
result.getParams().setTableName(tableName).setFamilies(familyMap);
break;
} else {
// rest of the world
result = AuthResult.deny(request, "Insufficient permissions", user, permission, namespace);
result.getParams().setTableName(tableName).setFamilies(familyMap);
}
}
logResult(result);
if (!result.isAllowed()) {
throw new AccessDeniedException("Insufficient permissions " + result.toContextString());
}
} | 3.68 |
framework_VScrollTable_getNavigationRightKey | /**
     * Gets the key that scrolls to the right in the table. By default it is the
     * right arrow key, but by overriding this you can change the key to whatever
     * you want.
*
* @return The keycode of the key
*/
protected int getNavigationRightKey() {
return KeyCodes.KEY_RIGHT;
} | 3.68 |
pulsar_TopicName_getTopicPartitionNameString | /**
* A helper method to get a partition name of a topic in String.
* @return topic + "-partition-" + partition.
*/
public static String getTopicPartitionNameString(String topic, int partitionIndex) {
return topic + PARTITIONED_TOPIC_SUFFIX + partitionIndex;
} | 3.68 |
framework_Page_addBrowserWindowResizeListener | /**
* Adds a new {@link BrowserWindowResizeListener} to this UI. The listener
* will be notified whenever the browser window within which this UI resides
* is resized.
* <p>
* In most cases, the UI should be in lazy resize mode when using browser
* window resize listeners. Otherwise, a large number of events can be
* received while a resize is being performed. Use
* {@link UI#setResizeLazy(boolean)}.
* </p>
*
* @param resizeListener
* the listener to add
* @return a registration object for removing the listener
*
* @see BrowserWindowResizeListener#browserWindowResized(BrowserWindowResizeEvent)
* @see UI#setResizeLazy(boolean)
* @see Registration
* @since 8.0
*/
public Registration addBrowserWindowResizeListener(
BrowserWindowResizeListener resizeListener) {
Registration registration = addListener(BrowserWindowResizeEvent.class,
resizeListener, BROWSER_RESIZE_METHOD);
getState(true).hasResizeListeners = true;
return () -> {
registration.remove();
getState(true).hasResizeListeners = hasEventRouter()
&& eventRouter.hasListeners(BrowserWindowResizeEvent.class);
};
} | 3.68 |
flink_AbstractMergeOuterJoinIterator_callWithNextKey | /**
     * Calls the <code>JoinFunction#join()</code> method for all pairs of values that share the
     * same key and come from different inputs. Furthermore, depending on the outer join type (LEFT,
     * RIGHT, FULL), all key-value pairs for which no matching partner from the other input exists are
* joined with null. The output of the <code>join()</code> method is forwarded.
*
* @throws Exception Forwards all exceptions from the user code and the I/O system.
* @see
* org.apache.flink.runtime.operators.util.JoinTaskIterator#callWithNextKey(org.apache.flink.api.common.functions.FlatJoinFunction,
* org.apache.flink.util.Collector)
*/
@Override
public boolean callWithNextKey(
final FlatJoinFunction<T1, T2, O> joinFunction, final Collector<O> collector)
throws Exception {
if (!initialized) {
// first run, set iterators to first elements
it1Empty = !this.iterator1.nextKey();
it2Empty = !this.iterator2.nextKey();
initialized = true;
}
if (it1Empty && it2Empty) {
return false;
} else if (it2Empty) {
if (outerJoinType == OuterJoinType.LEFT || outerJoinType == OuterJoinType.FULL) {
joinLeftKeyValuesWithNull(iterator1.getValues(), joinFunction, collector);
it1Empty = !iterator1.nextKey();
return true;
} else {
// consume rest of left side
while (iterator1.nextKey()) {}
it1Empty = true;
return false;
}
} else if (it1Empty) {
if (outerJoinType == OuterJoinType.RIGHT || outerJoinType == OuterJoinType.FULL) {
joinRightKeyValuesWithNull(iterator2.getValues(), joinFunction, collector);
it2Empty = !iterator2.nextKey();
return true;
} else {
// consume rest of right side
while (iterator2.nextKey()) {}
it2Empty = true;
return false;
}
} else {
final TypePairComparator<T1, T2> comparator = super.pairComparator;
comparator.setReference(this.iterator1.getCurrent());
T2 current2 = this.iterator2.getCurrent();
// zig zag
while (true) {
// determine the relation between the (possibly composite) keys
final int comp = comparator.compareToReference(current2);
if (comp == 0) {
break;
}
if (comp < 0) {
// right key < left key
if (outerJoinType == OuterJoinType.RIGHT
|| outerJoinType == OuterJoinType.FULL) {
// join right key values with null in case of right or full outer join
joinRightKeyValuesWithNull(iterator2.getValues(), joinFunction, collector);
it2Empty = !iterator2.nextKey();
return true;
} else {
// skip this right key if it is a left outer join
if (!this.iterator2.nextKey()) {
// if right side is empty, join current left key values with null
joinLeftKeyValuesWithNull(
iterator1.getValues(), joinFunction, collector);
it1Empty = !iterator1.nextKey();
it2Empty = true;
return true;
}
current2 = this.iterator2.getCurrent();
}
} else {
// right key > left key
if (outerJoinType == OuterJoinType.LEFT
|| outerJoinType == OuterJoinType.FULL) {
// join left key values with null in case of left or full outer join
joinLeftKeyValuesWithNull(iterator1.getValues(), joinFunction, collector);
it1Empty = !iterator1.nextKey();
return true;
} else {
// skip this left key if it is a right outer join
if (!this.iterator1.nextKey()) {
                        // if left side is empty, join current right key values with null
joinRightKeyValuesWithNull(
iterator2.getValues(), joinFunction, collector);
it1Empty = true;
it2Empty = !iterator2.nextKey();
return true;
}
comparator.setReference(this.iterator1.getCurrent());
}
}
}
// here, we have a common key! call the join function with the cross product of the
// values
final Iterator<T1> values1 = this.iterator1.getValues();
final Iterator<T2> values2 = this.iterator2.getValues();
crossMatchingGroup(values1, values2, joinFunction, collector);
it1Empty = !iterator1.nextKey();
it2Empty = !iterator2.nextKey();
return true;
}
} | 3.68 |
flink_AvroUtils_getAvroUtils | /**
 * Returns either the default {@link AvroUtils}, which throws an exception in cases where Avro
 * would be needed, or loads the specific utils for Avro from flink-avro.
*/
public static AvroUtils getAvroUtils() {
// try and load the special AvroUtils from the flink-avro package
try {
Class<?> clazz =
Class.forName(
AVRO_KRYO_UTILS, false, Thread.currentThread().getContextClassLoader());
return clazz.asSubclass(AvroUtils.class).getConstructor().newInstance();
} catch (ClassNotFoundException e) {
// cannot find the utils, return the default implementation
return new DefaultAvroUtils();
} catch (Exception e) {
throw new RuntimeException("Could not instantiate " + AVRO_KRYO_UTILS + ".", e);
}
} | 3.68 |
flink_RouteResult_decodedPath | /** Returns the decoded request path. */
public String decodedPath() {
return decodedPath;
} | 3.68 |
zxing_State_addBinaryShiftChar | // Create a new state representing this state, but an additional character
// output in Binary Shift mode.
State addBinaryShiftChar(int index) {
Token token = this.token;
int mode = this.mode;
int bitCount = this.bitCount;
if (this.mode == HighLevelEncoder.MODE_PUNCT || this.mode == HighLevelEncoder.MODE_DIGIT) {
int latch = HighLevelEncoder.LATCH_TABLE[mode][HighLevelEncoder.MODE_UPPER];
token = token.add(latch & 0xFFFF, latch >> 16);
bitCount += latch >> 16;
mode = HighLevelEncoder.MODE_UPPER;
}
int deltaBitCount =
(binaryShiftByteCount == 0 || binaryShiftByteCount == 31) ? 18 :
(binaryShiftByteCount == 62) ? 9 : 8;
State result = new State(token, mode, binaryShiftByteCount + 1, bitCount + deltaBitCount);
if (result.binaryShiftByteCount == 2047 + 31) {
// The string is as long as it's allowed to be. We should end it.
result = result.endBinaryShift(index + 1);
}
return result;
} | 3.68 |
hudi_PartialBindVisitor_visitNameReference | /**
 * If the attribute cannot be found in the schema, return null directly; visitPredicate
 * will handle it.
*/
@Override
public Expression visitNameReference(NameReference attribute) {
Types.Field field = caseSensitive
? recordType.fieldByName(attribute.getName())
: recordType.fieldByNameCaseInsensitive(attribute.getName());
if (field == null) {
return null;
}
return new BoundReference(field.fieldId(), field.type());
} | 3.68 |
dubbo_JValidatorNew_generateMethodParameterClass | /**
 * Tries to generate the method parameter class.
*
* @param clazz interface class
* @param method invoke method
* @param parameterClassName generated parameterClassName
* @return Class<?> generated methodParameterClass
*/
private static Class<?> generateMethodParameterClass(Class<?> clazz, Method method, String parameterClassName)
throws Exception {
ClassPool pool = ClassGenerator.getClassPool(clazz.getClassLoader());
synchronized (parameterClassName.intern()) {
CtClass ctClass = null;
try {
ctClass = pool.getCtClass(parameterClassName);
} catch (NotFoundException ignore) {
}
if (null == ctClass) {
ctClass = pool.makeClass(parameterClassName);
ClassFile classFile = ctClass.getClassFile();
ctClass.addConstructor(CtNewConstructor.defaultConstructor(pool.getCtClass(parameterClassName)));
// parameter fields
Parameter[] parameters = method.getParameters();
Annotation[][] parameterAnnotations = method.getParameterAnnotations();
for (int i = 0; i < parameters.length; i++) {
Annotation[] annotations = parameterAnnotations[i];
AnnotationsAttribute attribute =
new AnnotationsAttribute(classFile.getConstPool(), AnnotationsAttribute.visibleTag);
for (Annotation annotation : annotations) {
if (annotation.annotationType().isAnnotationPresent(Constraint.class)) {
javassist.bytecode.annotation.Annotation ja = new javassist.bytecode.annotation.Annotation(
classFile.getConstPool(),
pool.getCtClass(annotation.annotationType().getName()));
Method[] members = annotation.annotationType().getMethods();
for (Method member : members) {
if (Modifier.isPublic(member.getModifiers())
&& member.getParameterTypes().length == 0
&& member.getDeclaringClass() == annotation.annotationType()) {
Object value = member.invoke(annotation);
if (null != value) {
MemberValue memberValue = createMemberValue(
classFile.getConstPool(),
pool.get(member.getReturnType().getName()),
value);
ja.addMemberValue(member.getName(), memberValue);
}
}
}
attribute.addAnnotation(ja);
}
}
Parameter parameter = parameters[i];
Class<?> type = parameter.getType();
String fieldName = parameter.getName();
CtField ctField = CtField.make(
"public " + type.getCanonicalName() + " " + fieldName + ";",
pool.getCtClass(parameterClassName));
ctField.getFieldInfo().addAttribute(attribute);
ctClass.addField(ctField);
}
return pool.toClass(ctClass, clazz, clazz.getClassLoader(), clazz.getProtectionDomain());
} else {
return Class.forName(parameterClassName, true, clazz.getClassLoader());
}
}
} | 3.68 |
framework_VAbstractCalendarPanel_getDateTimeService | /**
* Returns date time service for the widget.
*
* @see #setDateTimeService(DateTimeService)
*
* @return date time service
*/
protected DateTimeService getDateTimeService() {
return dateTimeService;
} | 3.68 |
graphhopper_EdgeIterator_isValid | /**
* Checks if a given integer edge ID is valid or not. Edge IDs >= 0 are considered valid, while negative
 * values are considered invalid. However, some negative values are used as special values, e.g. {@link
* #NO_EDGE}.
*/
public static boolean isValid(int edgeId) {
return edgeId >= 0;
} | 3.68 |
flink_DefaultCompletedCheckpointStore_addCheckpointAndSubsumeOldestOne | /**
* Synchronously writes the new checkpoints to state handle store and asynchronously removes
* older ones.
*
* @param checkpoint Completed checkpoint to add.
* @throws PossibleInconsistentStateException if adding the checkpoint failed and leaving the
* system in a possibly inconsistent state, i.e. it's uncertain whether the checkpoint
* metadata was fully written to the underlying systems or not.
*/
@Override
public CompletedCheckpoint addCheckpointAndSubsumeOldestOne(
final CompletedCheckpoint checkpoint,
CheckpointsCleaner checkpointsCleaner,
Runnable postCleanup)
throws Exception {
Preconditions.checkState(running.get(), "Checkpoint store has already been shutdown.");
checkNotNull(checkpoint, "Checkpoint");
final String path =
completedCheckpointStoreUtil.checkpointIDToName(checkpoint.getCheckpointID());
// Now add the new one. If it fails, we don't want to lose existing data.
checkpointStateHandleStore.addAndLock(path, checkpoint);
completedCheckpoints.addLast(checkpoint);
// Remove completed checkpoint from queue and checkpointStateHandleStore, not discard.
Optional<CompletedCheckpoint> subsume =
CheckpointSubsumeHelper.subsume(
completedCheckpoints,
maxNumberOfCheckpointsToRetain,
completedCheckpoint -> {
tryRemove(completedCheckpoint.getCheckpointID());
checkpointsCleaner.addSubsumedCheckpoint(completedCheckpoint);
});
findLowest(completedCheckpoints)
.ifPresent(
id ->
checkpointsCleaner.cleanSubsumedCheckpoints(
id,
getSharedStateRegistry().unregisterUnusedState(id),
postCleanup,
ioExecutor));
return subsume.orElse(null);
} | 3.68 |
framework_ComponentConnectorLayoutSlot_reportActualRelativeWidth | /**
* Reports the expected outer width to the LayoutManager.
*
* @param allocatedWidth
* the width to set (including margins, borders and paddings) in
* pixels
*/
@Override
protected void reportActualRelativeWidth(int allocatedWidth) {
getLayoutManager().reportOuterWidth(child, allocatedWidth);
} | 3.68 |
hbase_ParseFilter_parseSimpleFilterExpression | /**
* Constructs a filter object given a simple filter expression
* <p>
* @param filterStringAsByteArray filter string given by the user
* @return filter object we constructed
*/
public Filter parseSimpleFilterExpression(byte[] filterStringAsByteArray)
throws CharacterCodingException {
String filterName = Bytes.toString(getFilterName(filterStringAsByteArray));
ArrayList<byte[]> filterArguments = getFilterArguments(filterStringAsByteArray);
if (!filterHashMap.containsKey(filterName)) {
throw new IllegalArgumentException("Filter Name " + filterName + " not supported");
}
filterName = filterHashMap.get(filterName);
final String methodName = "createFilterFromArguments";
try {
Class<?> c = Class.forName(filterName);
Class<?>[] argTypes = new Class[] { ArrayList.class };
Method m = c.getDeclaredMethod(methodName, argTypes);
return (Filter) m.invoke(null, filterArguments);
} catch (ClassNotFoundException e) {
LOG.error("Could not find class {}", filterName, e);
} catch (NoSuchMethodException e) {
LOG.error("Could not find method {} in {}", methodName, filterName, e);
} catch (IllegalAccessException e) {
LOG.error("Unable to access specified class {}", filterName, e);
} catch (InvocationTargetException e) {
LOG.error("Method {} threw an exception for {}", methodName, filterName, e);
}
throw new IllegalArgumentException(
"Incorrect filter string " + new String(filterStringAsByteArray, StandardCharsets.UTF_8));
} | 3.68 |
framework_GridDragSource_getDragDataGenerator | /**
* Returns the drag data generator function for the given type.
*
* @param type
* Type of the generated data.
* @return Drag data generator function for the given type.
*/
public SerializableFunction<T, String> getDragDataGenerator(String type) {
return generatorFunctions.get(type);
} | 3.68 |
hbase_HRegion_newHRegion | // Utility methods
/**
* A utility method to create new instances of HRegion based on the {@link HConstants#REGION_IMPL}
* configuration property.
* @param tableDir qualified path of directory where region should be located, usually the table
* directory.
   * @param wal The WAL is the outbound log for any updates to the HRegion. The wal file is a
   *          logfile from the previous execution that's custom-computed for this HRegion.
   *          The HRegionServer computes and sorts the appropriate wal info for this
   *          HRegion. If there is a previous file (implying that the HRegion has been
   *          written-to before), then read it from the supplied path.
   * @param fs is the filesystem.
   * @param conf is global configuration settings.
   * @param regionInfo RegionInfo that describes the region
   * @param htd the table descriptor
   * @param rsServices the region server services
* @return the new instance
*/
public static HRegion newHRegion(Path tableDir, WAL wal, FileSystem fs, Configuration conf,
RegionInfo regionInfo, final TableDescriptor htd, RegionServerServices rsServices) {
try {
@SuppressWarnings("unchecked")
Class<? extends HRegion> regionClass =
(Class<? extends HRegion>) conf.getClass(HConstants.REGION_IMPL, HRegion.class);
Constructor<? extends HRegion> c =
regionClass.getConstructor(Path.class, WAL.class, FileSystem.class, Configuration.class,
RegionInfo.class, TableDescriptor.class, RegionServerServices.class);
return c.newInstance(tableDir, wal, fs, conf, regionInfo, htd, rsServices);
} catch (Throwable e) {
// todo: what should I throw here?
throw new IllegalStateException("Could not instantiate a region instance.", e);
}
} | 3.68 |
druid_MySqlSchemaStatVisitor_visit | // DUAL
public boolean visit(MySqlDeleteStatement x) {
if (repository != null
&& x.getParent() == null) {
repository.resolve(x);
}
SQLTableSource from = x.getFrom();
if (from != null) {
from.accept(this);
}
SQLTableSource using = x.getUsing();
if (using != null) {
using.accept(this);
}
SQLTableSource tableSource = x.getTableSource();
tableSource.accept(this);
if (tableSource instanceof SQLExprTableSource) {
TableStat stat = this.getTableStat((SQLExprTableSource) tableSource);
stat.incrementDeleteCount();
}
accept(x.getWhere());
accept(x.getOrderBy());
accept(x.getLimit());
return false;
} | 3.68 |
flink_UserDefinedFunction_functionIdentifier | /** Returns a unique, serialized representation for this function. */
public final String functionIdentifier() {
final String className = getClass().getName();
if (isClassNameSerializable(this)) {
return className;
}
final String md5 =
EncodingUtils.hex(EncodingUtils.md5(EncodingUtils.encodeObjectToString(this)));
return className.concat("$").concat(md5);
} | 3.68 |
flink_InputSelection_build | /**
     * Builds a normalized mask: if all inputs were manually selected, inputMask will be normalized
     * to -1.
*/
public InputSelection build(int inputCount) {
long allSelectedMask = (1L << inputCount) - 1;
if (inputMask == allSelectedMask) {
inputMask = -1;
} else if (inputMask > allSelectedMask) {
throw new IllegalArgumentException(
String.format(
"inputMask [%d] selects more than expected number of inputs [%d]",
inputMask, inputCount));
}
return build();
} | 3.68 |
morf_FieldFromSelect_getSelectStatement | /**
* @return the selectStatement
*/
public SelectStatement getSelectStatement() {
return selectStatement;
} | 3.68 |
flink_FlinkContainersSettings_isBuildFromFlinkDist | /**
* Returns whether to build from flink-dist or from an existing base container. Also see the
* {@code baseImage} property.
*/
public Boolean isBuildFromFlinkDist() {
return buildFromFlinkDist;
} | 3.68 |
framework_ContainerOrderedWrapper_removeAllItems | /**
* Removes all items from the underlying container and from the ordering.
*
* @return <code>true</code> if the operation succeeded, otherwise
* <code>false</code>
* @throws UnsupportedOperationException
* if the removeAllItems is not supported.
*/
@Override
public boolean removeAllItems() throws UnsupportedOperationException {
final boolean success = container.removeAllItems();
if (!ordered && success) {
first = last = null;
next.clear();
prev.clear();
}
return success;
} | 3.68 |
pulsar_Topics_createPartitionedTopic | /**
* Create a partitioned topic.
* <p/>
     * It needs to be called before creating a producer for a partitioned topic.
* <p/>
*
* @param topic
* Topic name
* @param numPartitions
* Number of partitions to create of the topic
* @throws PulsarAdminException
*/
default void createPartitionedTopic(String topic, int numPartitions) throws PulsarAdminException {
createPartitionedTopic(topic, numPartitions, null);
} | 3.68 |
flink_PartitionTimeCommitPredicate_watermarkHasPassedWithDelay | /**
 * Returns whether the watermark has passed the partition time; if true, it is time to commit
 * the partition.
*/
private boolean watermarkHasPassedWithDelay(
long watermark, LocalDateTime partitionTime, long commitDelay) {
        // here we don't parse the long watermark to TIMESTAMP and then compare,
        // but parse the partition timestamp to epoch millis to avoid Daylight Saving Time issues
long epochPartTime = partitionTime.atZone(watermarkTimeZone).toInstant().toEpochMilli();
return watermark > epochPartTime + commitDelay;
} | 3.68 |
rocketmq-connect_JdbcSourceTask_getContext | // Common context
private QueryContext getContext(String querySuffix, String tableOrQuery, String topicPrefix, QueryMode queryMode) {
QueryContext context = new QueryContext(
queryMode,
queryMode == QueryMode.TABLE ? dialect.parseTableNameToTableId(tableOrQuery) : null,
queryMode == QueryMode.QUERY ? tableOrQuery : null,
topicPrefix,
this.config.getOffsetSuffix(),
querySuffix,
config.getBatchMaxRows()
);
return context;
} | 3.68 |
flink_StopWithSavepointTerminationHandlerImpl_terminateExceptionally | /**
* Handles the termination of the {@code StopWithSavepointTerminationHandler} exceptionally
     * without triggering a global job fail-over; it only restarts the checkpoint scheduling.
*
* @param throwable the error that caused the exceptional termination.
*/
private void terminateExceptionally(Throwable throwable) {
checkpointScheduling.startCheckpointScheduler();
result.completeExceptionally(throwable);
} | 3.68 |
hadoop_RollingFileSystemSink_loadConf | /**
* Return the supplied configuration for testing or otherwise load a new
* configuration.
*
* @return the configuration to use
*/
private Configuration loadConf() {
Configuration c;
if (suppliedConf != null) {
c = suppliedConf;
} else {
// The config we're handed in init() isn't the one we want here, so we
// create a new one to pick up the full settings.
c = new Configuration();
}
return c;
} | 3.68 |
morf_H2Dialect_indexDropStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#indexDropStatements(org.alfasoftware.morf.metadata.Table,
* org.alfasoftware.morf.metadata.Index)
*/
@Override
public Collection<String> indexDropStatements(Table table, Index indexToBeRemoved) {
return Arrays.asList("DROP INDEX " + indexToBeRemoved.getName());
} | 3.68 |
framework_AbstractTextField_getCursorPosition | /**
* Returns the last known cursor position of the field.
*
* @return the last known cursor position
*/
public int getCursorPosition() {
return lastKnownCursorPosition;
} | 3.68 |
flink_CheckpointConfig_setMinPauseBetweenCheckpoints | /**
* Sets the minimal pause between checkpointing attempts. This setting defines how soon the
* checkpoint coordinator may trigger another checkpoint after it becomes possible to trigger
* another checkpoint with respect to the maximum number of concurrent checkpoints (see {@link
* #setMaxConcurrentCheckpoints(int)}).
*
* <p>If the maximum number of concurrent checkpoints is set to one, this setting makes
* effectively sure that a minimum amount of time passes where no checkpoint is in progress at
* all.
*
* @param minPauseBetweenCheckpoints The minimal pause before the next checkpoint is triggered.
*/
public void setMinPauseBetweenCheckpoints(long minPauseBetweenCheckpoints) {
if (minPauseBetweenCheckpoints < 0) {
throw new IllegalArgumentException("Pause value must be zero or positive");
}
configuration.set(
ExecutionCheckpointingOptions.MIN_PAUSE_BETWEEN_CHECKPOINTS,
Duration.ofMillis(minPauseBetweenCheckpoints));
} | 3.68 |
flink_RankProcessStrategy_analyzeRankProcessStrategies | /** Gets {@link RankProcessStrategy} based on input, partitionKey and orderKey. */
static List<RankProcessStrategy> analyzeRankProcessStrategies(
StreamPhysicalRel rank, ImmutableBitSet partitionKey, RelCollation orderKey) {
FlinkRelMetadataQuery mq = (FlinkRelMetadataQuery) rank.getCluster().getMetadataQuery();
List<RelFieldCollation> fieldCollations = orderKey.getFieldCollations();
boolean isUpdateStream = !ChangelogPlanUtils.inputInsertOnly(rank);
RelNode input = rank.getInput(0);
if (isUpdateStream) {
Set<ImmutableBitSet> upsertKeys =
mq.getUpsertKeysInKeyGroupRange(input, partitionKey.toArray());
if (upsertKeys == null
|| upsertKeys.isEmpty()
                    // upsert key should contain the partition key
|| upsertKeys.stream().noneMatch(k -> k.contains(partitionKey))) {
                // in these cases, fall back to using retract rank
return Collections.singletonList(RETRACT_STRATEGY);
} else {
FlinkRelMetadataQuery fmq = FlinkRelMetadataQuery.reuseOrCreate(mq);
RelModifiedMonotonicity monotonicity = fmq.getRelModifiedMonotonicity(input);
boolean isMonotonic = false;
if (monotonicity != null && !fieldCollations.isEmpty()) {
isMonotonic =
fieldCollations.stream()
.allMatch(
collation -> {
SqlMonotonicity fieldMonotonicity =
monotonicity
.fieldMonotonicities()[
collation.getFieldIndex()];
RelFieldCollation.Direction direction =
collation.direction;
if ((fieldMonotonicity == SqlMonotonicity.DECREASING
|| fieldMonotonicity
== SqlMonotonicity
.STRICTLY_DECREASING)
&& direction
== RelFieldCollation.Direction
.ASCENDING) {
// sort field is ascending and its monotonicity
// is decreasing
return true;
} else if ((fieldMonotonicity
== SqlMonotonicity
.INCREASING
|| fieldMonotonicity
== SqlMonotonicity
.STRICTLY_INCREASING)
&& direction
== RelFieldCollation.Direction
.DESCENDING) {
// sort field is descending and its monotonicity
// is increasing
return true;
} else {
// sort key is a grouping key of upstream agg,
// it is monotonic
return fieldMonotonicity
== SqlMonotonicity.CONSTANT;
}
});
}
if (isMonotonic) {
// TODO: choose a set of primary key
return Arrays.asList(
new UpdateFastStrategy(upsertKeys.iterator().next().toArray()),
RETRACT_STRATEGY);
} else {
return Collections.singletonList(RETRACT_STRATEGY);
}
}
} else {
return Collections.singletonList(APPEND_FAST_STRATEGY);
}
} | 3.68 |
hbase_FSTableDescriptors_getAll | /**
* Returns a map from table name to table descriptor for all tables.
*/
@Override
public Map<String, TableDescriptor> getAll() throws IOException {
Map<String, TableDescriptor> tds = new ConcurrentSkipListMap<>();
if (fsvisited) {
for (Map.Entry<TableName, TableDescriptor> entry : this.cache.entrySet()) {
tds.put(entry.getKey().getNameWithNamespaceInclAsString(), entry.getValue());
}
} else {
LOG.info("Fetching table descriptors from the filesystem.");
final long startTime = EnvironmentEdgeManager.currentTime();
AtomicBoolean allvisited = new AtomicBoolean(usecache);
List<Path> tableDirs = FSUtils.getTableDirs(fs, rootdir);
if (!tableDescriptorParallelLoadEnable) {
for (Path dir : tableDirs) {
internalGet(dir, tds, allvisited);
}
} else {
CountDownLatch latch = new CountDownLatch(tableDirs.size());
for (Path dir : tableDirs) {
executor.submit(new Runnable() {
@Override
public void run() {
try {
internalGet(dir, tds, allvisited);
} finally {
latch.countDown();
}
}
});
}
try {
latch.await();
} catch (InterruptedException ie) {
throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
}
}
fsvisited = allvisited.get();
LOG.info("Fetched table descriptors(size=" + tds.size() + ") cost "
+ (EnvironmentEdgeManager.currentTime() - startTime) + "ms.");
}
return tds;
} | 3.68 |
flink_AbstractCatalogStore_close | /** Closes the catalog store. */
@Override
public void close() {
isOpen = false;
} | 3.68 |
graphhopper_MinHeapWithUpdate_peekValue | /**
* @return the value of the next element to be polled
*/
public float peekValue() {
return vals[1];
} | 3.68 |
hbase_HRegion_createRegionDir | /**
* Create the region directory in the filesystem.
*/
public static HRegionFileSystem createRegionDir(Configuration configuration, RegionInfo ri,
Path rootDir) throws IOException {
FileSystem fs = rootDir.getFileSystem(configuration);
Path tableDir = CommonFSUtils.getTableDir(rootDir, ri.getTable());
// If directory already exists, will log warning and keep going. Will try to create
// .regioninfo. If one exists, will overwrite.
return HRegionFileSystem.createRegionOnFileSystem(configuration, fs, tableDir, ri);
} | 3.68 |
framework_PushConfiguration_getParameter | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.PushConfiguration#getParameter(java.lang.String)
*/
@Override
public String getParameter(String parameter) {
return getState(false).parameters.get(parameter);
} | 3.68 |
pulsar_ResourceGroupService_getPublishRateLimiters | // Visibility for testing.
protected BytesAndMessagesCount getPublishRateLimiters (String rgName) throws PulsarAdminException {
ResourceGroup rg = this.getResourceGroupInternal(rgName);
if (rg == null) {
throw new PulsarAdminException("Resource group does not exist: " + rgName);
}
return rg.getRgPublishRateLimiterValues();
} | 3.68 |
hmily_AbstractHmilyDatabase_executeUpdate | /**
     * Executes an update statement.
     *
     * @param sql the sql
     * @param params the params
     * @return the number of affected rows, or {@code FAIL_ROWS} if the update fails
*/
private int executeUpdate(final String sql, final Object... params) {
try (Connection con = dataSource.getConnection();
PreparedStatement ps = createPreparedStatement(con, sql, params)) {
return ps.executeUpdate();
} catch (SQLException e) {
log.error("hmily jdbc executeUpdate repository exception -> ", e);
return FAIL_ROWS;
}
} | 3.68 |
flink_ResultRetryStrategy_fixedDelayRetry | /** Create a fixed-delay retry strategy by given params. */
public static ResultRetryStrategy fixedDelayRetry(
int maxAttempts,
long backoffTimeMillis,
Predicate<Collection<RowData>> resultPredicate) {
return new ResultRetryStrategy(
new AsyncRetryStrategies.FixedDelayRetryStrategyBuilder(
maxAttempts, backoffTimeMillis)
.ifResult(resultPredicate)
.build());
} | 3.68 |
flink_RawFormatFactory_validateAndExtractSingleField | /** Validates and extract the single field type from the given physical row schema. */
private static LogicalType validateAndExtractSingleField(RowType physicalRowType) {
if (physicalRowType.getFieldCount() != 1) {
String schemaString =
physicalRowType.getFields().stream()
.map(RowType.RowField::asSummaryString)
.collect(Collectors.joining(", "));
throw new ValidationException(
String.format(
"The 'raw' format only supports single physical column. "
+ "However the defined schema contains multiple physical columns: [%s]",
schemaString));
}
LogicalType fieldType = physicalRowType.getChildren().get(0);
checkFieldType(fieldType);
return fieldType;
} | 3.68 |
flink_HiveParserQBParseInfo_setSortByExprForClause | /** Set the Sort By AST for the clause. */
public void setSortByExprForClause(String clause, HiveParserASTNode ast) {
destToSortby.put(clause, ast);
} | 3.68 |
flink_Tuple22_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
* @param f9 The value for field 9
* @param f10 The value for field 10
* @param f11 The value for field 11
* @param f12 The value for field 12
* @param f13 The value for field 13
* @param f14 The value for field 14
* @param f15 The value for field 15
* @param f16 The value for field 16
* @param f17 The value for field 17
* @param f18 The value for field 18
* @param f19 The value for field 19
* @param f20 The value for field 20
* @param f21 The value for field 21
*/
public void setFields(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16,
T17 f17,
T18 f18,
T19 f19,
T20 f20,
T21 f21) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
this.f13 = f13;
this.f14 = f14;
this.f15 = f15;
this.f16 = f16;
this.f17 = f17;
this.f18 = f18;
this.f19 = f19;
this.f20 = f20;
this.f21 = f21;
} | 3.68 |
framework_VAbstractTextualDate_createFormatString | /**
* Create a format string suitable for the widget in its current state.
*
* @return a date format string to use when formatting and parsing the text
* in the input field
* @since 8.1
*/
protected String createFormatString() {
if (isYear(getCurrentResolution())) {
return "yyyy"; // force full year
}
try {
String frmString = LocaleService.getDateFormat(currentLocale);
return cleanFormat(frmString);
} catch (LocaleNotLoadedException e) {
// TODO should die instead? Can the component survive
// without format string?
getLogger().log(Level.SEVERE,
e.getMessage() == null ? "" : e.getMessage(), e);
return null;
}
} | 3.68 |
streampipes_BoilerpipeHTMLContentHandler_endElement | // @Override
public void endElement(String uri, String localName, String qName) throws SAXException {
TagAction ta = tagActions.get(localName);
if (ta != null) {
flush = ta.end(this, localName, qName) | flush;
} else {
flush = true;
}
if (ta == null || ta.changesTagLevel()) {
tagLevel--;
}
if (flush) {
flushBlock();
}
lastEvent = Event.END_TAG;
lastEndTag = localName;
labelStacks.removeLast();
} | 3.68 |
hudi_MarkerUtils_readMarkerType | /**
   * Reads the marker type from the `MARKERS.type` file.
*
* @param fileSystem file system to use.
* @param markerDir marker directory.
* @return the marker type, or empty if the marker type file does not exist.
*/
public static Option<MarkerType> readMarkerType(FileSystem fileSystem, String markerDir) {
Path markerTypeFilePath = new Path(markerDir, MARKER_TYPE_FILENAME);
FSDataInputStream fsDataInputStream = null;
Option<MarkerType> content = Option.empty();
try {
if (!doesMarkerTypeFileExist(fileSystem, markerDir)) {
return Option.empty();
}
fsDataInputStream = fileSystem.open(markerTypeFilePath);
String markerType = FileIOUtils.readAsUTFString(fsDataInputStream);
if (StringUtils.isNullOrEmpty(markerType)) {
return Option.empty();
}
content = Option.of(MarkerType.valueOf(markerType));
} catch (IOException e) {
throw new HoodieIOException("Cannot read marker type file " + markerTypeFilePath.toString()
+ "; " + e.getMessage(), e);
} finally {
closeQuietly(fsDataInputStream);
}
return content;
} | 3.68 |
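A hedged caller sketch; the Hudi/Hadoop package names and the marker directory layout are assumptions, not taken from the source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.table.marker.MarkerType;
import org.apache.hudi.common.util.MarkerUtils;
import org.apache.hudi.common.util.Option;

public class MarkerTypeProbe {
    public static void main(String[] args) throws Exception {
        String markerDir = "/tmp/hoodie_table/.hoodie/.temp/20240101010101"; // hypothetical path
        FileSystem fs = new Path(markerDir).getFileSystem(new Configuration());
        Option<MarkerType> markerType = MarkerUtils.readMarkerType(fs, markerDir);
        // Empty means there is no MARKERS.type file in the marker directory.
        System.out.println(markerType.isPresent() ? markerType.get() : "no MARKERS.type file");
    }
}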
framework_AbsoluteLayout_getLeftValue | /**
     * Gets the 'left' attribute's value using the current units.
*
* @return The value of the 'left' attribute, null if not set
* @see #getLeftUnits()
*/
public Float getLeftValue() {
return leftValue;
} | 3.68 |
morf_DatabaseDataSetProducer_getSchema | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#getSchema()
*/
@Override
public Schema getSchema() {
if (connection == null) {
throw new IllegalStateException("Dataset has not been opened");
}
if (schema == null) {
// we use the same connection as this provider, so there is no (extra) clean-up to do.
schema = DatabaseType.Registry.findByIdentifier(connectionResources.getDatabaseType()).openSchema(connection, connectionResources.getDatabaseName(), connectionResources.getSchemaName());
}
return schema;
} | 3.68 |
hbase_Mutation_setACL | /**
* Set the ACL for this operation.
* @param perms A map of permissions for a user or users
*/
public Mutation setACL(Map<String, Permission> perms) {
ListMultimap<String, Permission> permMap = ArrayListMultimap.create();
for (Map.Entry<String, Permission> entry : perms.entrySet()) {
permMap.put(entry.getKey(), entry.getValue());
}
setAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL,
AccessControlUtil.toUsersAndPermissions(permMap).toByteArray());
return this;
} | 3.68 |
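An illustrative sketch of attaching an ACL to a single Put; the user name is invented and the Permission constructor form may differ between HBase versions.

import java.util.Collections;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class AclOnPutExample {
    public static void main(String[] args) {
        Put put = new Put(Bytes.toBytes("row-1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        // The map of user -> permissions ends up serialized into the OP_ATTRIBUTE_ACL attribute.
        put.setACL(Collections.singletonMap("alice", new Permission(Permission.Action.READ)));
    }
}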
morf_HumanReadableStatementHelper_generateRenameTableString | /**
* @param from - The table name to change from
* @param to - The table name to change to
* @return a string containing the human-readable version of the action
*/
public static String generateRenameTableString(String from, String to) {
StringBuilder renameTableBuilder = new StringBuilder();
renameTableBuilder.append(String.format("Rename table %s to %s",
from,
to));
return renameTableBuilder.toString();
} | 3.68 |
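A tiny usage sketch; the table names are invented, and the output follows directly from the format string above.

String description =
    HumanReadableStatementHelper.generateRenameTableString("OldOrders", "ArchivedOrders");
// description is "Rename table OldOrders to ArchivedOrders"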
flink_BinarySegmentUtils_equals | /**
     * Checks whether two memory segment regions are equal.
     *
     * @param segments1 Segments 1
     * @param offset1 Offset in segments1 at which to start comparing
     * @param segments2 Segments 2
     * @param offset2 Offset in segments2 at which to start comparing
     * @param len Length of the compared memory region
* @return true if equal, false otherwise
*/
public static boolean equals(
MemorySegment[] segments1,
int offset1,
MemorySegment[] segments2,
int offset2,
int len) {
if (inFirstSegment(segments1, offset1, len) && inFirstSegment(segments2, offset2, len)) {
return segments1[0].equalTo(segments2[0], offset1, offset2, len);
} else {
return equalsMultiSegments(segments1, offset1, segments2, offset2, len);
}
} | 3.68 |
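A sketch comparing two heap-backed segment arrays; the MemorySegmentFactory and BinarySegmentUtils package locations are assumptions.

import java.nio.charset.StandardCharsets;
import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.table.data.binary.BinarySegmentUtils;

public class SegmentEqualsExample {
    public static void main(String[] args) {
        MemorySegment[] a = {MemorySegmentFactory.wrap("hello world".getBytes(StandardCharsets.UTF_8))};
        MemorySegment[] b = {MemorySegmentFactory.wrap("xxhello".getBytes(StandardCharsets.UTF_8))};
        // Compares a[0..5) ("hello") with b[2..7) ("hello"); both regions sit in the first segment.
        System.out.println(BinarySegmentUtils.equals(a, 0, b, 2, 5)); // true
    }
}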
flink_OperationExpressionsUtils_extractAggregationsAndProperties | /**
* Extracts and deduplicates all aggregation and window property expressions (zero, one, or
* more) from the given expressions.
*
* @param expressions a list of expressions to extract
* @return a Tuple2, the first field contains the extracted and deduplicated aggregations, and
* the second field contains the extracted and deduplicated window properties.
*/
public static CategorizedExpressions extractAggregationsAndProperties(
List<Expression> expressions) {
AggregationAndPropertiesSplitter splitter = new AggregationAndPropertiesSplitter();
expressions.forEach(expr -> expr.accept(splitter));
List<Expression> projections =
expressions.stream()
.map(
expr ->
expr.accept(
new AggregationAndPropertiesReplacer(
splitter.aggregates, splitter.properties)))
.collect(Collectors.toList());
List<Expression> aggregates = nameExpressions(splitter.aggregates);
List<Expression> properties = nameExpressions(splitter.properties);
return new CategorizedExpressions(projections, aggregates, properties);
} | 3.68 |
framework_Panel_removeClickListener | /**
* Remove a click listener from the Panel. The listener should earlier have
* been added using {@link #addClickListener(ClickListener)}.
*
* @param listener
* The listener to remove
* @deprecated As of 8.0, replaced by {@link Registration#remove()} in the
* registration object returned from
* {@link #addClickListener(ClickListener)}.
*/
@Deprecated
public void removeClickListener(ClickListener listener) {
removeListener(EventId.CLICK_EVENT_IDENTIFIER, ClickEvent.class,
listener);
} | 3.68 |
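A hedged Vaadin 8-style sketch of the replacement the deprecation note points to: keep the Registration returned by addClickListener and remove through it.

import com.vaadin.shared.Registration;
import com.vaadin.ui.Notification;
import com.vaadin.ui.Panel;

public class PanelClickWiring {
    public void wire(Panel panel) {
        Registration registration =
                panel.addClickListener(event -> Notification.show("Panel clicked"));
        // Later, instead of the deprecated removeClickListener(listener):
        registration.remove();
    }
}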
hbase_PrivateCellUtil_compareRow | /**
* Compare cell's row against given comparator
* @param cell the cell to use for comparison
* @param comparator the {@link CellComparator} to use for comparison
* @return result comparing cell's row
*/
public static int compareRow(Cell cell, ByteArrayComparable comparator) {
if (cell instanceof ByteBufferExtendedCell) {
return comparator.compareTo(((ByteBufferExtendedCell) cell).getRowByteBuffer(),
((ByteBufferExtendedCell) cell).getRowPosition(), cell.getRowLength());
}
return comparator.compareTo(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
} | 3.68 |
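PrivateCellUtil is HBase-internal, so this is only a sketch of how server-side code might call it; the KeyValue and BinaryComparator setup is assumed.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareRowSketch {
    public static void main(String[] args) {
        Cell cell = new KeyValue(Bytes.toBytes("row-5"), Bytes.toBytes("cf"),
                Bytes.toBytes("q"), Bytes.toBytes("v"));
        // 0 means the cell's row equals the comparator's value.
        System.out.println(PrivateCellUtil.compareRow(cell, new BinaryComparator(Bytes.toBytes("row-5"))));
    }
}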
hbase_RSGroupBasedLoadBalancer_balanceCluster | /**
* Balance by RSGroup.
*/
@Override
public synchronized List<RegionPlan> balanceCluster(
Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfAllTable) throws IOException {
if (!isOnline()) {
throw new ConstraintException(
RSGroupInfoManager.class.getSimpleName() + " is not online, unable to perform balance");
}
// Calculate correct assignments and a list of RegionPlan for mis-placed regions
Pair<Map<TableName, Map<ServerName, List<RegionInfo>>>,
List<RegionPlan>> correctedStateAndRegionPlans = correctAssignments(loadOfAllTable);
Map<TableName, Map<ServerName, List<RegionInfo>>> correctedLoadOfAllTable =
correctedStateAndRegionPlans.getFirst();
List<RegionPlan> regionPlans = correctedStateAndRegionPlans.getSecond();
RSGroupInfo defaultInfo = rsGroupInfoManager.getRSGroup(RSGroupInfo.DEFAULT_GROUP);
// Add RegionPlan
// for the regions which have been placed according to the region server group assignment
// into the movement list
try {
// For each rsgroup
for (RSGroupInfo rsgroup : rsGroupInfoManager.listRSGroups()) {
LOG.debug("Balancing RSGroup={}", rsgroup.getName());
Map<TableName, Map<ServerName, List<RegionInfo>>> loadOfTablesInGroup = new HashMap<>();
for (Map.Entry<TableName, Map<ServerName, List<RegionInfo>>> entry : correctedLoadOfAllTable
.entrySet()) {
TableName tableName = entry.getKey();
RSGroupInfo targetRSGInfo = RSGroupUtil
.getRSGroupInfo(masterServices, rsGroupInfoManager, tableName).orElse(defaultInfo);
if (targetRSGInfo.getName().equals(rsgroup.getName())) {
loadOfTablesInGroup.put(tableName, entry.getValue());
}
}
List<RegionPlan> groupPlans = null;
if (!loadOfTablesInGroup.isEmpty()) {
LOG.info("Start Generate Balance plan for group: " + rsgroup.getName());
groupPlans = this.internalBalancer.balanceCluster(loadOfTablesInGroup);
}
if (groupPlans != null) {
regionPlans.addAll(groupPlans);
}
}
} catch (IOException exp) {
LOG.warn("Exception while balancing cluster.", exp);
regionPlans.clear();
}
// Return the whole movement list
return regionPlans;
} | 3.68 |
hadoop_ClientThrottlingIntercept_responseReceived | /**
* Called after the Azure Storage SDK receives a response. Client-side
* throttling uses this to collect metrics.
*
* @param event The connection, operation, and request state.
*/
public static void responseReceived(ResponseReceivedEvent event) {
updateMetrics((HttpURLConnection) event.getConnectionObject(),
event.getRequestResult());
} | 3.68 |
hudi_SparkRDDReadClient_readROView | /**
   * Given a collection of Hoodie keys, fetches the corresponding records as a DataFrame.
   *
   * @return a DataFrame containing the rows matching the given keys
*/
public Dataset<Row> readROView(JavaRDD<HoodieKey> hoodieKeys, int parallelism) {
assertSqlContext();
JavaPairRDD<HoodieKey, Option<Pair<String, String>>> lookupResultRDD = checkExists(hoodieKeys);
JavaPairRDD<HoodieKey, Option<String>> keyToFileRDD =
lookupResultRDD.mapToPair(r -> new Tuple2<>(r._1, convertToDataFilePath(r._2)));
List<String> paths = keyToFileRDD.filter(keyFileTuple -> keyFileTuple._2().isPresent())
.map(keyFileTuple -> keyFileTuple._2().get()).collect();
// record locations might be same for multiple keys, so need a unique list
Set<String> uniquePaths = new HashSet<>(paths);
Dataset<Row> originalDF = null;
// read files based on the file extension name
if (paths.size() == 0 || paths.get(0).endsWith(HoodieFileFormat.PARQUET.getFileExtension())) {
originalDF = sqlContextOpt.get().read().parquet(uniquePaths.toArray(new String[uniquePaths.size()]));
} else if (paths.get(0).endsWith(HoodieFileFormat.ORC.getFileExtension())) {
originalDF = sqlContextOpt.get().read().orc(uniquePaths.toArray(new String[uniquePaths.size()]));
}
StructType schema = originalDF.schema();
JavaPairRDD<HoodieKey, Row> keyRowRDD = originalDF.javaRDD().mapToPair(row -> {
HoodieKey key = new HoodieKey(row.getAs(HoodieRecord.RECORD_KEY_METADATA_FIELD),
row.getAs(HoodieRecord.PARTITION_PATH_METADATA_FIELD));
return new Tuple2<>(key, row);
});
// Now, we need to further filter out, for only rows that match the supplied hoodie keys
JavaRDD<Row> rowRDD = keyRowRDD.join(keyToFileRDD, parallelism).map(tuple -> tuple._2()._1());
return sqlContextOpt.get().createDataFrame(rowRDD, schema);
} | 3.68 |
hbase_ByteBufferUtils_readAsInt | /**
* Converts a ByteBuffer to an int value
* @param buf The ByteBuffer
* @param offset Offset to int value
* @param length Number of bytes used to store the int value.
   * @return the int value
   * @throws IllegalArgumentException if there are not enough bytes left in the buffer after the
   *           given offset
*/
public static int readAsInt(ByteBuffer buf, int offset, final int length) {
if (offset + length > buf.limit()) {
throw new IllegalArgumentException("offset (" + offset + ") + length (" + length
+ ") exceed the" + " limit of the buffer: " + buf.limit());
}
int n = 0;
for (int i = offset; i < (offset + length); i++) {
n <<= 8;
n ^= toByte(buf, i) & 0xFF;
}
return n;
} | 3.68 |
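A worked example of the byte arithmetic: the two bytes 0x01 0x02 read as a 2-byte big-endian value give 0x0102 = 258.

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.ByteBufferUtils;

public class ReadAsIntExample {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(new byte[] {0x01, 0x02, 0x03});
        System.out.println(ByteBufferUtils.readAsInt(buf, 0, 2)); // 258
        // readAsInt(buf, 2, 2) would throw IllegalArgumentException: offset + length exceeds the limit.
    }
}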
hmily_HmilyActionEnum_acquireByCode | /**
* Acquire by code tcc action enum.
*
* @param code the code
* @return the tcc action enum
*/
public static HmilyActionEnum acquireByCode(final int code) {
return Arrays.stream(HmilyActionEnum.values())
.filter(v -> Objects.equals(v.getCode(), code))
.findFirst().orElse(HmilyActionEnum.TRYING);
} | 3.68 |
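A behavior sketch that follows directly from the stream above: a matching code returns its enum constant, and any unknown code falls back to TRYING.

HmilyActionEnum trying = HmilyActionEnum.acquireByCode(HmilyActionEnum.TRYING.getCode());
HmilyActionEnum fallback = HmilyActionEnum.acquireByCode(-999); // no match -> HmilyActionEnum.TRYING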
flink_TableSinkBase_getFieldNames | /** Returns the field names of the table to emit. */
@Override
public String[] getFieldNames() {
if (fieldNames.isPresent()) {
return fieldNames.get();
} else {
throw new IllegalStateException(
"Table sink must be configured to retrieve field names.");
}
} | 3.68 |
flink_FlinkZooKeeperQuorumPeer_runFlinkZkQuorumPeer | /**
* Runs a ZooKeeper {@link QuorumPeer} if further peers are configured or a single {@link
* ZooKeeperServer} if no further peers are configured.
*
* @param zkConfigFile ZooKeeper config file 'zoo.cfg'
* @param peerId ID for the 'myid' file
*/
public static void runFlinkZkQuorumPeer(String zkConfigFile, int peerId) throws Exception {
Properties zkProps = new Properties();
try (InputStream inStream = new FileInputStream(new File(zkConfigFile))) {
zkProps.load(inStream);
}
LOG.info("Configuration: " + zkProps);
// Set defaults for required properties
setRequiredProperties(zkProps);
// Write peer id to myid file
writeMyIdToDataDir(zkProps, peerId);
// The myid file needs to be written before creating the instance. Otherwise, this
// will fail.
QuorumPeerConfig conf = new QuorumPeerConfig();
conf.parseProperties(zkProps);
if (conf.isDistributed()) {
// Run quorum peer
LOG.info(
"Running distributed ZooKeeper quorum peer (total peers: {}).",
conf.getServers().size());
QuorumPeerMain qp = new QuorumPeerMain();
qp.runFromConfig(conf);
} else {
// Run standalone
LOG.info("Running standalone ZooKeeper quorum peer.");
ZooKeeperServerMain zk = new ZooKeeperServerMain();
ServerConfig sc = new ServerConfig();
sc.readFrom(conf);
zk.runFromConfig(sc);
}
} | 3.68 |
flink_KafkaEventsGeneratorJob_rpsFromSleep | // Used for backwards compatibility to convert legacy 'sleep' parameter to records per second.
private static double rpsFromSleep(int sleep, int parallelism) {
return (1000d / sleep) * parallelism;
} | 3.68 |
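A worked instance of the conversion: a legacy sleep of 100 ms per record with parallelism 4 yields (1000 / 100) * 4 = 40 records per second.

double recordsPerSecond = (1000d / 100) * 4; // 40.0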
framework_LayoutManager_getMarginTop | /**
* Gets the top margin of the given element, provided that it has been
* measured. These elements are guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured size for
* @return the measured top margin of the element in pixels.
*/
public int getMarginTop(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getMarginTop();
} | 3.68 |
hadoop_FSDirAppendOp_appendFile | /**
* Append to an existing file.
* <p>
*
* The method returns the last block of the file if this is a partial block,
* which can still be used for writing more data. The client uses the
* returned block locations to form the data pipeline for this block.<br>
* The {@link LocatedBlock} will be null if the last block is full.
* The client then allocates a new block with the next call using
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#addBlock}.
* <p>
*
* For description of parameters and exceptions thrown see
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#append}
*
* @param fsn namespace
* @param srcArg path name
* @param pc permission checker to check fs permission
* @param holder client name
* @param clientMachine client machine info
* @param newBlock if the data is appended to a new block
* @param logRetryCache whether to record RPC ids in editlog for retry cache
* rebuilding
* @return the last block with status
*/
static LastBlockWithStatus appendFile(final FSNamesystem fsn,
final String srcArg, final FSPermissionChecker pc, final String holder,
final String clientMachine, final boolean newBlock,
final boolean logRetryCache) throws IOException {
assert fsn.hasWriteLock();
final LocatedBlock lb;
final FSDirectory fsd = fsn.getFSDirectory();
final INodesInPath iip;
fsd.writeLock();
try {
iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
// Verify that the destination does not exist as a directory already
final INode inode = iip.getLastINode();
final String path = iip.getPath();
if (inode != null && inode.isDirectory()) {
throw new FileAlreadyExistsException("Cannot append to directory "
+ path + "; already exists as a directory.");
}
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
if (inode == null) {
throw new FileNotFoundException(
"Failed to append to non-existent file " + path + " for client "
+ clientMachine);
}
final INodeFile file = INodeFile.valueOf(inode, path, true);
if (file.isStriped() && !newBlock) {
throw new UnsupportedOperationException(
"Append on EC file without new block is not supported. Use "
+ CreateFlag.NEW_BLOCK + " create flag while appending file.");
}
BlockManager blockManager = fsd.getBlockManager();
final BlockStoragePolicy lpPolicy = blockManager
.getStoragePolicy("LAZY_PERSIST");
if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
throw new UnsupportedOperationException(
"Cannot append to lazy persist file " + path);
}
// Opening an existing file for append - may need to recover lease.
fsn.recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, path, holder,
clientMachine, false);
final BlockInfo lastBlock = file.getLastBlock();
// Check that the block has at least minimum replication.
if (lastBlock != null) {
if (lastBlock.getBlockUCState() == BlockUCState.COMMITTED) {
throw new RetriableException(
new NotReplicatedYetException("append: lastBlock="
+ lastBlock + " of src=" + path
+ " is COMMITTED but not yet COMPLETE."));
} else if (lastBlock.isComplete()
&& !blockManager.isSufficientlyReplicated(lastBlock)) {
throw new IOException("append: lastBlock=" + lastBlock + " of src="
+ path + " is not sufficiently replicated yet.");
}
}
lb = prepareFileForAppend(fsn, iip, holder, clientMachine, newBlock,
true, logRetryCache);
} catch (IOException ie) {
NameNode.stateChangeLog
.warn("DIR* NameSystem.append: " + ie.getMessage());
throw ie;
} finally {
fsd.writeUnlock();
}
HdfsFileStatus stat =
FSDirStatAndListingOp.getFileInfo(fsd, iip, false, false);
if (lb != null) {
NameNode.stateChangeLog.debug(
"DIR* NameSystem.appendFile: file {} for {} at {} block {} block"
+ " size {}", srcArg, holder, clientMachine, lb.getBlock(), lb
.getBlock().getNumBytes());
}
return new LastBlockWithStatus(lb, stat);
} | 3.68 |
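The snippet above is the NameNode-side implementation; below is a hedged client-side sketch of the call that reaches it via the standard FileSystem API (the path and cluster configuration are assumptions).

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendClientSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // picks up core-site.xml / hdfs-site.xml
        try (FileSystem fs = FileSystem.get(conf);
             FSDataOutputStream out = fs.append(new Path("/data/events.log"))) {
            out.write("one more line\n".getBytes(StandardCharsets.UTF_8));
        }
        // On the NameNode this append reaches FSDirAppendOp.appendFile, which checks permissions,
        // recovers the lease if needed, and returns the last partial block (or null when it is full).
    }
}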