name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hbase_HFileBlock_writeBlock | /**
* Takes the given {@link BlockWritable} instance, creates a new block of its appropriate type,
* writes the writable into this block, and flushes the block into the output stream. The writer
* is instructed not to buffer uncompressed bytes for cache-on-write.
* @param bw the block-writable object to write as a block
* @param out the file system output stream
*/
void writeBlock(BlockWritable bw, FSDataOutputStream out) throws IOException {
bw.writeToBlock(startWriting(bw.getBlockType()));
writeHeaderAndData(out);
} | 3.68 |
hadoop_IncrementalBlockReportManager_getPerStorageIBR | /** @return the pending IBR for the given {@code storage} */
private PerStorageIBR getPerStorageIBR(DatanodeStorage storage) {
PerStorageIBR perStorage = pendingIBRs.get(storage);
if (perStorage == null) {
// This is the first time we are adding incremental BR state for
// this storage so create a new map. This is required once per
// storage, per service actor.
perStorage = new PerStorageIBR(dnMetrics);
pendingIBRs.put(storage, perStorage);
}
return perStorage;
} | 3.68 |
framework_GridDragSourceConnector_removeDraggedStyle | /**
* Remove {@code v-grid-row-dragged} class name from dragged rows.
*
* @param event
* The dragend event.
*/
@Override
protected void removeDraggedStyle(NativeEvent event) {
getDraggedRowElementStream().forEach(
rowElement -> rowElement.removeClassName(draggedStyleName));
} | 3.68 |
hadoop_HdfsFileStatus_flags | /**
* Set {@link Flags} for this entity
* (default = {@link EnumSet#noneOf(Class)}).
* @param flags Flags
* @return This builder instance
*/
public Builder flags(EnumSet<Flags> flags) {
this.flags = flags;
return this;
} | 3.68 |
hbase_ScannerContext_canEnforceLimitFromScope | /**
* @param checkerScope The scope in which the limit is being checked
* @return true when the checker is in a scope that indicates the limit can be enforced. Limits
* can be enforced from "higher or equal" scopes (i.e. the checker's scope is at a
* depth less than or equal to the limit's)
*/
boolean canEnforceLimitFromScope(LimitScope checkerScope) {
return checkerScope != null && checkerScope.depth() <= depth;
} | 3.68 |
hadoop_GetContentSummaryOperation_buildDirectorySet | /***
* This method builds the set of all directories found under the base path. We need to do this
* because if the directory structure /a/b/c was created with a single mkdirs() call, it is
* stored as 1 object in S3 and the list files iterator will only return a single entry /a/b/c.
*
* We keep track of paths traversed so far to prevent duplication of work. For example, if we had
* a/b/c/file-1.txt and /a/b/c/file-2.txt, we will only recurse over the complete path once
* and won't have to do anything for file-2.txt.
*
* @param dirSet Set of all directories found in the path
* @param pathsTraversed Set of all paths traversed so far
* @param basePath Path of directory to scan
* @param parentPath Parent path of the current file/directory in the iterator
*/
private void buildDirectorySet(Set<Path> dirSet, Set<Path> pathsTraversed, Path basePath,
Path parentPath) {
if (parentPath == null || pathsTraversed.contains(parentPath) || parentPath.equals(basePath)) {
return;
}
dirSet.add(parentPath);
buildDirectorySet(dirSet, pathsTraversed, basePath, parentPath.getParent());
pathsTraversed.add(parentPath);
} | 3.68 |
framework_SQLContainer_getPropertyIds | /**
* Fetches property IDs (column names and their types) from the data
* source.
*
* @throws SQLException
*/
private void getPropertyIds() throws SQLException {
propertyIds.clear();
propertyTypes.clear();
queryDelegate.setFilters(null);
queryDelegate.setOrderBy(null);
ResultSet rs = null;
ResultSetMetaData rsmd = null;
try {
queryDelegate.beginTransaction();
rs = queryDelegate.getResults(0, 1);
rsmd = rs.getMetaData();
boolean resultExists = rs.next();
Class<?> type = null;
for (int i = 1; i <= rsmd.getColumnCount(); i++) {
if (!isColumnIdentifierValid(rsmd.getColumnLabel(i))) {
continue;
}
String colName = rsmd.getColumnLabel(i);
/*
* Make sure not to add the same colName twice. This can easily
* happen if the SQL query joins many tables with an ID column.
*/
if (!propertyIds.contains(colName)) {
propertyIds.add(colName);
}
/* Try to determine the column's JDBC class by all means. */
if (resultExists && rs.getObject(i) != null) {
type = rs.getObject(i).getClass();
} else {
try {
type = Class.forName(rsmd.getColumnClassName(i));
} catch (Exception e) {
getLogger().log(Level.WARNING, "Class not found", e);
/* On failure revert to Object and hope for the best. */
type = Object.class;
}
}
/*
* Determine read only and nullability status of the column. A
* column is read only if it is reported as either read only or
* auto increment by the database, and also it is set as the
* version column in a TableQuery delegate.
*/
boolean readOnly = rsmd.isAutoIncrement(i)
|| rsmd.isReadOnly(i);
boolean persistable = !rsmd.isReadOnly(i);
if (queryDelegate instanceof TableQuery) {
if (rsmd.getColumnLabel(i).equals(
((TableQuery) queryDelegate).getVersionColumn())) {
readOnly = true;
}
}
propertyReadOnly.put(colName, readOnly);
propertyPersistable.put(colName, persistable);
propertyNullable.put(colName,
rsmd.isNullable(i) == ResultSetMetaData.columnNullable);
propertyPrimaryKey.put(colName,
queryDelegate.getPrimaryKeyColumns()
.contains(rsmd.getColumnLabel(i)));
propertyTypes.put(colName, type);
}
rs.getStatement().close();
rs.close();
queryDelegate.commit();
getLogger().log(Level.FINER, "Property IDs fetched.");
} catch (SQLException e) {
getLogger().log(Level.WARNING,
"Failed to fetch property ids, rolling back", e);
try {
queryDelegate.rollback();
} catch (SQLException e1) {
getLogger().log(Level.SEVERE, "Failed to roll back", e1);
}
try {
if (rs != null) {
if (rs.getStatement() != null) {
rs.getStatement().close();
}
rs.close();
}
} catch (SQLException e1) {
getLogger().log(Level.WARNING, "Failed to close session", e1);
}
throw e;
}
} | 3.68 |
framework_AutomaticImmediate_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Field should be immediate automatically if it has value change listener";
} | 3.68 |
dubbo_HeaderExchangeChannel_close | // graceful close
@Override
public void close(int timeout) {
if (closed) {
return;
}
if (timeout > 0) {
long start = System.currentTimeMillis();
while (DefaultFuture.hasFuture(channel) && System.currentTimeMillis() - start < timeout) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
logger.warn(TRANSPORT_FAILED_CLOSE, "", "", e.getMessage(), e);
}
}
}
close();
} | 3.68 |
hudi_KafkaOffsetGen_checkTopicExists | /**
* Check if topic exists.
* @param consumer kafka consumer
* @return {@code true} if the topic exists, {@code false} otherwise
*/
public boolean checkTopicExists(KafkaConsumer consumer) {
Map<String, List<PartitionInfo>> result = consumer.listTopics();
return result.containsKey(topicName);
} | 3.68 |
pulsar_MessageDeduplication_purgeInactiveProducers | /**
* Remove from hash maps all the producers that were inactive for more than the configured amount of time.
*/
public synchronized void purgeInactiveProducers() {
long minimumActiveTimestamp = System.currentTimeMillis() - TimeUnit.MINUTES
.toMillis(pulsar.getConfiguration().getBrokerDeduplicationProducerInactivityTimeoutMinutes());
Iterator<Map.Entry<String, Long>> mapIterator = inactiveProducers.entrySet().iterator();
boolean hasInactive = false;
while (mapIterator.hasNext()) {
java.util.Map.Entry<String, Long> entry = mapIterator.next();
String producerName = entry.getKey();
long lastActiveTimestamp = entry.getValue();
if (lastActiveTimestamp < minimumActiveTimestamp) {
log.info("[{}] Purging dedup information for producer {}", topic.getName(), producerName);
mapIterator.remove();
highestSequencedPushed.remove(producerName);
highestSequencedPersisted.remove(producerName);
hasInactive = true;
}
}
if (hasInactive && isEnabled()) {
takeSnapshot(getManagedCursor().getMarkDeletedPosition());
}
} | 3.68 |
hadoop_AbstractDelegationTokenBinding_getDescription | /**
* Return a description.
* This is logged after service start and binding:
* it should be as informative as possible.
* @return a description to log.
*/
public String getDescription() {
return "Token binding " + getKind().toString();
} | 3.68 |
framework_VTwinColSelect_updateCaptions | /**
* Updates the captions above the left (options) and right (selections)
* columns. A {@code null} value clears the corresponding caption.
*
* @param leftCaption
* the left caption to set, or {@code null} to clear
* @param rightCaption
* the right caption to set, or {@code null} to clear
*/
public void updateCaptions(String leftCaption, String rightCaption) {
boolean hasCaptions = leftCaption != null || rightCaption != null;
if (leftCaption == null) {
removeOptionsCaption();
} else {
getOptionsCaption().setText(leftCaption);
}
if (rightCaption == null) {
removeSelectionsCaption();
} else {
getSelectionsCaption().setText(rightCaption);
}
captionWrapper.setVisible(hasCaptions);
} | 3.68 |
framework_Cell_getRow | /**
* Returns the index of the row the cell resides in.
*
* @return the row index
*
*/
public int getRow() {
return row;
} | 3.68 |
hbase_StoreFileReader_getStoreFileScanner | /**
* Get a scanner to scan over this StoreFile.
* @param cacheBlocks should this scanner cache blocks?
* @param pread use pread (for highly concurrent small readers)
* @param isCompaction is scanner being used for compaction?
* @param scannerOrder Order of this scanner relative to other scanners. See
* {@link KeyValueScanner#getScannerOrder()}.
* @param canOptimizeForNonNullColumn {@code true} if we can make sure there is no null column,
* otherwise {@code false}. This is a hint for optimization.
* @return a scanner
*/
public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), !isCompaction,
reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn,
reader.getDataBlockEncoding() == DataBlockEncoding.ROW_INDEX_V1);
} | 3.68 |
hibernate-validator_AbstractMethodOverrideCheck_collectOverriddenMethodsInInterfaces | /**
* Collect overridden methods in the interfaces of a given type.
*
* @param overridingMethod the method for which we want to find the overridden methods
* @param currentTypeElement the class we are currently analyzing
* @param methodInheritanceTreeBuilder the method inheritance tree builder
*/
private void collectOverriddenMethodsInInterfaces(ExecutableElement overridingMethod, TypeElement currentTypeElement,
MethodInheritanceTree.Builder methodInheritanceTreeBuilder) {
for ( TypeMirror implementedInterface : currentTypeElement.getInterfaces() ) {
TypeElement interfaceTypeElement = (TypeElement) typeUtils.asElement( implementedInterface );
ExecutableElement overriddenMethod = getOverriddenMethod( overridingMethod, interfaceTypeElement );
ExecutableElement newOverridingMethod;
if ( overriddenMethod != null ) {
methodInheritanceTreeBuilder.addOverriddenMethod( overridingMethod, overriddenMethod );
newOverridingMethod = overriddenMethod;
}
else {
newOverridingMethod = overridingMethod;
}
collectOverriddenMethodsInInterfaces( newOverridingMethod, interfaceTypeElement, methodInheritanceTreeBuilder );
}
} | 3.68 |
hadoop_TaskPool_onFailure | /**
* Task to invoke on failure.
* @param task task
* @return the builder
*/
public Builder<I> onFailure(FailureTask<I, ?> task) {
this.onFailure = task;
return this;
} | 3.68 |
hbase_RegionStateNode_isInState | /**
* Notice that, we will return true if {@code expected} is empty.
* <p/>
* This is a bit strange but we need this logic, for example, we can change the state to OPENING
* from any state, as in SCP we will not change the state to CLOSED before opening the region.
*/
public boolean isInState(State... expected) {
if (expected.length == 0) {
return true;
}
return getState().matches(expected);
} | 3.68 |
hbase_TableMapReduceUtil_getJar | /**
* Invoke 'getJar' on a custom JarFinder implementation. Useful for some job configuration
* contexts (HBASE-8140) and also for testing on MRv2. Check if we have HADOOP-9426.
* @param my_class the class to find.
* @return a jar file that contains the class, or null.
*/
private static String getJar(Class<?> my_class) {
String ret = null;
try {
ret = JarFinder.getJar(my_class);
} catch (Exception e) {
// toss all other exceptions, related to reflection failure
throw new RuntimeException("getJar invocation failed.", e);
}
return ret;
} | 3.68 |
hudi_BaseHoodieWriteClient_commitCompaction | /**
* Commit a compaction operation. Allow passing additional meta-data to be stored in commit instant file.
*
* @param compactionInstantTime Compaction Instant Time
* @param metadata All the metadata that gets stored along with a commit
* @param extraMetadata Extra Metadata to be stored
*/
public void commitCompaction(String compactionInstantTime, HoodieCommitMetadata metadata,
Option<Map<String, String>> extraMetadata) {
tableServiceClient.commitCompaction(compactionInstantTime, metadata, extraMetadata);
} | 3.68 |
framework_VaadinService_getDeploymentConfiguration | /**
* Gets the deployment configuration. Should be overridden (or otherwise
* intercepted) if the no-arg constructor is used in order to prevent NPEs.
*
* @return the deployment configuration
*/
public DeploymentConfiguration getDeploymentConfiguration() {
return deploymentConfiguration;
} | 3.68 |
hbase_ScanQueryMatcher_setToNewRow | /**
* Set the row when there is change in row
*/
public void setToNewRow(Cell currentRow) {
this.currentRow = currentRow;
columns.reset();
reset();
} | 3.68 |
flink_KeyContextHandler_hasKeyContext | /**
* Whether the {@link Input} has "KeyContext". If false, we can omit the call of {@link
* Input#setKeyContextElement} for each record.
*
* @return True if the {@link Input} has "KeyContext", false otherwise.
*/
default boolean hasKeyContext() {
return hasKeyContext1();
} | 3.68 |
framework_ColumnVisibilityChangeEvent_getColumn | /**
* Returns the column where the visibility change occurred.
*
* @return the column where the visibility change occurred.
*/
public Column<?, T> getColumn() {
return column;
} | 3.68 |
morf_SchemaBean_tableExists | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.metadata.Schema#tableExists(java.lang.String)
*/
@Override
public boolean tableExists(String name) {
return tables.containsKey(name.toUpperCase());
} | 3.68 |
dubbo_MultiValueConverter_convertIfPossible | /**
* @deprecated will be removed in 3.3.0
*/
@Deprecated
static <T> T convertIfPossible(Object source, Class<?> multiValueType, Class<?> elementType) {
Class<?> sourceType = source.getClass();
MultiValueConverter converter = find(sourceType, multiValueType);
if (converter != null) {
return (T) converter.convert(source, multiValueType, elementType);
}
return null;
} | 3.68 |
framework_Escalator_getLogicalRowIndex | /**
* Gets the logical row index for the given table row element.
*
* @param tr
* the table row element inside this container
* @return the logical index of the given element
*/
public int getLogicalRowIndex(final TableRowElement tr) {
return tr.getSectionRowIndex();
} | 3.68 |
framework_AbstractComponent_setReadOnly | /**
* Sets the read-only status in the state of this {@code AbstractComponent}.
* This method should be made public in {@link Component Components} that
* implement {@link HasValue}.
*
* @param readOnly
* a boolean value specifying whether the component is put
* in read-only mode or not
*/
protected void setReadOnly(boolean readOnly) {
if (getState(false) instanceof AbstractFieldState) {
if (readOnly != isReadOnly()) {
((AbstractFieldState) getState()).readOnly = readOnly;
}
} else {
throw new IllegalStateException(
"This component does not support the read-only mode, since state is of type "
+ getStateType().getSimpleName()
+ " and does not inherit "
+ AbstractFieldState.class.getSimpleName());
}
} | 3.68 |
shardingsphere-elasticjob_HttpParam_isWriteMethod | /**
* Is write method.
*
* @return write method or not
*/
public boolean isWriteMethod() {
return Arrays.asList("POST", "PUT", "DELETE").contains(method.toUpperCase());
} | 3.68 |
hbase_RegionState_isReadyToOnline | /**
* Check if a region state can transition to online
*/
public boolean isReadyToOnline() {
return isOpened() || isSplittingNew() || isMergingNew();
} | 3.68 |
dubbo_AbstractClusterInvoker_select | /**
* Select an invoker using loadbalance policy.</br>
* a) Firstly, select an invoker using loadbalance. If this invoker is in previously selected list, or,
* if this invoker is unavailable, then continue step b (reselect), otherwise return the first selected invoker</br>
* <p>
* b) Reselection, the validation rule for reselection: selected > available. This rule guarantees that
* the selected invoker has the minimum chance to be one in the previously selected list, and also
* guarantees this invoker is available.
*
* @param loadbalance load balance policy
* @param invocation invocation
* @param invokers invoker candidates
* @param selected exclude selected invokers or not
* @return the invoker which will finally do the invoke.
* @throws RpcException exception
*/
protected Invoker<T> select(
LoadBalance loadbalance, Invocation invocation, List<Invoker<T>> invokers, List<Invoker<T>> selected)
throws RpcException {
if (CollectionUtils.isEmpty(invokers)) {
return null;
}
String methodName = invocation == null ? StringUtils.EMPTY_STRING : RpcUtils.getMethodName(invocation);
boolean sticky =
invokers.get(0).getUrl().getMethodParameter(methodName, CLUSTER_STICKY_KEY, DEFAULT_CLUSTER_STICKY);
// ignore overloaded method
if (stickyInvoker != null && !invokers.contains(stickyInvoker)) {
stickyInvoker = null;
}
// ignore concurrency problem
if (sticky && stickyInvoker != null && (selected == null || !selected.contains(stickyInvoker))) {
if (availableCheck && stickyInvoker.isAvailable()) {
return stickyInvoker;
}
}
Invoker<T> invoker = doSelect(loadbalance, invocation, invokers, selected);
if (sticky) {
stickyInvoker = invoker;
}
return invoker;
} | 3.68 |
hbase_RegionStates_createRegionStateNode | // ==========================================================================
// RegionStateNode helpers
// ==========================================================================
RegionStateNode createRegionStateNode(RegionInfo regionInfo) {
synchronized (regionsMapLock) {
RegionStateNode node = regionsMap.computeIfAbsent(regionInfo.getRegionName(),
key -> new RegionStateNode(regionInfo, regionInTransition));
if (encodedRegionsMap.get(regionInfo.getEncodedName()) != node) {
encodedRegionsMap.put(regionInfo.getEncodedName(), node);
}
return node;
}
} | 3.68 |
framework_VTabsheet_scrollAccordingToScrollTarget | /**
* Scroll the tab bar according to the last scrollTarget.
*
* @param scrollTarget
* the scroll button that was pressed
*/
private void scrollAccordingToScrollTarget(
com.google.gwt.dom.client.Element scrollTarget) {
if (scrollTarget == null) {
return;
}
int newFirstIndex = -1;
// Scroll left.
if (hasScrolledTabs() && scrollTarget == scrollerPrev) {
newFirstIndex = tb.scrollLeft(scrollerIndex);
// Scroll right.
} else if (hasClippedTabs() && scrollTarget == scrollerNext) {
newFirstIndex = tb.scrollRight(scrollerIndex);
}
if (newFirstIndex != -1) {
scrollerIndex = newFirstIndex;
Tab currentFirst = tb.getTab(newFirstIndex);
currentFirst.setStyleNames(scrollerIndex == activeTabIndex, true,
true);
scrollerPositionTabId = currentFirst.id;
updateTabScroller();
}
// scrolling updated first visible styles but only removed the previous
// focus style if the focused tab was also the first tab
if (selectionHandler.focusedTabIndex >= 0
&& selectionHandler.focusedTabIndex != scrollerIndex) {
tb.getTab(selectionHandler.focusedTabIndex).setStyleNames(
selectionHandler.focusedTabIndex == activeTabIndex, false);
}
// For this to work well, make sure the method gets called only from
// user events.
selectionHandler.focusTabAtIndex(scrollerIndex);
/*
* Update the bookkeeping or the next keyboard navigation starts from
* the wrong tab.
*
* Note: unusually, this can move the focusedTabIndex to point to a
* disabled tab. We could add more logic that only focuses an
* unselectable first tab if there are no selectable tabs in view at
* all, but for now it's left like this for simplicity. Another option
* would be to put canSelectTab(scrollerIndex) around both of these
* lines, but that would have more impact on the experienced behavior
* (using only keyboard or only the arrow buttons seems more likely than
* mixing them up actively).
*/
selectionHandler.focusedTabIndex = scrollerIndex;
} | 3.68 |
hbase_Encryption_getSupportedCiphers | /**
* Get names of supported encryption algorithms
* @return Array of strings, each represents a supported encryption algorithm
*/
public static String[] getSupportedCiphers(Configuration conf) {
return getCipherProvider(conf).getSupportedCiphers();
} | 3.68 |
querydsl_StringExpression_toUpperCase | /**
* Create a {@code this.toUpperCase()} expression
*
* <p>Get the upper case form</p>
*
* @return this.toUpperCase()
* @see java.lang.String#toUpperCase()
*/
public StringExpression toUpperCase() {
return upper();
} | 3.68 |
flink_CatalogManager_resolveCatalogTable | /** Resolves a {@link CatalogTable} to a validated {@link ResolvedCatalogTable}. */
public ResolvedCatalogTable resolveCatalogTable(CatalogTable table) {
Preconditions.checkNotNull(schemaResolver, "Schema resolver is not initialized.");
if (table instanceof ResolvedCatalogTable) {
return (ResolvedCatalogTable) table;
}
final ResolvedSchema resolvedSchema = table.getUnresolvedSchema().resolve(schemaResolver);
// Validate partition keys are included in physical columns
final List<String> physicalColumns =
resolvedSchema.getColumns().stream()
.filter(Column::isPhysical)
.map(Column::getName)
.collect(Collectors.toList());
table.getPartitionKeys()
.forEach(
partitionKey -> {
if (!physicalColumns.contains(partitionKey)) {
throw new ValidationException(
String.format(
"Invalid partition key '%s'. A partition key must "
+ "reference a physical column in the schema. "
+ "Available columns are: %s",
partitionKey, physicalColumns));
}
});
return new ResolvedCatalogTable(table, resolvedSchema);
} | 3.68 |
hadoop_MountResponse_writeMNTResponse | /**
* Response for RPC call {@link MountInterface.MNTPROC#MNT}.
* @param status status of mount response
* @param xdr XDR message object
* @param xid transaction id
* @param handle file handle
* @return response XDR
*/
public static XDR writeMNTResponse(int status, XDR xdr, int xid,
byte[] handle) {
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
xdr.writeInt(status);
if (status == MNT_OK) {
xdr.writeVariableOpaque(handle);
// Only MountV3 returns a list of supported authFlavors
xdr.writeInt(1);
xdr.writeInt(AuthFlavor.AUTH_SYS.getValue());
}
return xdr;
} | 3.68 |
framework_TwinColSelect_setRows | /**
* Sets the number of rows in the editor. If the number of rows is set to 0,
* the actual number of displayed rows is determined implicitly by the
* adapter.
* <p>
* If a height is set (using {@link #setHeight(String)} or
* {@link #setHeight(float, int)}) it overrides the number of rows. Leave
* the height undefined to use this method. This is the opposite of how
* {@link #setColumns(int)} works.
*
*
* @param rows
* the number of rows to set.
*/
public void setRows(int rows) {
if (rows < 0) {
rows = 0;
}
if (this.rows != rows) {
this.rows = rows;
markAsDirty();
}
} | 3.68 |
hadoop_IOStatisticsBinding_measureDurationOfInvocation | /**
* Given an IOException raising callable/lambda expression,
* execute it and update the relevant statistic,
* returning the measured duration.
*
* {@link #trackDurationOfInvocation(DurationTrackerFactory, String, InvocationRaisingIOE)}
* with the duration returned for logging etc.; added as a new
* method to avoid linking problems with any code calling the existing
* method.
*
* @param factory factory of duration trackers
* @param statistic statistic key
* @param input input callable.
* @return the duration of the operation, as measured by the duration tracker.
* @throws IOException IO failure.
*/
public static Duration measureDurationOfInvocation(
DurationTrackerFactory factory,
String statistic,
InvocationRaisingIOE input) throws IOException {
// create the tracker outside try-with-resources so
// that failures can be set in the catcher.
DurationTracker tracker = createTracker(factory, statistic);
try {
// exec the input function and return its value
input.apply();
} catch (IOException | RuntimeException e) {
// input function failed: note it
tracker.failed();
// and rethrow
throw e;
} finally {
// update the tracker.
// this is called after the catch() call would have
// set the failed flag.
tracker.close();
}
return tracker.asDuration();
} | 3.68 |
hbase_Mutation_getACL | /** Returns The serialized ACL for this operation, or null if none */
public byte[] getACL() {
return getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
} | 3.68 |
framework_AbstractInMemoryContainer_getFilters | /**
* Returns the internal collection of filters. The returned collection
* should not be modified by callers outside this class.
*
* @return Set<Filter>
*/
protected Set<Filter> getFilters() {
return filters;
} | 3.68 |
hadoop_ServiceRecord_attributes | /**
* The map of "other" attributes set when parsing. These
* are not included in the JSON value of this record when it
* is generated.
* @return a map of any unknown attributes in the deserialized JSON.
*/
@JsonAnyGetter
public Map<String, String> attributes() {
return attributes;
} | 3.68 |
hadoop_PeriodicRLESparseResourceAllocation_getCapacityAtTime | /**
* Get capacity at time based on periodic repetition.
*
* @param tick UTC time for which the allocated {@link Resource} is queried.
* @return {@link Resource} allocated at specified time
*/
public Resource getCapacityAtTime(long tick) {
long convertedTime = (tick % timePeriod);
return super.getCapacityAtTime(convertedTime);
} | 3.68 |
AreaShop_GeneralRegion_restrictedToWorld | /**
* Check if for renting you need to be in the correct world.
* @return true if you need to be in the same world as the region, otherwise false
*/
public boolean restrictedToWorld() {
return getBooleanSetting("general.restrictedToWorld") || restrictedToRegion();
} | 3.68 |
morf_FieldLiteral_hashCode | /**
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return new HashCodeBuilder()
.appendSuper(super.hashCode())
.append(this.value)
.append(this.dataType)
.toHashCode();
} | 3.68 |
flink_JoinNode_getOperator | /**
* Gets the contract object for this match node.
*
* @return The contract.
*/
@Override
public InnerJoinOperatorBase<?, ?, ?, ?> getOperator() {
return (InnerJoinOperatorBase<?, ?, ?, ?>) super.getOperator();
} | 3.68 |
framework_UIDL_getMapAttribute | /**
* Gets the named attribute as a Map of named values (key/value pairs).
*
* @param name
* the name of the attribute to get
* @return the attribute Map
*/
public ValueMap getMapAttribute(String name) {
return attr().getValueMap(name);
} | 3.68 |
dubbo_AbstractConnectionClient_getCounter | /**
* Get counter
*/
public long getCounter() {
return COUNTER_UPDATER.get(this);
} | 3.68 |
framework_MenuBarsWithNesting_createSecondMenuBar | /*
* Returns a menu bar containing items with icons. The last menu item is
* nested and its submenu contains items with and without icons.
*/
private MenuBar createSecondMenuBar() {
MenuBar menuBar = new MenuBar();
int n = itemNames.length;
for (int i = 0; i < n - 1; i++) {
menuBar.addItem(itemNames[i], itemIcons[i], selectionCommand);
}
MenuItem last = menuBar.addItem(itemNames[n - 1], itemIcons[n - 1],
null);
for (int i = 0; i < nestedItemnames.length; i++) {
last.addItem(nestedItemnames[i], nestedItemIcons[i],
selectionCommand);
}
return menuBar;
} | 3.68 |
morf_AliasedField_eq | /**
* @param value value to compare to
* @return criteria for equality of this field to value.
*/
public Criterion eq(Object value) {
return Criterion.eq(this, value);
} | 3.68 |
morf_AliasedField_immutableDslEnabled | /**
* TODO remove when we remove the old mutable behaviour
*
* @return true if immutable builder behaviour is enabled.
*/
public static boolean immutableDslEnabled() {
if (isImmutableBuilderEnabledThreadLocalOverride.get() != null) {
return isImmutableBuilderEnabledThreadLocalOverride.get();
}
return Boolean.TRUE.toString()
.equalsIgnoreCase(System.getProperty("AliasedField.IMMUTABLE_DSL_ENABLED"));
} | 3.68 |
flink_FlinkContainers_getTaskManagers | /** Gets TaskManager containers. */
public List<GenericContainer<?>> getTaskManagers() {
return this.taskManagers;
} | 3.68 |
hadoop_ServletUtil_getParameter | /**
* Get a parameter from a ServletRequest.
* Return null if the parameter contains only white spaces.
*
* @param request request.
* @param name name.
* @return get a parameter from a ServletRequest.
*/
public static String getParameter(ServletRequest request, String name) {
String s = request.getParameter(name);
if (s == null) {
return null;
}
s = s.trim();
return s.length() == 0? null: s;
} | 3.68 |
hbase_CellUtil_isDelete | /**
* Return true if a delete type, a {@link KeyValue.Type#Delete} or a {@link KeyValue.Type#DeleteFamily}
* or a {@link KeyValue.Type#DeleteColumn} KeyValue type.
*/
@SuppressWarnings("deprecation")
public static boolean isDelete(final Cell cell) {
return PrivateCellUtil.isDelete(cell.getTypeByte());
} | 3.68 |
graphhopper_ArrayUtil_transform | /**
* Maps one array using another, i.e. every element arr[x] is replaced by map[arr[x]]
*/
public static void transform(IntIndexedContainer arr, IntIndexedContainer map) {
for (int i = 0; i < arr.size(); ++i)
arr.set(i, map.get(arr.get(i)));
} | 3.68 |
hbase_ClusterConnectionFactory_createAsyncClusterConnection | /**
* Create a new {@link AsyncClusterConnection} instance to be used at server side where we have a
* {@link ConnectionRegistryEndpoint}.
*/
public static AsyncClusterConnection createAsyncClusterConnection(
ConnectionRegistryEndpoint endpoint, Configuration conf, SocketAddress localAddress, User user)
throws IOException {
ShortCircuitConnectionRegistry registry = new ShortCircuitConnectionRegistry(endpoint);
return createAsyncClusterConnection(conf, registry, localAddress, user);
} | 3.68 |
AreaShop_SignsFeature_getSignLocations | /**
* Get a list with all sign locations.
* @return A List with all sign locations
*/
public List<Location> getSignLocations() {
List<Location> result = new ArrayList<>();
for(RegionSign sign : signs.values()) {
result.add(sign.getLocation());
}
return result;
} | 3.68 |
morf_OracleMetaDataProvider_viewExists | /**
* @see org.alfasoftware.morf.metadata.Schema#viewExists(java.lang.String)
*/
@Override
public boolean viewExists(String name) {
return viewMap().containsKey(name.toUpperCase());
} | 3.68 |
hadoop_ResourceRequest_setExecutionTypeRequest | /**
* Set the <code>ExecutionTypeRequest</code> of the requested container.
*
* @param execSpec
* ExecutionTypeRequest of the requested container
*/
@Public
@Evolving
public void setExecutionTypeRequest(ExecutionTypeRequest execSpec) {
throw new UnsupportedOperationException();
} | 3.68 |
hadoop_SliderFileSystem_deleteComponentsVersionDirIfEmpty | /**
* Deletes the components version directory.
*
* @param serviceVersion the service version whose component directories should be checked
* @throws IOException
*/
public void deleteComponentsVersionDirIfEmpty(String serviceVersion)
throws IOException {
Path path = new Path(new Path(getAppDir(), "components"), serviceVersion);
if (fileSystem.exists(path) && fileSystem.listStatus(path).length == 0) {
fileSystem.delete(path, true);
LOG.info("deleted dir {}", path);
}
Path publicResourceDir = new Path(new Path(getBasePath(),
getAppDir().getName() + "/" + "components"), serviceVersion);
if (fileSystem.exists(publicResourceDir)
&& fileSystem.listStatus(publicResourceDir).length == 0) {
fileSystem.delete(publicResourceDir, true);
LOG.info("deleted public resource dir {}", publicResourceDir);
}
} | 3.68 |
flink_DataStructureConverter_toInternalOrNull | /**
* Converts to internal data structure or {@code null}.
*
* <p>The nullability could be derived from the data type. However, this method reduces null
* checks.
*/
default I toInternalOrNull(E external) {
if (external == null) {
return null;
}
return toInternal(external);
} | 3.68 |
flink_TimeWindow_intersects | /** Returns {@code true} if this window intersects the given window. */
public boolean intersects(TimeWindow other) {
return this.start <= other.end && this.end >= other.start;
} | 3.68 |
hbase_RequestConverter_buildGetSpaceQuotaSnapshotsRequest | /**
* Returns a {@link GetSpaceQuotaSnapshotsRequest} object.
*/
public static GetSpaceQuotaSnapshotsRequest buildGetSpaceQuotaSnapshotsRequest() {
return GetSpaceQuotaSnapshotsRequest.getDefaultInstance();
} | 3.68 |
querydsl_GroupBy_min | /**
* Create a new aggregating min expression
*
* @param expression expression for which the minimum value will be used in the group by projection
* @return wrapper expression
*/
public static <E extends Comparable<? super E>> AbstractGroupExpression<E, E> min(Expression<E> expression) {
return new GMin<E>(expression);
} | 3.68 |
hadoop_ServiceLauncher_warn | /**
* Print a warning message.
* <p>
* This tries to log to the log's warn() operation.
* If the log at that level is disabled it logs to system error
* @param text warning text
*/
protected void warn(String text) {
if (LOG.isWarnEnabled()) {
LOG.warn(text);
} else {
System.err.println(text);
}
} | 3.68 |
hudi_HoodieStreamerUtils_createHoodieRecords | /**
* Generates HoodieRecords for the avro data read from source.
* Takes care of dropping columns, precombine, auto key generation.
* Both AVRO and SPARK record types are supported.
*/
static Option<JavaRDD<HoodieRecord>> createHoodieRecords(HoodieStreamer.Config cfg, TypedProperties props, Option<JavaRDD<GenericRecord>> avroRDDOptional,
SchemaProvider schemaProvider, HoodieRecord.HoodieRecordType recordType, boolean autoGenerateRecordKeys,
String instantTime) {
boolean shouldCombine = cfg.filterDupes || cfg.operation.equals(WriteOperationType.UPSERT);
Set<String> partitionColumns = getPartitionColumns(props);
return avroRDDOptional.map(avroRDD -> {
JavaRDD<HoodieRecord> records;
SerializableSchema avroSchema = new SerializableSchema(schemaProvider.getTargetSchema());
SerializableSchema processedAvroSchema = new SerializableSchema(isDropPartitionColumns(props) ? HoodieAvroUtils.removeMetadataFields(avroSchema.get()) : avroSchema.get());
if (recordType == HoodieRecord.HoodieRecordType.AVRO) {
records = avroRDD.mapPartitions(
(FlatMapFunction<Iterator<GenericRecord>, HoodieRecord>) genericRecordIterator -> {
if (autoGenerateRecordKeys) {
props.setProperty(KeyGenUtils.RECORD_KEY_GEN_PARTITION_ID_CONFIG, String.valueOf(TaskContext.getPartitionId()));
props.setProperty(KeyGenUtils.RECORD_KEY_GEN_INSTANT_TIME_CONFIG, instantTime);
}
BuiltinKeyGenerator builtinKeyGenerator = (BuiltinKeyGenerator) HoodieSparkKeyGeneratorFactory.createKeyGenerator(props);
List<HoodieRecord> avroRecords = new ArrayList<>();
while (genericRecordIterator.hasNext()) {
GenericRecord genRec = genericRecordIterator.next();
HoodieKey hoodieKey = new HoodieKey(builtinKeyGenerator.getRecordKey(genRec), builtinKeyGenerator.getPartitionPath(genRec));
GenericRecord gr = isDropPartitionColumns(props) ? HoodieAvroUtils.removeFields(genRec, partitionColumns) : genRec;
HoodieRecordPayload payload = shouldCombine ? DataSourceUtils.createPayload(cfg.payloadClassName, gr,
(Comparable) HoodieAvroUtils.getNestedFieldVal(gr, cfg.sourceOrderingField, false, props.getBoolean(
KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.key(),
Boolean.parseBoolean(KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.defaultValue()))))
: DataSourceUtils.createPayload(cfg.payloadClassName, gr);
avroRecords.add(new HoodieAvroRecord<>(hoodieKey, payload));
}
return avroRecords.iterator();
});
} else if (recordType == HoodieRecord.HoodieRecordType.SPARK) {
// TODO we should remove it if we can read InternalRow from source.
records = avroRDD.mapPartitions(itr -> {
if (autoGenerateRecordKeys) {
props.setProperty(KeyGenUtils.RECORD_KEY_GEN_PARTITION_ID_CONFIG, String.valueOf(TaskContext.getPartitionId()));
props.setProperty(KeyGenUtils.RECORD_KEY_GEN_INSTANT_TIME_CONFIG, instantTime);
}
BuiltinKeyGenerator builtinKeyGenerator = (BuiltinKeyGenerator) HoodieSparkKeyGeneratorFactory.createKeyGenerator(props);
StructType baseStructType = AvroConversionUtils.convertAvroSchemaToStructType(processedAvroSchema.get());
StructType targetStructType = isDropPartitionColumns(props) ? AvroConversionUtils
.convertAvroSchemaToStructType(HoodieAvroUtils.removeFields(processedAvroSchema.get(), partitionColumns)) : baseStructType;
HoodieAvroDeserializer deserializer = SparkAdapterSupport$.MODULE$.sparkAdapter().createAvroDeserializer(processedAvroSchema.get(), baseStructType);
return new CloseableMappingIterator<>(ClosableIterator.wrap(itr), rec -> {
InternalRow row = (InternalRow) deserializer.deserialize(rec).get();
String recordKey = builtinKeyGenerator.getRecordKey(row, baseStructType).toString();
String partitionPath = builtinKeyGenerator.getPartitionPath(row, baseStructType).toString();
return new HoodieSparkRecord(new HoodieKey(recordKey, partitionPath),
HoodieInternalRowUtils.getCachedUnsafeProjection(baseStructType, targetStructType).apply(row), targetStructType, false);
});
});
} else {
throw new UnsupportedOperationException(recordType.name());
}
return records;
});
} | 3.68 |
querydsl_Expressions_numberPath | /**
* Create new Path expression
*
* @param type type of expression
* @param metadata path metadata
* @param <T> type of expression
* @return path expression
*/
public static <T extends Number & Comparable<?>> NumberPath<T> numberPath(Class<? extends T> type, PathMetadata metadata) {
return new NumberPath<T>(type, metadata);
} | 3.68 |
hbase_AsyncAdmin_replicationPeerModificationSwitch | /**
* Enable or disable replication peer modification.
* <p/>
* This is especially useful when you want to change the replication peer storage.
* @param on {@code true} means enable, otherwise disable
* @return the previous enable/disable state wrapped by a {@link CompletableFuture}
*/
default CompletableFuture<Boolean> replicationPeerModificationSwitch(boolean on) {
return replicationPeerModificationSwitch(on, false);
} | 3.68 |
flink_UserDefinedFunctionHelper_validateClass | /** Validates a {@link UserDefinedFunction} class for usage in the API. */
private static void validateClass(
Class<? extends UserDefinedFunction> functionClass,
boolean requiresDefaultConstructor) {
if (TableFunction.class.isAssignableFrom(functionClass)) {
validateNotSingleton(functionClass);
}
validateInstantiation(functionClass, requiresDefaultConstructor);
validateImplementationMethods(functionClass);
} | 3.68 |
hbase_TableSchemaModel___getIsMeta | /** Returns true if the IS_META attribute exists and is true. */
public boolean __getIsMeta() {
Object o = attrs.get(IS_META);
return o != null && Boolean.parseBoolean(o.toString());
} | 3.68 |
flink_RpcEndpoint_close | /** Shutdown the {@link ScheduledThreadPoolExecutor} and remove all the pending tasks. */
@Override
public void close() {
if (!mainScheduledExecutor.isShutdown()) {
mainScheduledExecutor.shutdownNow();
}
} | 3.68 |
hadoop_NMStateStoreService_serviceInit | /** Initialize the state storage */
@Override
public void serviceInit(Configuration conf) throws IOException {
initStorage(conf);
} | 3.68 |
hbase_MasterObserver_preRestoreSnapshot | /**
* Called before a snapshot is restored. Called as part of restoreSnapshot RPC call.
* @param ctx the environment to interact with the framework and master
* @param snapshot the SnapshotDescriptor for the snapshot
* @param tableDescriptor the TableDescriptor of the table to restore
*/
default void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException {
} | 3.68 |
framework_TreeFilesystem_nodeExpand | /**
* Handle tree expand event, populating the expanded node's children with new files
* and directories.
*/
@Override
public void nodeExpand(ExpandEvent event) {
final Item i = tree.getItem(event.getItemId());
if (!tree.hasChildren(i)) {
// populate tree's node which was expanded
populateNode(event.getItemId().toString(), event.getItemId());
}
} | 3.68 |
flink_XORShiftRandom_next | /**
* All other methods like nextInt()/nextDouble()... depend on this, so we just need to
* override this.
*
* @param bits Random bits
* @return The next pseudorandom value from this random number generator's sequence
*/
@Override
public int next(int bits) {
long nextSeed = seed ^ (seed << 21);
nextSeed ^= (nextSeed >>> 35);
nextSeed ^= (nextSeed << 4);
seed = nextSeed;
return (int) (nextSeed & ((1L << bits) - 1));
} | 3.68 |
hbase_AsyncAdmin_getRegionServers | /** Returns current live region servers list wrapped by {@link CompletableFuture} */
default CompletableFuture<Collection<ServerName>> getRegionServers() {
return getClusterMetrics(EnumSet.of(Option.SERVERS_NAME))
.thenApply(ClusterMetrics::getServersName);
} | 3.68 |
framework_VComboBox_getIconUri | /**
* Get the URI of the icon. Used when constructing the displayed option.
*
* @return real (translated) icon URI or null if none
*/
public String getIconUri() {
ApplicationConnection client = connector.getConnection();
return client.translateVaadinUri(untranslatedIconUri);
} | 3.68 |
flink_Tuple15_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14), where the individual fields are the value returned by calling
* {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ","
+ StringUtils.arrayAwareToString(this.f5)
+ ","
+ StringUtils.arrayAwareToString(this.f6)
+ ","
+ StringUtils.arrayAwareToString(this.f7)
+ ","
+ StringUtils.arrayAwareToString(this.f8)
+ ","
+ StringUtils.arrayAwareToString(this.f9)
+ ","
+ StringUtils.arrayAwareToString(this.f10)
+ ","
+ StringUtils.arrayAwareToString(this.f11)
+ ","
+ StringUtils.arrayAwareToString(this.f12)
+ ","
+ StringUtils.arrayAwareToString(this.f13)
+ ","
+ StringUtils.arrayAwareToString(this.f14)
+ ")";
} | 3.68 |
hbase_AuthUtil_getAuthRenewalChore | /**
* Checks if security is enabled and if so, launches chore for refreshing kerberos ticket.
* @return a ScheduledChore for renewals.
*/
@InterfaceAudience.Private
public static ScheduledChore getAuthRenewalChore(final UserGroupInformation user,
Configuration conf) {
if (!user.hasKerberosCredentials() || !isAuthRenewalChoreEnabled(conf)) {
return null;
}
Stoppable stoppable = createDummyStoppable();
// if you're in debug mode this is useful to avoid getting spammed by the getTGT()
// you can increase this, keeping in mind that the default refresh window is 0.8
// e.g. 5min tgt * 0.8 = 4min refresh so interval is better be way less than 1min
final int CHECK_TGT_INTERVAL = 30 * 1000; // 30sec
return new ScheduledChore("RefreshCredentials", stoppable, CHECK_TGT_INTERVAL) {
@Override
protected void chore() {
try {
user.checkTGTAndReloginFromKeytab();
} catch (IOException e) {
LOG.error("Got exception while trying to refresh credentials: " + e.getMessage(), e);
}
}
};
} | 3.68 |
hbase_WALSplitUtil_getMutationsFromWALEntry | /**
* This function is used to construct mutations from a WALEntry. It also reconstructs WALKey &
* WALEdit from the passed in WALEntry
* @param logEntry a pair of WALKey and WALEdit that stores the WALKey and WALEdit instances
* extracted from the passed in WALEntry.
* @return list of Pair<MutationType, Mutation> to be replayed
* @deprecated Since 3.0.0, will be removed in 4.0.0.
*/
@Deprecated
public static List<MutationReplay> getMutationsFromWALEntry(AdminProtos.WALEntry entry,
CellScanner cells, Pair<WALKey, WALEdit> logEntry, Durability durability) throws IOException {
if (entry == null) {
// return an empty array
return Collections.emptyList();
}
long replaySeqId = (entry.getKey().hasOrigSequenceNumber())
? entry.getKey().getOrigSequenceNumber()
: entry.getKey().getLogSequenceNumber();
int count = entry.getAssociatedCellCount();
List<MutationReplay> mutations = new ArrayList<>();
Cell previousCell = null;
Mutation m = null;
WALKeyImpl key = null;
WALEdit val = null;
if (logEntry != null) {
val = new WALEdit();
}
for (int i = 0; i < count; i++) {
// Throw index out of bounds if our cell count is off
if (!cells.advance()) {
throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
}
Cell cell = cells.current();
if (val != null) {
val.add(cell);
}
boolean isNewRowOrType =
previousCell == null || previousCell.getTypeByte() != cell.getTypeByte()
|| !CellUtil.matchingRows(previousCell, cell);
if (isNewRowOrType) {
// Create new mutation
if (CellUtil.isDelete(cell)) {
m = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
// Deletes don't have nonces.
mutations.add(new MutationReplay(ClientProtos.MutationProto.MutationType.DELETE, m,
HConstants.NO_NONCE, HConstants.NO_NONCE));
} else {
m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
// Puts might come from increment or append, thus we need nonces.
long nonceGroup =
entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
mutations.add(
new MutationReplay(ClientProtos.MutationProto.MutationType.PUT, m, nonceGroup, nonce));
}
}
if (CellUtil.isDelete(cell)) {
((Delete) m).add(cell);
} else {
((Put) m).add(cell);
}
m.setDurability(durability);
previousCell = cell;
}
// reconstruct WALKey
if (logEntry != null) {
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto =
entry.getKey();
List<UUID> clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount());
for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) {
clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
}
key = new WALKeyImpl(walKeyProto.getEncodedRegionName().toByteArray(),
TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId,
walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(), walKeyProto.getNonce(),
null);
logEntry.setFirst(key);
logEntry.setSecond(val);
}
return mutations;
} | 3.68 |
framework_FocusableFlexTable_addFocusHandler | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.HasFocusHandlers#addFocusHandler(com.
* google.gwt.event.dom.client.FocusHandler)
*/
@Override
public HandlerRegistration addFocusHandler(FocusHandler handler) {
return addDomHandler(handler, FocusEvent.getType());
} | 3.68 |
morf_MergeStatement_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "SQL MERGE INTO [" + table + "] USING [" + selectStatement + "] KEY [" + tableUniqueKey + "] IF UPDATING [" + ifUpdating + "]";
} | 3.68 |
flink_BatchShuffleReadBufferPool_destroy | /** Destroys this buffer pool and after which, no buffer can be allocated any more. */
public void destroy() {
synchronized (buffers) {
destroyed = true;
buffers.clear();
buffers.notifyAll();
}
} | 3.68 |
hbase_IncrementingEnvironmentEdge_currentTime | /**
* {@inheritDoc}
* <p>
* This method increments a known value for the current time each time this method is called. The
* first value is 1.
* </p>
*/
@Override
public synchronized long currentTime() {
return timeIncrement++;
} | 3.68 |
framework_ContainerHierarchicalWrapper_addItem | /**
* Adds a new Item by its ID to the underlying container and to the
* hierarchy.
*
* @param itemId
* the ID of the Item to be created.
* @return the added Item or <code>null</code> if the operation failed.
* @throws UnsupportedOperationException
* if the addItem is not supported.
*/
@Override
public Item addItem(Object itemId) throws UnsupportedOperationException {
// Null ids are not accepted
if (itemId == null) {
return null;
}
final Item item = container.addItem(itemId);
if (!hierarchical && item != null) {
addToHierarchyWrapper(itemId);
}
return item;
} | 3.68 |
framework_HierarchyMapper_getHierarchy | /**
* Gets the full hierarchy tree starting from the given node. The starting node
* can be omitted.
*
* @param parent
* the parent node to start from
* @param includeParent
* {@code true} to include the parent; {@code false} if not
* @return the flattened hierarchy as a stream
*/
private Stream<T> getHierarchy(T parent, boolean includeParent) {
return Stream.of(parent)
.flatMap(node -> getChildrenStream(node, includeParent));
} | 3.68 |
hbase_ExtendedCellBuilderFactory_create | /**
* Allows creating a cell with the given CellBuilderType.
* @param type the type of CellBuilder(DEEP_COPY or SHALLOW_COPY).
* @return the cell that is created
*/
public static ExtendedCellBuilder create(CellBuilderType type) {
switch (type) {
case SHALLOW_COPY:
return new IndividualBytesFieldCellBuilder();
case DEEP_COPY:
return new KeyValueBuilder();
default:
throw new UnsupportedOperationException("The type:" + type + " is unsupported");
}
} | 3.68 |
dubbo_FileCacheStoreFactory_getCacheMap | /**
* for unit test only
*/
@Deprecated
static Map<String, FileCacheStore> getCacheMap() {
return cacheMap;
} | 3.68 |
hadoop_DataNodeFaultInjector_delayWriteToDisk | /**
* Used as a hook to delay writing a packet to disk.
*/
public void delayWriteToDisk() {} | 3.68 |
hadoop_BlockMissingException_getFile | /**
* Returns the name of the corrupted file.
* @return name of corrupted file
*/
public String getFile() {
return filename;
} | 3.68 |
querydsl_GenericExporter_setKeywords | /**
* Set the keywords to be used
*
* @param keywords the keywords to be used
*/
public void setKeywords(Collection<String> keywords) {
codegenModule.bind(CodegenModule.KEYWORDS, keywords);
} | 3.68 |
hbase_ProcedureWALPrettyPrinter_run | /**
* Pass one or more log file names and formatting options and it will dump out a text version of
* the contents on <code>stdout</code>. Command line arguments Thrown upon file system errors etc.
*/
@Override
public int run(final String[] args) throws IOException {
// create options
Options options = new Options();
options.addOption("h", "help", false, "Output help message");
options.addOption("f", "file", true, "File to print");
final List<Path> files = new ArrayList<>();
try {
CommandLine cmd = new DefaultParser().parse(options, args);
if (cmd.hasOption("f")) {
files.add(new Path(cmd.getOptionValue("f")));
}
if (files.isEmpty() || cmd.hasOption("h")) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("ProcedureWALPrettyPrinter ", options, true);
return (-1);
}
} catch (ParseException e) {
LOG.error("Failed to parse commandLine arguments", e);
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("ProcedureWALPrettyPrinter ", options, true);
return (-1);
}
// get configuration, file system, and process the given files
for (Path file : files) {
processFile(getConf(), file);
}
return (0);
} | 3.68 |
hbase_JVM_runUnixMXBeanMethod | /**
* Loads the implementation of UnixOperatingSystemMXBean for the Oracle JVM and runs the desired
* method.
* @param mBeanMethodName : method to run from the interface UnixOperatingSystemMXBean
* @return the method result
*/
private Long runUnixMXBeanMethod(String mBeanMethodName) {
Object unixos;
Class<?> classRef;
Method mBeanMethod;
try {
classRef = Class.forName("com.sun.management.UnixOperatingSystemMXBean");
if (classRef.isInstance(osMbean)) {
mBeanMethod = classRef.getMethod(mBeanMethodName);
unixos = classRef.cast(osMbean);
return (Long) mBeanMethod.invoke(unixos);
}
} catch (Exception e) {
LOG.warn(
"Not able to load class or method for" + " com.sun.management.UnixOperatingSystemMXBean.",
e);
}
return null;
} | 3.68 |
hbase_KeyValue_createByteArray | /**
* @param qualifier can be a ByteBuffer or a byte[], or null.
* @param value can be a ByteBuffer or a byte[], or null.
*/
private static byte[] createByteArray(final byte[] row, final int roffset, final int rlength,
final byte[] family, final int foffset, int flength, final Object qualifier, final int qoffset,
int qlength, final long timestamp, final Type type, final Object value, final int voffset,
int vlength, List<Tag> tags) {
checkParameters(row, rlength, family, flength, qlength, vlength);
// Calculate length of tags area
int tagsLength = 0;
if (tags != null && !tags.isEmpty()) {
for (Tag t : tags) {
tagsLength += t.getValueLength() + Tag.INFRASTRUCTURE_SIZE;
}
}
RawCell.checkForTagsLength(tagsLength);
// Allocate right-sized byte array.
int keyLength = (int) getKeyDataStructureSize(rlength, flength, qlength);
byte[] bytes =
new byte[(int) getKeyValueDataStructureSize(rlength, flength, qlength, vlength, tagsLength)];
// Write key, value and key row length.
int pos = 0;
pos = Bytes.putInt(bytes, pos, keyLength);
pos = Bytes.putInt(bytes, pos, vlength);
pos = Bytes.putShort(bytes, pos, (short) (rlength & 0x0000ffff));
pos = Bytes.putBytes(bytes, pos, row, roffset, rlength);
pos = Bytes.putByte(bytes, pos, (byte) (flength & 0x0000ff));
if (flength != 0) {
pos = Bytes.putBytes(bytes, pos, family, foffset, flength);
}
if (qlength > 0) {
if (qualifier instanceof ByteBuffer) {
pos = Bytes.putByteBuffer(bytes, pos, (ByteBuffer) qualifier);
} else {
pos = Bytes.putBytes(bytes, pos, (byte[]) qualifier, qoffset, qlength);
}
}
pos = Bytes.putLong(bytes, pos, timestamp);
pos = Bytes.putByte(bytes, pos, type.getCode());
if (vlength > 0) {
if (value instanceof ByteBuffer) {
pos = Bytes.putByteBuffer(bytes, pos, (ByteBuffer) value);
} else {
pos = Bytes.putBytes(bytes, pos, (byte[]) value, voffset, vlength);
}
}
// Add the tags after the value part
if (tagsLength > 0) {
pos = Bytes.putAsShort(bytes, pos, tagsLength);
for (Tag t : tags) {
int tlen = t.getValueLength();
pos = Bytes.putAsShort(bytes, pos, tlen + Tag.TYPE_LENGTH_SIZE);
pos = Bytes.putByte(bytes, pos, t.getType());
Tag.copyValueTo(t, bytes, pos);
pos += tlen;
}
}
return bytes;
} | 3.68 |
hbase_QuotaObserverChore_fetchAllTablesWithQuotasDefined | /**
* Computes the set of all tables that have quotas defined. This includes tables with quotas
* explicitly set on them, in addition to tables that exist namespaces which have a quota defined.
*/
TablesWithQuotas fetchAllTablesWithQuotasDefined() throws IOException {
final Scan scan = QuotaTableUtil.makeScan(null);
final TablesWithQuotas tablesWithQuotas = new TablesWithQuotas(conn, conf);
try (final QuotaRetriever scanner = new QuotaRetriever()) {
scanner.init(conn, scan);
for (QuotaSettings quotaSettings : scanner) {
// Only one of namespace and tablename should be 'null'
final String namespace = quotaSettings.getNamespace();
final TableName tableName = quotaSettings.getTableName();
if (QuotaType.SPACE != quotaSettings.getQuotaType()) {
continue;
}
if (namespace != null) {
assert tableName == null;
// Collect all of the tables in the namespace
TableName[] tablesInNS = conn.getAdmin().listTableNamesByNamespace(namespace);
for (TableName tableUnderNs : tablesInNS) {
if (LOG.isTraceEnabled()) {
LOG.trace(
"Adding " + tableUnderNs + " under " + namespace + " as having a namespace quota");
}
tablesWithQuotas.addNamespaceQuotaTable(tableUnderNs);
}
} else {
assert tableName != null;
if (LOG.isTraceEnabled()) {
LOG.trace("Adding " + tableName + " as having table quota.");
}
// namespace is already null, must be a non-null tableName
tablesWithQuotas.addTableQuotaTable(tableName);
}
}
return tablesWithQuotas;
}
} | 3.68 |
framework_CalendarTest_commitCalendarEvent | /* Adds/updates the event in the data source and fires change event. */
private void commitCalendarEvent()
throws ValidationException, CommitException {
scheduleEventFieldGroup.commit();
BasicEvent event = getFormCalendarEvent();
scheduledEventBinder.writeBean(event);
if (event.getEnd() == null) {
event.setEnd(event.getStart());
}
if (!dataSource.containsEvent(event)) {
dataSource.addEvent(event);
}
getUI().removeWindow(scheduleEventPopup);
} | 3.68 |
morf_MySql_sqlDialect | /**
* @see org.alfasoftware.morf.jdbc.DatabaseType#sqlDialect(java.lang.String)
*/
@Override
public SqlDialect sqlDialect(String schemaName) {
return new MySqlDialect();
} | 3.68 |
querydsl_MetaDataExporter_setBeanPackageName | /**
* Override the bean package name (default: packageName)
*
* @param beanPackageName package name for bean sources
*/
public void setBeanPackageName(@Nullable String beanPackageName) {
this.beanPackageName = beanPackageName;
} | 3.68 |
morf_OracleMetaDataProvider_viewMap | /**
* Use to access the metadata for the views in the specified connection.
* Lazily initialises the metadata, and only loads it once.
*
* @return View metadata.
*/
private Map<String, View> viewMap() {
if (viewMap != null) {
return viewMap;
}
viewMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
readViewMap();
return viewMap;
} | 3.68 |
flink_Transformation_setUid | /**
* Sets an ID for this {@link Transformation}. This will later be hashed to a uidHash which
* is then used to create the JobVertexID (that is shown in logs and the web ui).
*
* <p>The specified ID is used to assign the same operator ID across job submissions (for
* example when starting a job from a savepoint).
*
* <p><strong>Important</strong>: this ID needs to be unique per transformation and job.
* Otherwise, job submission will fail.
*
* @param uid The unique user-specified ID of this transformation.
*/
public void setUid(String uid) {
this.uid = uid;
} | 3.68 |