name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_WALKeyImpl_internTableName | /**
* Drop this instance's tablename byte array and instead hold a reference to the provided
* tablename. This is not meant to be a general purpose setter - it's only used to collapse
* references to conserve memory.
*/
void internTableName(TableName tablename) {
// We should not use this as a setter - only to swap
// in a new reference to the same table name.
assert tablename.equals(this.tablename);
this.tablename = tablename;
} | 3.68 |
graphhopper_LandmarkStorage_setLandmarkSuggestions | /**
* This method forces the landmark preparation to skip the landmark search and uses the specified landmark list instead.
* Useful for manual tuning of larger areas to save import time or improve quality.
*/
public LandmarkStorage setLandmarkSuggestions(List<LandmarkSuggestion> landmarkSuggestions) {
if (landmarkSuggestions == null)
throw new IllegalArgumentException("landmark suggestions cannot be null");
this.landmarkSuggestions = landmarkSuggestions;
return this;
} | 3.68 |
dubbo_TTable_drawSeparationLine | /**
* draw separation line
*/
private String drawSeparationLine(final int[] widthCacheArray) {
final StringBuilder separationLineSB = new StringBuilder();
final int lastCol = indexLastCol(widthCacheArray);
final int colCount = widthCacheArray.length;
for (int colIndex = 0; colIndex < colCount; colIndex++) {
final int width = widthCacheArray[colIndex];
if (width <= 0) {
continue;
}
final boolean isFirstCol = colIndex == 0;
final boolean isLastCol = colIndex == lastCol;
if (isFirstCol && border.has(Border.BORDER_OUTER_LEFT)) {
separationLineSB.append('+');
}
if (!isFirstCol && border.has(Border.BORDER_INNER_V)) {
separationLineSB.append('+');
}
separationLineSB.append(repeat("-", width + 2 * padding));
if (isLastCol && border.has(Border.BORDER_OUTER_RIGHT)) {
separationLineSB.append('+');
}
}
return separationLineSB.toString();
} | 3.68 |
hadoop_BoundedResourcePool_numAvailable | /**
* Number of items available to be acquired. Mostly for testing purposes.
* @return the number available.
*/
public synchronized int numAvailable() {
return (size - numCreated()) + items.size();
} | 3.68 |
flink_CoFeedbackTransformation_addFeedbackEdge | /**
* Adds a feedback edge. The parallelism of the {@code Transformation} must match the
* parallelism of the input {@code Transformation} of the upstream {@code Transformation}.
*
* @param transform The new feedback {@code Transformation}.
*/
public void addFeedbackEdge(Transformation<F> transform) {
if (transform.getParallelism() != this.getParallelism()) {
throw new UnsupportedOperationException(
"Parallelism of the feedback stream must match the parallelism of the original"
+ " stream. Parallelism of original stream: "
+ this.getParallelism()
+ "; parallelism of feedback stream: "
+ transform.getParallelism());
}
feedbackEdges.add(transform);
} | 3.68 |
hudi_CompactionUtils_buildFromFileSlices | /**
* Generate compaction plan from file-slices.
*
* @param partitionFileSlicePairs list of partition file-slice pairs
* @param extraMetadata Extra Metadata
* @param metricsCaptureFunction Metrics Capture function
*/
public static HoodieCompactionPlan buildFromFileSlices(List<Pair<String, FileSlice>> partitionFileSlicePairs,
Option<Map<String, String>> extraMetadata,
Option<Function<Pair<String, FileSlice>, Map<String, Double>>> metricsCaptureFunction) {
HoodieCompactionPlan.Builder builder = HoodieCompactionPlan.newBuilder();
extraMetadata.ifPresent(builder::setExtraMetadata);
builder.setOperations(partitionFileSlicePairs.stream()
.map(pfPair -> buildFromFileSlice(pfPair.getKey(), pfPair.getValue(), metricsCaptureFunction))
.collect(Collectors.toList()));
builder.setVersion(LATEST_COMPACTION_METADATA_VERSION);
return builder.build();
} | 3.68 |
framework_DataGenerator_refreshData | /**
* Informs the {@code DataGenerator} that a data object has been updated.
* This method should update any unneeded information stored for the given item.
*
* @param item
* the updated item
*/
public default void refreshData(T item) {
} | 3.68 |
graphhopper_EdgeChangeBuilder_build | /**
* Builds a mapping between real node ids and the set of changes for their adjacent edges.
*
* @param edgeChangesAtRealNodes output parameter, you need to pass an empty & modifiable map and the results will
* be added to it
*/
static void build(IntArrayList closestEdges, List<VirtualEdgeIteratorState> virtualEdges, int firstVirtualNodeId, IntObjectMap<QueryOverlay.EdgeChanges> edgeChangesAtRealNodes) {
new EdgeChangeBuilder(closestEdges, virtualEdges, firstVirtualNodeId, edgeChangesAtRealNodes).build();
} | 3.68 |
flink_SessionWindowAssigner_mergeWindow | /**
* Merge curWindow and other, returning a new window which covers both if they overlap.
* Otherwise, returns curWindow itself.
*/
private TimeWindow mergeWindow(
TimeWindow curWindow, TimeWindow other, Collection<TimeWindow> mergedWindow) {
if (curWindow.intersects(other)) {
mergedWindow.add(other);
return curWindow.cover(other);
} else {
return curWindow;
}
} | 3.68 |
querydsl_Expressions_constant | /**
* Create a Constant expression for the given value
*
* @param value constant
* @return constant expression
*/
public static <T> Expression<T> constant(T value) {
return ConstantImpl.create(value);
} | 3.68 |
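A minimal usage sketch for the constant(...) factory above, assuming the standard com.querydsl.core.types.dsl.Expressions entry point; the wrapped values are arbitrary illustrations.

import com.querydsl.core.types.Expression;
import com.querydsl.core.types.dsl.Expressions;

public class ConstantExample {
    public static void main(String[] args) {
        // Wrap plain Java values as typed constant expressions.
        Expression<String> status = Expressions.constant("ACTIVE");
        Expression<Integer> limit = Expressions.constant(100);
        // The constants can now be passed anywhere an Expression is expected,
        // e.g. as arguments to predicates or as select projections.
        System.out.println(status + " / " + limit);
    }
}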
framework_Table_bindPropertyToField | /**
* Binds an item property to a field generated by TableFieldFactory. The
* default behavior is to bind the property straight to the Field. If a
* Property.Viewer type property (e.g. PropertyFormatter) is already set for the
* field, the property is bound to that Property.Viewer.
*
* @param rowId
* @param colId
* @param property
* @param field
* @since 6.7.3
*/
protected void bindPropertyToField(Object rowId, Object colId,
Property property, Field field) {
// Check if the field has a Property.Viewer set. In that case we expect the
// developer has e.g. a PropertyFormatter they wish to use, so we assign the
// property to the Viewer instead.
boolean hasFilterProperty = field.getPropertyDataSource() != null
&& (field.getPropertyDataSource() instanceof Property.Viewer);
if (hasFilterProperty) {
((Property.Viewer) field.getPropertyDataSource())
.setPropertyDataSource(property);
} else {
field.setPropertyDataSource(property);
}
} | 3.68 |
hbase_Bytes_toBinaryFromHex | /**
* Takes an ASCII digit in the range A-F0-9 and returns the corresponding integer/ordinal value.
* @param ch The hex digit.
* @return The converted hex value as a byte.
*/
public static byte toBinaryFromHex(byte ch) {
if (ch >= 'A' && ch <= 'F') return (byte) ((byte) 10 + (byte) (ch - 'A'));
// else
return (byte) (ch - '0');
} | 3.68 |
hadoop_WorkerId_setHostname | /**
* Set hostname for Worker.
* @param wkhostname : Hostname of worker
*/
public final void setHostname(final Text wkhostname) {
this.hostname = wkhostname;
} | 3.68 |
hbase_BaseLoadBalancer_getRegionAssignmentsByServer | // return a modifiable map, as we may add more entries into the returned map.
private Map<ServerName, List<RegionInfo>>
getRegionAssignmentsByServer(Collection<RegionInfo> regions) {
return provider != null
? new HashMap<>(provider.getSnapShotOfAssignment(regions))
: new HashMap<>();
} | 3.68 |
shardingsphere-elasticjob_DataSourceRegistry_getInstance | /**
* Get instance of {@link DataSourceRegistry}.
*
* @return {@link DataSourceRegistry} singleton
*/
public static DataSourceRegistry getInstance() {
if (null == instance) {
synchronized (DataSourceRegistry.class) {
if (null == instance) {
instance = new DataSourceRegistry();
}
}
}
return instance;
} | 3.68 |
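The getInstance() method above uses the double-checked locking idiom. The generic sketch below (not ElasticJob code) shows the same pattern in isolation; note that the idiom is only safe under the Java memory model when the instance field is declared volatile, which is assumed here.

public final class LazySingleton {

    // volatile is required for double-checked locking to be safe under the JMM
    private static volatile LazySingleton instance;

    private LazySingleton() {
    }

    public static LazySingleton getInstance() {
        if (instance == null) {                     // first check without locking
            synchronized (LazySingleton.class) {
                if (instance == null) {             // second check while holding the lock
                    instance = new LazySingleton();
                }
            }
        }
        return instance;
    }
}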
hmily_RepositoryPathUtils_buildDbTableName | /**
* Build db table name string.
*
* @param applicationName the application name
* @return the string
*/
public static String buildDbTableName(final String applicationName) {
return CommonConstant.DB_SUFFIX + applicationName.replaceAll("-", "_");
} | 3.68 |
hbase_ZKConfig_validateClusterKey | /**
* Verifies that the given key matches the expected format for a ZooKeeper cluster key. The Quorum
* for the ZK cluster can have one of the following formats (see examples below):
* <ol>
* <li>s1,s2,s3 (no client port in the list, the client port could be obtained from
* clientPort)</li>
* <li>s1:p1,s2:p2,s3:p3 (with client port, which could be same or different for each server, in
* this case, the clientPort would be ignored)</li>
* <li>s1:p1,s2,s3:p3 (mix of (1) and (2) - if port is not specified in a server, it would use the
* clientPort; otherwise, it would use the specified port)</li>
* </ol>
* @param key the cluster key to validate
* @throws IOException if the key could not be parsed
*/
public static void validateClusterKey(String key) throws IOException {
transformClusterKey(key);
} | 3.68 |
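A small, hedged usage sketch for validateClusterKey, assuming the class lives at org.apache.hadoop.hbase.zookeeper.ZKConfig and that a full cluster key has the form quorum:clientPort:znodeParent; the host names and keys below are made up, and the catch block simply reports keys the parser rejects.

import java.io.IOException;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;

public class ClusterKeyCheck {
    public static void main(String[] args) {
        String[] keys = {
            "zk1,zk2,zk3:2181:/hbase",   // quorum without per-server ports
            "not a cluster key"          // expected to fail parsing
        };
        for (String key : keys) {
            try {
                ZKConfig.validateClusterKey(key);
                System.out.println("parsed OK: " + key);
            } catch (IOException e) {
                System.out.println("rejected:  " + key + " (" + e.getMessage() + ")");
            }
        }
    }
}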
graphhopper_AbstractPathDetailsBuilder_startInterval | /**
* It is only possible to open one interval at a time. Calling <code>startInterval</code> when
* the interval is already open results in an Exception.
*
* @param firstIndex the index the PathDetail starts
*/
public void startInterval(int firstIndex) {
Object value = getCurrentValue();
if (isOpen)
throw new IllegalStateException("PathDetailsBuilder is already in an open state with value: " + currentDetail.getValue()
+ " trying to open a new one with value: " + value);
currentDetail = new PathDetail(value);
currentDetail.setFirst(firstIndex);
isOpen = true;
} | 3.68 |
hadoop_TimelineV2Client_createTimelineClient | /**
* Creates an instance of the timeline v.2 client.
*
* @param appId the application id with which the timeline client is
* associated
* @return the created timeline client instance
*/
@Public
public static TimelineV2Client createTimelineClient(ApplicationId appId) {
TimelineV2Client client = new TimelineV2ClientImpl(appId);
return client;
} | 3.68 |
hadoop_ClusterMetrics_getGrayListedTaskTrackerCount | /**
* Get the number of graylisted trackers in the cluster.
*
* @return graylisted tracker count
*/
public int getGrayListedTaskTrackerCount() {
return numGraylistedTrackers;
} | 3.68 |
querydsl_AbstractSQLDeleteClause_addFlag | /**
* Add the given Expression at the given position as a query flag
*
* @param position position
* @param flag query flag
* @return the current object
*/
public C addFlag(Position position, Expression<?> flag) {
metadata.addFlag(new QueryFlag(position, flag));
return (C) this;
} | 3.68 |
querydsl_SQLMergeClause_addFlag | /**
* Add the given Expression at the given position as a query flag
*
* @param position position
* @param flag query flag
* @return the current object
*/
public SQLMergeClause addFlag(Position position, Expression<?> flag) {
metadata.addFlag(new QueryFlag(position, flag));
return this;
} | 3.68 |
hadoop_KerberosSecurityTestcase_createTestDir | /**
* Create a working directory; it should be the build directory. Under
* this directory an ApacheDS working directory will be created, this
* directory will be deleted when the MiniKdc stops.
*/
public void createTestDir() {
workDir = new File(System.getProperty("test.dir", "target"));
} | 3.68 |
flink_ExistingSavepoint_readListState | /**
* Read operator {@code ListState} from a {@code Savepoint} when a custom serializer was used;
* e.g., a different serializer than the one returned by {@code
* TypeInformation#createSerializer}.
*
* @param uid The uid of the operator.
* @param name The (unique) name for the state.
* @param typeInfo The type of the elements in the state.
* @param serializer The serializer used to write the elements into state.
* @param <T> The type of the values that are in the list state.
* @return A {@code DataSet} representing the elements in state.
* @throws IOException If the savepoint path is invalid or the uid does not exist.
*/
public <T> DataSource<T> readListState(
String uid, String name, TypeInformation<T> typeInfo, TypeSerializer<T> serializer)
throws IOException {
OperatorState operatorState = metadata.getOperatorState(uid);
ListStateDescriptor<T> descriptor = new ListStateDescriptor<>(name, serializer);
ListStateInputFormat<T> inputFormat =
new ListStateInputFormat<>(
operatorState, env.getConfiguration(), stateBackend, descriptor);
return env.createInput(inputFormat, typeInfo);
} | 3.68 |
hadoop_IncrementalBlockReportManager_addRDBI | /**
* Add a block for notification to NameNode.
* If another entry exists for the same block it is removed.
*/
@VisibleForTesting
synchronized void addRDBI(ReceivedDeletedBlockInfo rdbi,
DatanodeStorage storage) {
// Make sure another entry for the same block is first removed.
// There may only be one such entry.
for (PerStorageIBR perStorage : pendingIBRs.values()) {
if (perStorage.remove(rdbi.getBlock()) != null) {
break;
}
}
getPerStorageIBR(storage).put(rdbi);
} | 3.68 |
graphhopper_BikeCommonPriorityParser_convertClassValueToPriority | // Conversion of class value to priority. See http://wiki.openstreetmap.org/wiki/Class:bicycle
private PriorityCode convertClassValueToPriority(String tagvalue) {
int classvalue;
try {
classvalue = Integer.parseInt(tagvalue);
} catch (NumberFormatException e) {
return UNCHANGED;
}
switch (classvalue) {
case 3:
return BEST;
case 2:
return VERY_NICE;
case 1:
return PREFER;
case -1:
return SLIGHT_AVOID;
case -2:
return AVOID;
case -3:
return AVOID_MORE;
default:
return UNCHANGED;
}
} | 3.68 |
flink_DateTimeUtils_unixTimestamp | /**
* Returns the value of the argument as an unsigned integer in seconds since '1970-01-01
* 00:00:00' UTC.
*/
public static long unixTimestamp(String dateStr, String format, TimeZone tz) {
long ts = internalParseTimestampMillis(dateStr, format, tz);
if (ts == Long.MIN_VALUE) {
return Long.MIN_VALUE;
} else {
// return the seconds
return ts / 1000;
}
} | 3.68 |
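A brief sketch of calling this internal utility directly, assuming it is accessible as org.apache.flink.table.utils.DateTimeUtils and that the format string follows the usual yyyy-MM-dd HH:mm:ss pattern; the date value is arbitrary.

import java.util.TimeZone;
import org.apache.flink.table.utils.DateTimeUtils;

public class UnixTimestampExample {
    public static void main(String[] args) {
        long seconds = DateTimeUtils.unixTimestamp(
                "2024-01-01 00:00:00",
                "yyyy-MM-dd HH:mm:ss",
                TimeZone.getTimeZone("UTC"));
        // Long.MIN_VALUE signals that the input could not be parsed (see the method body above).
        System.out.println(seconds == Long.MIN_VALUE ? "unparseable input" : seconds + " s since epoch");
    }
}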
flink_Either_obtainRight | /**
* Utility function for {@link EitherSerializer} to support object reuse.
*
* <p>To support object reuse both subclasses of Either contain a reference to an instance of
* the other type. This method provides access to and initializes the cross-reference.
*
* @param input container for Left or Right value
* @param rightSerializer for creating an instance of the right type
* @param <L> the type of Left
* @param <R> the type of Right
* @return input if Right type else input's Right reference
*/
@Internal
public static <L, R> Right<L, R> obtainRight(
Either<L, R> input, TypeSerializer<R> rightSerializer) {
if (input.isRight()) {
return (Right<L, R>) input;
} else {
Left<L, R> left = (Left<L, R>) input;
if (left.right == null) {
left.right = Right.of(rightSerializer.createInstance());
left.right.left = left;
}
return left.right;
}
} | 3.68 |
flink_PushFilterIntoSourceScanRuleBase_resolveFiltersAndCreateTableSourceTable | /**
* Resolves filters using the underlying source's {@link SupportsFilterPushDown} and creates a
* new {@link TableSourceTable} with the supplied predicates.
*
* @param convertiblePredicates Predicates to resolve
* @param oldTableSourceTable TableSourceTable to copy
* @param scan Underlying table scan to push to
* @param relBuilder Builder to push the scan to
* @return A tuple consisting of the resolved filters and the newly created {@link
* TableSourceTable}
*/
protected Tuple2<SupportsFilterPushDown.Result, TableSourceTable>
resolveFiltersAndCreateTableSourceTable(
RexNode[] convertiblePredicates,
TableSourceTable oldTableSourceTable,
TableScan scan,
RelBuilder relBuilder) {
// record size before applyFilters for update statistics
int originPredicatesSize = convertiblePredicates.length;
// update DynamicTableSource
DynamicTableSource newTableSource = oldTableSourceTable.tableSource().copy();
SupportsFilterPushDown.Result result =
FilterPushDownSpec.apply(
Arrays.asList(convertiblePredicates),
newTableSource,
SourceAbilityContext.from(scan));
relBuilder.push(scan);
List<RexNode> acceptedPredicates =
convertExpressionToRexNode(result.getAcceptedFilters(), relBuilder);
FilterPushDownSpec filterPushDownSpec = new FilterPushDownSpec(acceptedPredicates);
TableSourceTable newTableSourceTable =
oldTableSourceTable.copy(
newTableSource,
oldTableSourceTable.getStatistic(),
new SourceAbilitySpec[] {filterPushDownSpec});
return new Tuple2<>(result, newTableSourceTable);
} | 3.68 |
hbase_HFileBlockIndex_readMultiLevelIndexRoot | /**
* Read the root-level metadata of a multi-level block index. Based on
* {@link #readRootIndex(DataInput, int)}, but also reads metadata necessary to compute the
* mid-key in a multi-level index.
* @param blk the HFile block
* @param numEntries the number of root-level index entries
*/
public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException {
DataInputStream in = readRootIndex(blk, numEntries);
// HFileBlock.getByteStream() returns a byte stream for reading the data(excluding checksum)
// of root index block, so after reading the root index there is no need to subtract the
// checksum bytes.
if (in.available() < MID_KEY_METADATA_SIZE) {
// No mid-key metadata available.
return;
}
midLeafBlockOffset = in.readLong();
midLeafBlockOnDiskSize = in.readInt();
midKeyEntry = in.readInt();
} | 3.68 |
framework_VTextualDate_buildDate | /**
* Updates the text field according to the current date (provided by
* {@link #getDate()}). Takes care of updating text, enabling and disabling
* the field, setting/removing readonly status and updating readonly styles.
* <p>
* For internal use only. May be removed or replaced in the future.
* <p>
* TODO: Split part of this into a method that only updates the text as this
* is what usually is needed except for updateFromUIDL.
*/
public void buildDate() {
removeStyleName(getStylePrimaryName() + PARSE_ERROR_CLASSNAME);
// Create the initial text for the textfield
String dateText;
Date currentDate = getDate();
if (currentDate != null) {
dateText = getDateTimeService().formatDate(currentDate,
getFormatString());
} else {
dateText = "";
}
setText(dateText);
text.setEnabled(enabled);
text.setReadOnly(readonly);
if (readonly) {
text.addStyleName("v-readonly");
Roles.getTextboxRole().setAriaReadonlyProperty(text.getElement(),
true);
} else {
text.removeStyleName("v-readonly");
Roles.getTextboxRole()
.removeAriaReadonlyProperty(text.getElement());
}
} | 3.68 |
hudi_HoodieTableConfig_getRawRecordKeyFieldProp | /**
* @return the record key field prop.
*/
public String getRawRecordKeyFieldProp() {
return getStringOrDefault(RECORDKEY_FIELDS, null);
} | 3.68 |
framework_Potus_setLeftOffice | /**
* @param leftOffice
* the leftOffice to set
*/
public void setLeftOffice(Date leftOffice) {
this.leftOffice = leftOffice;
} | 3.68 |
hbase_PBType_outputStreamFromByteRange | /**
* Create a {@link CodedOutputStream} from a {@link PositionedByteRange}. Be sure to update
* {@code dst}'s position after writing to the stream.
* <p/>
* For example:
*
* <pre>
* CodedOutputStream os = outputStreamFromByteRange(dst);
* int before = os.spaceLeft(), after, written;
* val.writeTo(os);
* after = os.spaceLeft();
* written = before - after;
* dst.setPosition(dst.getPosition() + written);
* </pre>
*/
public static CodedOutputStream outputStreamFromByteRange(PositionedByteRange dst) {
return CodedOutputStream.newInstance(dst.getBytes(), dst.getOffset() + dst.getPosition(),
dst.getRemaining());
} | 3.68 |
hadoop_FileIoProvider_syncFileRange | /**
* Call sync_file_range on the given file descriptor.
*
* @param volume target volume. null if unavailable.
*/
public void syncFileRange(
@Nullable FsVolumeSpi volume, FileDescriptor outFd,
long offset, long numBytes, int flags) throws NativeIOException {
final long begin = profilingEventHook.beforeFileIo(volume, SYNC, 0);
try {
faultInjectorEventHook.beforeFileIo(volume, SYNC, 0);
NativeIO.POSIX.syncFileRangeIfPossible(outFd, offset, numBytes, flags);
profilingEventHook.afterFileIo(volume, SYNC, begin, 0);
} catch (Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
hudi_OptionsResolver_isSimpleBucketIndexType | /**
* Returns whether the table index is simple bucket index.
*/
public static boolean isSimpleBucketIndexType(Configuration conf) {
return isBucketIndexType(conf) && getBucketEngineType(conf).equals(HoodieIndex.BucketIndexEngineType.SIMPLE);
} | 3.68 |
hudi_HoodieFileGroup_getAllFileSlices | /**
* Provides a stream of committed file slices, sorted by base commit time in reverse order.
*/
public Stream<FileSlice> getAllFileSlices() {
if (!timeline.empty()) {
return fileSlices.values().stream().filter(this::isFileSliceCommitted);
}
return Stream.empty();
} | 3.68 |
morf_SelectStatement_forUpdate | /**
* Tells the database to pessimistically lock the tables.
*
* @return a new select statement with the change applied.
*/
public SelectStatement forUpdate() {
return copyOnWriteOrMutate(
(SelectStatementBuilder b) -> b.forUpdate(),
() -> this.forUpdate = true
);
} | 3.68 |
hadoop_SnappyDecompressor_setInputFromSavedData | /**
* If a write would exceed the capacity of the direct buffers, it is set
* aside to be loaded by this function while the compressed data are
* consumed.
*/
void setInputFromSavedData() {
compressedDirectBufLen = Math.min(userBufLen, directBufferSize);
// Reinitialize snappy's input direct buffer
compressedDirectBuf.rewind();
((ByteBuffer) compressedDirectBuf).put(userBuf, userBufOff,
compressedDirectBufLen);
// Note how much data is being fed to snappy
userBufOff += compressedDirectBufLen;
userBufLen -= compressedDirectBufLen;
} | 3.68 |
hadoop_ServerWebApp_resolveAuthority | /**
* Resolves the host and port InetSocketAddress the
* web server is listening to.
* <p>
* This implementation looks for the following 2 properties:
* <ul>
* <li>#SERVER_NAME#.http.hostname</li>
* <li>#SERVER_NAME#.http.port</li>
* </ul>
*
* @return the host and port InetSocketAddress the
* web server is listening to.
* @throws ServerException thrown
* if any of the above 2 properties is not defined.
*/
protected InetSocketAddress resolveAuthority() throws ServerException {
String hostnameKey = getName() + HTTP_HOSTNAME;
String portKey = getName() + HTTP_PORT;
String host = System.getProperty(hostnameKey);
String port = System.getProperty(portKey);
if (host == null) {
throw new ServerException(ServerException.ERROR.S13, hostnameKey);
}
if (port == null) {
throw new ServerException(ServerException.ERROR.S13, portKey);
}
try {
InetAddress add = InetAddress.getByName(host);
int portNum = Integer.parseInt(port);
return new InetSocketAddress(add, portNum);
} catch (UnknownHostException ex) {
throw new ServerException(ServerException.ERROR.S14, ex.toString(), ex);
}
} | 3.68 |
framework_SelectItemCaptionRefresh_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Selected option should be updated when item caption changes in the Select.";
} | 3.68 |
zxing_IntentIntegrator_shareText | /**
* Shares the given text by encoding it as a barcode, such that another user can
* scan the text off the screen of the device.
*
* @param text the text string to encode as a barcode
* @param type type of data to encode. See {@code com.google.zxing.client.android.Contents.Type} constants.
* @return the {@link AlertDialog} that was shown to the user prompting them to download the app
* if a prompt was needed, or null otherwise
*/
public final AlertDialog shareText(CharSequence text, CharSequence type) {
Intent intent = new Intent();
intent.addCategory(Intent.CATEGORY_DEFAULT);
intent.setAction(BS_PACKAGE + ".ENCODE");
intent.putExtra("ENCODE_TYPE", type);
intent.putExtra("ENCODE_DATA", text);
String targetAppPackage = findTargetAppPackage(intent);
if (targetAppPackage == null) {
return showDownloadDialog();
}
intent.setPackage(targetAppPackage);
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
intent.addFlags(FLAG_NEW_DOC);
attachMoreExtras(intent);
if (fragment == null) {
activity.startActivity(intent);
} else {
fragment.startActivity(intent);
}
return null;
} | 3.68 |
hbase_HRegionFileSystem_preCommitStoreFile | /**
* Generate the filename in the main family store directory for moving the file from a build/temp
* location.
* @param familyName Family that will gain the file
* @param buildPath {@link Path} to the file to commit.
* @param seqNum Sequence Number to append to the file name (less than 0 if no sequence
* number)
* @param generateNewName False if you want to keep the buildPath name
* @return The new {@link Path} of the to be committed file
*/
private Path preCommitStoreFile(final String familyName, final Path buildPath, final long seqNum,
final boolean generateNewName) throws IOException {
Path storeDir = getStoreDir(familyName);
if (!fs.exists(storeDir) && !createDir(storeDir))
throw new IOException("Failed creating " + storeDir);
String name = buildPath.getName();
if (generateNewName) {
name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
}
Path dstPath = new Path(storeDir, name);
if (!fs.exists(buildPath)) {
throw new FileNotFoundException(buildPath.toString());
}
if (LOG.isDebugEnabled()) {
LOG.debug("Committing " + buildPath + " as " + dstPath);
}
return dstPath;
} | 3.68 |
framework_LoginForm_createUsernameField | /**
* Customize the user name field. Only for overriding, do not call.
*
* @return the user name field
* @since 7.7
*/
protected TextField createUsernameField() {
throwIfInitialized();
TextField field = new TextField(getUsernameCaption());
field.focus();
return field;
} | 3.68 |
hadoop_OBSFileSystem_listStatus | /**
* This public interface is provided specially for Huawei MRS. List the
* statuses of the files/directories in the given path if the path is a
* directory. When recursive is true, iterate all objects in the given path
* and its subdirectories.
*
* @param f given path
* @param recursive whether to iterate objects in subdirectories
* @return the statuses of the files/directories in the given path
* @throws FileNotFoundException when the path does not exist
* @throws IOException see specific implementation
*/
public FileStatus[] listStatus(final Path f, final boolean recursive)
throws FileNotFoundException, IOException {
long startTime = System.currentTimeMillis();
long threadId = Thread.currentThread().getId();
try {
FileStatus[] statuses = OBSCommonUtils.innerListStatus(this, f,
recursive);
long endTime = System.currentTimeMillis();
LOG.debug(
"List status for path:{}, thread:{}, timeUsedInMilliSec:{}", f,
threadId, endTime - startTime);
return statuses;
} catch (ObsException e) {
throw OBSCommonUtils.translateException(
"listStatus with recursive flag["
+ (recursive ? "true] " : "false] "), f, e);
}
} | 3.68 |
framework_Tree_hasChildren | /**
* Tests if the Item specified with <code>itemId</code> has child Items.
*
* @see Container.Hierarchical#hasChildren(Object)
*/
@Override
public boolean hasChildren(Object itemId) {
return ((Container.Hierarchical) items).hasChildren(itemId);
} | 3.68 |
pulsar_ZipFiles_lines | /**
* Get a lazily loaded stream of lines from a zipped file, similar to
* {@link Files#lines(java.nio.file.Path)}.
*
* @param path
* The path to the zipped file.
* @return stream with lines.
*/
public static Stream<String> lines(Path path) {
ZipInputStream zipStream = null;
try {
zipStream = new ZipInputStream(Files.newInputStream(path));
} catch (IOException e) {
closeSafely(zipStream);
throw new UncheckedIOException(e);
}
// Reader decoder = new InputStreamReader(gzipStream, Charset.defaultCharset());
BufferedReader reader = new BufferedReader(new InputStreamReader(zipStream));
return reader.lines().onClose(() -> closeSafely(reader));
} | 3.68 |
flink_NettyShuffleUtils_getMinMaxNetworkBuffersPerResultPartition | /**
* Calculates and returns local network buffer pool size used by the result partition. The
* left/right values of the returned pair represent the min/max number of buffers required by the pool.
*/
public static Pair<Integer, Integer> getMinMaxNetworkBuffersPerResultPartition(
final int configuredNetworkBuffersPerChannel,
final int numFloatingBuffersPerGate,
final int sortShuffleMinParallelism,
final int sortShuffleMinBuffers,
final int numSubpartitions,
final boolean enableTieredStorage,
final int tieredStoreExclusiveBuffers,
final ResultPartitionType type) {
boolean isSortShuffle =
type.isBlockingOrBlockingPersistentResultPartition()
&& numSubpartitions >= sortShuffleMinParallelism;
int min;
if (isSortShuffle) {
min = sortShuffleMinBuffers;
} else {
min =
enableTieredStorage
? Math.min(tieredStoreExclusiveBuffers, numSubpartitions + 1)
: (numSubpartitions + 1);
}
int max =
type.isBounded()
? numSubpartitions * configuredNetworkBuffersPerChannel
+ numFloatingBuffersPerGate
: (isSortShuffle
? Math.max(min, 4 * numSubpartitions)
: NetworkBufferPool.UNBOUNDED_POOL_SIZE);
// for each upstream hash-based blocking/pipelined subpartition, at least one buffer is
// needed even the configured network buffers per channel is 0 and this behavior is for
// performance. If it's not guaranteed that each subpartition can get at least one buffer,
// more partial buffers with little data will be outputted to network/disk and recycled to
// be used by other subpartitions which can not get a buffer for data caching.
return Pair.of(min, Math.max(min, max));
} | 3.68 |
pulsar_LongHierarchicalLedgerRangeIterator_advance | /**
* Resolves the difference between cases 1 and 2 after nextLevelIterator is exhausted.
* Pre-condition: nextLevelIterator == null, thisLevelIterator != null
* Post-condition: nextLevelIterator == null && !thisLevelIterator.hasNext() OR
* nextLevelIterator.hasNext() == true and nextLevelIterator.next()
* yields the next result of next()
* @throws IOException Exception representing error
*/
void advance() throws IOException {
while (thisLevelIterator.hasNext()) {
String node = thisLevelIterator.next();
if (level == 0 && !isLedgerParentNode(node)) {
continue;
}
LedgerManager.LedgerRangeIterator nextIterator = level < 3
? new InnerIterator(path + "/" + node, level + 1)
: new LeafIterator(path + "/" + node);
if (nextIterator.hasNext()) {
nextLevelIterator = nextIterator;
break;
}
}
} | 3.68 |
hbase_CacheConfig_shouldLockOnCacheMiss | /**
* If we are sure the block cannot be cached, we will not acquire the lock; otherwise we will
* acquire the lock.
*/
public boolean shouldLockOnCacheMiss(BlockType blockType) {
if (blockType == null) {
return true;
}
return shouldCacheBlockOnRead(blockType.getCategory());
} | 3.68 |
flink_WatermarkStrategy_createTimestampAssigner | /**
* Instantiates a {@link TimestampAssigner} for assigning timestamps according to this strategy.
*/
@Override
default TimestampAssigner<T> createTimestampAssigner(
TimestampAssignerSupplier.Context context) {
// By default, this is {@link RecordTimestampAssigner},
// for cases where records come out of a source with valid timestamps, for example from
// Kafka.
return new RecordTimestampAssigner<>();
} | 3.68 |
flink_CompletedCheckpoint_registerSharedStatesAfterRestored | /**
* Register all shared states in the given registry. This method is called before the checkpoint
* is added into the store.
*
* @param sharedStateRegistry The registry where shared states are registered
* @param restoreMode the mode in which this checkpoint was restored
*/
public void registerSharedStatesAfterRestored(
SharedStateRegistry sharedStateRegistry, RestoreMode restoreMode) {
// in claim mode we should not register any shared handles
if (!props.isUnclaimed()) {
sharedStateRegistry.registerAllAfterRestored(this, restoreMode);
}
} | 3.68 |
morf_SqlDialect_sqlRepresentationOfColumnType | /**
* Creates the SQL representation of a column data type.
*
* @param column The column to map.
* @return The SQL representation for the column type.
* @see #sqlRepresentationOfColumnType(Column, boolean, boolean, boolean)
*/
protected String sqlRepresentationOfColumnType(Column column) {
StringBuilder defaultSqlRepresentation = new StringBuilder(sqlRepresentationOfColumnType(column, false, true, true));
// Many RDBMS implementations get funny about specifying nullability at all
// on autonumbered columns, and it's irrelevant in any case, so we just
// avoid it.
if (!column.isAutoNumbered()) {
defaultSqlRepresentation.append(column.isNullable() ? "" : " NOT NULL");
}
return defaultSqlRepresentation.toString();
} | 3.68 |
hadoop_AbfsOperationMetrics_getBytesFailed | /**
*
* @return bytes failed to transfer.
*/
AtomicLong getBytesFailed() {
return bytesFailed;
} | 3.68 |
hbase_IncreasingToUpperBoundRegionSplitPolicy_getSizeToCheck | /**
* @return Region max size or {@code count of regions cubed * 2 * flushsize}, whichever is
* smaller; guard against there being zero regions on this server.
*/
protected long getSizeToCheck(final int tableRegionsCount) {
// safety check for 100 to avoid numerical overflow in extreme cases
return tableRegionsCount == 0 || tableRegionsCount > 100
? getDesiredMaxFileSize()
: Math.min(getDesiredMaxFileSize(),
initialSize * tableRegionsCount * tableRegionsCount * tableRegionsCount);
} | 3.68 |
hbase_MetricsTableRequests_updateGet | /**
* Update the Get time histogram.
* @param time time it took
* @param blockBytesScanned size of block bytes scanned to retrieve the response
*/
public void updateGet(long time, long blockBytesScanned) {
if (isEnableTableLatenciesMetrics()) {
getTimeHistogram.update(time);
if (blockBytesScanned > 0) {
blockBytesScannedCount.increment(blockBytesScanned);
getBlockBytesScanned.update(blockBytesScanned);
}
}
} | 3.68 |
hbase_Delete_addFamily | /**
* Delete all columns of the specified family with a timestamp less than or equal to the specified
* timestamp.
* <p>
* Overrides previous calls to deleteColumn and deleteColumns for the specified family.
* @param family family name
* @param timestamp maximum version timestamp
* @return this for invocation chaining
*/
public Delete addFamily(final byte[] family, final long timestamp) {
if (timestamp < 0) {
throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp);
}
List<Cell> list = getCellList(family);
if (!list.isEmpty()) {
list.clear();
}
KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily);
list.add(kv);
return this;
} | 3.68 |
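An illustrative client-side sketch using addFamily(family, timestamp); the connection setup, table name and column family are hypothetical and only show where the Delete would typically be submitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteFamilyExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("my_table"))) {
            // Delete every cell in family "cf" of row "row-1" with a timestamp <= cutoff.
            long cutoff = System.currentTimeMillis();
            Delete delete = new Delete(Bytes.toBytes("row-1"))
                    .addFamily(Bytes.toBytes("cf"), cutoff);
            table.delete(delete);
        }
    }
}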
flink_TimeEvictor_of | /**
* Creates a {@code TimeEvictor} that keeps elements for the given amount of time. Eviction is done
* before/after the window function based on the value of doEvictAfter.
*
* @param windowSize The amount of time for which to keep elements.
* @param doEvictAfter Whether eviction is done after window function.
*/
public static <W extends Window> TimeEvictor<W> of(Time windowSize, boolean doEvictAfter) {
return new TimeEvictor<>(windowSize.toMilliseconds(), doEvictAfter);
} | 3.68 |
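A short pipeline sketch showing where a TimeEvictor would typically be attached; the key selector, window size and reduce function are placeholders, and the DataStream API calls are assumed to be the usual Flink streaming ones.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.evictors.TimeEvictor;
import org.apache.flink.streaming.api.windowing.time.Time;

public class TimeEvictorExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Tuple2<String, Integer>> input =
                env.fromElements(Tuple2.of("a", 1), Tuple2.of("a", 2), Tuple2.of("b", 3));
        input.keyBy(value -> value.f0)
             .window(TumblingProcessingTimeWindows.of(Time.seconds(10)))
             // Keep only elements from the last 5 seconds of the window; evict before the function fires.
             .evictor(TimeEvictor.of(Time.seconds(5), false))
             .reduce((a, b) -> Tuple2.of(a.f0, a.f1 + b.f1))
             .print();
        env.execute("time-evictor-sketch");
    }
}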
flink_CatalogManager_listCatalogs | /**
* Retrieves the set of names of all registered catalogs, including all initialized catalogs and
* all catalogs stored in the {@link CatalogStore}.
*
* @return a set of names of registered catalogs
*/
public Set<String> listCatalogs() {
return Collections.unmodifiableSet(
Stream.concat(
catalogs.keySet().stream(),
catalogStoreHolder.catalogStore().listCatalogs().stream())
.collect(Collectors.toSet()));
} | 3.68 |
framework_VCalendarPanel_handleNavigationMonthMode | /**
* Handle the keyboard navigation when the resolution is set to MONTH.
*
* @param keycode
* The keycode to handle
* @param ctrl
* Was the ctrl key pressed?
* @param shift
* Was the shift key pressed?
* @return
*/
protected boolean handleNavigationMonthMode(int keycode, boolean ctrl,
boolean shift) {
// Ctrl selection not supported
if (ctrl) {
return false;
} else if (keycode == getPreviousKey()) {
focusNextYear(1); // Add 1 year
return true;
} else if (keycode == getForwardKey()) {
focusNextMonth(); // Add 1 month
return true;
} else if (keycode == getNextKey()) {
focusPreviousYear(1); // Subtract 1 year
return true;
} else if (keycode == getBackwardKey()) {
focusPreviousMonth(); // Subtract 1 month
return true;
} else if (keycode == getSelectKey()) {
value = (Date) focusedDate.clone();
onSubmit();
return true;
} else if (keycode == getResetKey()) {
// Restore showing value the selected value
focusedDate.setTime(value.getTime());
renderCalendar();
return true;
} else if (keycode == getCloseKey() || keycode == KeyCodes.KEY_TAB) {
onCancel();
// TODO fire close event
return true;
}
return false;
} | 3.68 |
hbase_RawCell_createCell | /** Returns A new cell which is having the extra tags also added to it. */
public static Cell createCell(Cell cell, List<Tag> tags) {
return PrivateCellUtil.createCell(cell, tags);
} | 3.68 |
framework_VScrollTable_getFooterCell | /**
* Gets a footer cell by using a column index.
*
* @param index
* The index of the column
* @return The Cell
*/
public FooterCell getFooterCell(int index) {
if (index < visibleCells.size()) {
return (FooterCell) visibleCells.get(index);
} else {
return null;
}
} | 3.68 |
hbase_RegionReplicationFlushRequester_requestFlush | /**
* Request a flush for the given region.
* <p/>
* The given sequence id is that of the edit which we failed to replicate. A flush must happen
* after this sequence id to recover from the failure.
*/
synchronized void requestFlush(long sequenceId) {
// if there is already a flush task, just reuse it.
if (pendingFlushRequest != null) {
pendingFlushRequestSequenceId = Math.max(sequenceId, pendingFlushRequestSequenceId);
return;
}
// check last flush time
long elapsedSecs = TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - lastRequestNanos);
if (elapsedSecs >= minIntervalSecs) {
request();
return;
}
// schedule a timer task
HashedWheelTimer timer = getTimer();
pendingFlushRequestSequenceId = sequenceId;
pendingFlushRequest =
timer.newTimeout(this::flush, minIntervalSecs - elapsedSecs, TimeUnit.SECONDS);
} | 3.68 |
framework_JavaScriptConnectorHelper_addResizeListener | // Called from JSNI to add a listener
private void addResizeListener(Element element,
final JavaScriptObject callbackFunction) {
Map<JavaScriptObject, ElementResizeListener> elementListeners = resizeListeners
.get(element);
if (elementListeners == null) {
elementListeners = new HashMap<>();
resizeListeners.put(element, elementListeners);
}
ElementResizeListener listener = elementListeners.get(callbackFunction);
if (listener == null) {
LayoutManager layoutManager = LayoutManager
.get(connector.getConnection());
listener = event -> invokeElementResizeCallback(event.getElement(),
callbackFunction);
layoutManager.addElementResizeListener(element, listener);
elementListeners.put(callbackFunction, listener);
}
} | 3.68 |
pulsar_RangeCache_put | /**
* Insert.
*
* @param key
* @param value
* ref counted value with at least 1 ref to pass on the cache
* @return whether the entry was inserted in the cache
*/
public boolean put(Key key, Value value) {
// retain value so that it's not released before we put it in the cache and calculate the weight
value.retain();
try {
if (entries.putIfAbsent(key, value) == null) {
size.addAndGet(weighter.getSize(value));
return true;
} else {
return false;
}
} finally {
value.release();
}
} | 3.68 |
hadoop_LongValueSum_getCombinerOutput | /**
* @return an array of one element. The element is a string
* representation of the aggregated value. The return value is
* expected to be used by a combiner.
*/
public ArrayList<String> getCombinerOutput() {
ArrayList<String> retv = new ArrayList<String>(1);
retv.add(""+sum);
return retv;
} | 3.68 |
hbase_ResponseConverter_setControllerException | /**
* Stores an exception encountered during RPC invocation so it can be passed back through to the
* client.
* @param controller the controller instance provided by the client when calling the service
* @param ioe the exception encountered
*/
public static void setControllerException(RpcController controller, IOException ioe) {
if (controller != null) {
if (controller instanceof ServerRpcController) {
((ServerRpcController) controller).setFailedOn(ioe);
} else {
controller.setFailed(StringUtils.stringifyException(ioe));
}
}
} | 3.68 |
framework_DifferentFeaturesForDifferentClients_createInstance | // Must override as default implementation isn't allowed to
// instantiate our non-public classes
@Override
public UI createInstance(UICreateEvent event) {
try {
return event.getUIClass().newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
} | 3.68 |
flink_ReducingStateDescriptor_getReduceFunction | /** Returns the reduce function to be used for the reducing state. */
public ReduceFunction<T> getReduceFunction() {
return reduceFunction;
} | 3.68 |
flink_CliClient_executeInteractive | /**
* Executes statements from the user input and prints status information and/or errors on the
* terminal.
*/
private void executeInteractive(LineReader inputLineReader) {
// make space from previous output and test the writer
terminal.writer().println();
terminal.writer().flush();
// print welcome
terminal.writer().append(CliStrings.MESSAGE_WELCOME);
LineReader lineReader =
inputLineReader == null
? createLineReader(terminal, ExecutionMode.INTERACTIVE_EXECUTION)
: inputLineReader;
getAndExecuteStatements(lineReader, false);
} | 3.68 |
flink_KvStateLocation_getRegistrationName | /**
* Returns the name under which the KvState instances have been registered.
*
* @return Name under which the KvState instances have been registered.
*/
public String getRegistrationName() {
return registrationName;
} | 3.68 |
hadoop_InverseMapper_map | /** The inverse function. Input keys and values are swapped.*/
public void map(K key, V value,
OutputCollector<V, K> output, Reporter reporter)
throws IOException {
output.collect(value, key);
} | 3.68 |
hadoop_PublishedConfiguration_asConfiguration | /**
* Convert to Hadoop XML
* @return the configuration as a Hadoop Configuration
*/
public Configuration asConfiguration() {
Configuration conf = new Configuration(false);
try {
ConfigHelper.addConfigMap(conf, entries, "");
} catch (BadConfigException e) {
// triggered on a null value; switch to a runtime (and discard the stack)
throw new RuntimeException(e.toString());
}
return conf;
} | 3.68 |
AreaShop_FileManager_markGroupsAutoDirty | /**
* Mark all RegionGroups that they should regenerate regions.
*/
public void markGroupsAutoDirty() {
for(RegionGroup group : getGroups()) {
group.autoDirty();
}
} | 3.68 |
hbase_RestoreTool_getTableArchivePath | /**
* The return value represents the path for:
* ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
* @param tableName table name
* @return path to table archive
* @throws IOException exception
*/
Path getTableArchivePath(TableName tableName) throws IOException {
Path baseDir =
new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
HConstants.HFILE_ARCHIVE_DIRECTORY);
Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
Path tableArchivePath = new Path(archivePath, tableName.getQualifierAsString());
if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) {
LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exists");
tableArchivePath = null; // empty table has no archive
}
return tableArchivePath;
} | 3.68 |
morf_DatabaseSchemaManager_dropAllTables | /**
* Drop all tables so that the schema is empty.
*/
public void dropAllTables() {
ProducerCache producerCache = new ProducerCache();
try {
Schema databaseSchema = producerCache.get().getSchema();
ImmutableList<Table> tablesToDrop = ImmutableList.copyOf(databaseSchema.tables());
List<String> script = Lists.newArrayList();
for (Table table : tablesToDrop) {
script.addAll(dialect.get().dropStatements(table));
}
executeScript(script);
} finally {
producerCache.close();
}
tables.get().clear();
tablesNotNeedingTruncate.get().clear();
} | 3.68 |
hbase_Union4_decodeD | /**
* Read an instance of the fourth type parameter from buffer {@code src}.
*/
public D decodeD(PositionedByteRange src) {
return (D) decode(src);
} | 3.68 |
flink_TaskStateStats_getSummaryStats | /** @return Summary of the subtask stats. */
public TaskStateStatsSummary getSummaryStats() {
return summaryStats;
} | 3.68 |
flink_CompositeType_getFlatFields | /**
* Returns the flat field descriptors for the given field expression.
*
* @param fieldExpression The field expression for which the flat field descriptors are
* computed.
* @return The list of descriptors for the flat fields which are specified by the field
* expression.
*/
@PublicEvolving
public List<FlatFieldDescriptor> getFlatFields(String fieldExpression) {
List<FlatFieldDescriptor> result = new ArrayList<FlatFieldDescriptor>();
this.getFlatFields(fieldExpression, 0, result);
return result;
} | 3.68 |
hbase_WALEntryBatch_getWalEntries | /** Returns the WAL Entries. */
public List<Entry> getWalEntries() {
return walEntriesWithSize.stream().map(Pair::getFirst).collect(Collectors.toList());
} | 3.68 |
framework_Panel_getScrollLeft | /*
* (non-Javadoc)
*
* @see com.vaadin.server.Scrollable#setScrollable(boolean)
*/
@Override
public int getScrollLeft() {
return getState(false).scrollLeft;
} | 3.68 |
flink_KafkaStandaloneGenerator_main | /** Entry point to the kafka data producer. */
public static void main(String[] args) throws Exception {
final KafkaCollector[] collectors = new KafkaCollector[NUM_PARTITIONS];
// create the generator threads
for (int i = 0; i < collectors.length; i++) {
collectors[i] = new KafkaCollector(BROKER_ADDRESS, TOPIC, i);
}
StandaloneThreadedGenerator.runGenerator(collectors);
} | 3.68 |
framework_CvalChecker_validateProduct | /**
* Validate whether there is a valid license key for a product.
*
* @param productName
* for example vaadin-touchkit
* @param productVersion
* for instance 4.0.1
* @return CvalInfo Server response or cache response if server is offline
* @throws InvalidCvalException
* when there is no valid license for the product
* @throws UnreachableCvalServerException
* when we have license key but server is unreachable
*/
public CvalInfo validateProduct(String productName, String productVersion,
String productTitle)
throws InvalidCvalException, UnreachableCvalServerException {
String key = getDeveloperLicenseKey(productName, productVersion,
productTitle);
CvalInfo info = null;
if (key != null && !key.isEmpty()) {
info = getCachedLicenseInfo(productName);
if (info != null && !info.isValidInfo(productName, key)) {
deleteCache(productName);
info = null;
}
info = askLicenseServer(productName, key, productVersion, info);
if (info != null && info.isValidInfo(productName, key)
&& info.isValidVersion(computeMajorVersion(productVersion))
&& !info.isLicenseExpired()) {
return info;
}
}
throw new InvalidCvalException(productName, productVersion,
productTitle, key, info);
} | 3.68 |
hbase_DictionaryCache_contains | // Visible for testing
public static boolean contains(String dictionaryPath) {
if (CACHE != null) {
return CACHE.asMap().containsKey(dictionaryPath);
}
return false;
} | 3.68 |
hadoop_TFile_close | /**
* Close the scanner. Release all resources. The behavior of using the
* scanner after calling close is not defined. The entry returned by the
* previous entry() call will be invalid.
*/
@Override
public void close() throws IOException {
parkCursorAtEnd();
} | 3.68 |
rocketmq-connect_Serializer_configure | /**
* Configure this class.
* @param configs configs in key/value pairs
*/
default void configure(Map<String, ?> configs) {
// intentionally left blank
} | 3.68 |
hbase_BucketCache_stopWriterThreads | /**
* Only used in test
*/
void stopWriterThreads() throws InterruptedException {
for (WriterThread writerThread : writerThreads) {
writerThread.disableWriter();
writerThread.interrupt();
writerThread.join();
}
} | 3.68 |
pulsar_PulsarLedgerIdGenerator_handleTheDeletePath | // If rootPath is configured when using the ZK metadata store, it will be appended as the prefix of the path.
// So when we get the path from the stat, we should truncate the rootPath.
private String handleTheDeletePath(String path) {
if (store instanceof ZKMetadataStore) {
String rootPath = ((ZKMetadataStore) store).getRootPath();
if (rootPath == null) {
return path;
}
return path.replaceFirst(rootPath, "");
}
return path;
} | 3.68 |
framework_CvalChecker_setLicenseProvider | /*
* Change the license provider, only used in tests.
*/
final CvalChecker setLicenseProvider(CvalServer p) {
provider = p;
return this;
} | 3.68 |
hadoop_AbfsThrottlingInterceptFactory_factory | /**
* Returns instance of throttling intercept.
* @param accountName Account name.
* @return instance of throttling intercept.
*/
private static AbfsClientThrottlingIntercept factory(final String accountName) {
return new AbfsClientThrottlingIntercept(accountName, abfsConfig);
} | 3.68 |
hbase_CatalogJanitor_hasNoReferences | /**
* @param p A pair where the first boolean says whether or not the daughter region directory
* exists in the filesystem and then the second boolean says whether the daughter has
* references to the parent.
* @return True the passed <code>p</code> signifies no references.
*/
private static boolean hasNoReferences(final Pair<Boolean, Boolean> p) {
return !p.getFirst() || !p.getSecond();
} | 3.68 |
hbase_HBaseTestingUtility_createRegionAndWAL | /**
* Create a region with its own WAL. Be sure to call
* {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
*/
public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
final Configuration conf, final TableDescriptor htd, boolean initialize) throws IOException {
ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
WAL wal = createWal(conf, rootDir, info);
return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
} | 3.68 |
framework_VAcceptCriterion_needsServerSideCheck | /**
* Returns whether a server side check is needed for determining acceptance.
*
* @param drag
* the drag event
* @param criterioUIDL
* accept criterion UIDL
* @return {@code true} if a server side check is needed, {@code false}
* otherwise
*/
public boolean needsServerSideCheck(VDragEvent drag, UIDL criterioUIDL) {
return false;
} | 3.68 |
morf_OracleDialect_getSqlForYYYYMMDDToDate | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForYYYYMMDDToDate(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForYYYYMMDDToDate(Function function) {
return "TO_DATE(" + getSqlFrom(function.getArguments().get(0)) + ", 'yyyymmdd')";
} | 3.68 |
hadoop_UpdateContainerSchedulerEvent_getOriginalToken | /**
* Original Token before update.
*
* @return Container Token.
*/
public ContainerTokenIdentifier getOriginalToken() {
return this.originalToken;
} | 3.68 |
morf_AbstractSelectStatement_fullOuterJoin | /**
* Specifies a full outer join to a subselect:
*
* <blockquote><pre>
* TableReference sale = tableRef("Sale");
* TableReference customer = tableRef("Customer");
*
* // Define the subselect - a group by showing total sales by age in the
* // previous month.
* SelectStatement amountsByAgeLastMonth = select(field("age"), sum(field("amount")))
* .from(sale)
* .innerJoin(customer, sale.field("customerId").eq(customer.field("id")))
* .where(sale.field("month").eq(5))
* .groupBy(customer.field("age"))
* .alias("amountByAge");
*
* // The outer select, showing each sale this month as a percentage of the sales
* // to that age the previous month
* SelectStatement outer = select(
* sale.field("id"),
* sale.field("amount")
* // May cause division by zero (!)
* .divideBy(isNull(amountsByAgeLastMonth.asTable().field("amount"), 0))
* .multiplyBy(literal(100))
* )
* .from(sale)
* .innerJoin(customer, sale.field("customerId").eq(customer.field("id")))
* .fullOuterJoin(amountsByAgeLastMonth, amountsByAgeLastMonth.asTable().field("age").eq(customer.field("age")));
* </pre></blockquote>
*
* @param subSelect the sub select statement to join on to
* @param criterion the criteria on which to join the tables
* @return a new select statement with the change applied.
*/
public T fullOuterJoin(SelectStatement subSelect, Criterion criterion) {
return copyOnWriteOrMutate(
b -> b.fullOuterJoin(subSelect, criterion),
() -> joins.add(new Join(JoinType.FULL_OUTER_JOIN, subSelect, criterion))
);
} | 3.68 |
hbase_MasterObserver_preClearDeadServers | /**
* Called before clearing dead region servers.
*/
default void preClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
pulsar_LocalBookkeeperEnsemble_waitForConnection | // Waiting for the SyncConnected event from the ZooKeeper server
public void waitForConnection() throws IOException {
try {
if (!clientConnectLatch.await(zkSessionTimeOut, TimeUnit.MILLISECONDS)) {
throw new IOException("Couldn't connect to zookeeper server");
}
} catch (InterruptedException e) {
throw new IOException("Interrupted when connecting to zookeeper server", e);
}
} | 3.68 |
hadoop_Server_getPrefix | /**
* Returns the server prefix for server configuration properties.
* <p>
* By default it is the server name.
*
* @return the prefix for server configuration properties.
*/
public String getPrefix() {
return getName();
} | 3.68 |
hadoop_UnmanagedApplicationManager_getAppId | /**
* Returns the application id of the UAM.
*
* @return application id of the UAM
*/
public ApplicationId getAppId() {
return this.applicationId;
} | 3.68 |