name | code_snippet | score
---|---|---|
flink_AbstractPagedOutputView_getCurrentPositionInSegment | /**
* Gets the current write position (the position where the next bytes will be written) in the
* current memory segment.
*
* @return The current write offset in the current memory segment.
*/
public int getCurrentPositionInSegment() {
return this.positionInSegment;
} | 3.68 |
hbase_Compactor_getFileDetails | /**
* Extracts some details about the files to compact that are commonly needed by compactors.
* @param filesToCompact Files.
* @param allFiles Whether all files are included for compaction
* @param major If major compaction
* @return The result.
*/
private FileDetails getFileDetails(Collection<HStoreFile> filesToCompact, boolean allFiles,
boolean major) throws IOException {
FileDetails fd = new FileDetails();
long oldestHFileTimestampToKeepMVCC =
EnvironmentEdgeManager.currentTime() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);
for (HStoreFile file : filesToCompact) {
if (allFiles && (file.getModificationTimestamp() < oldestHFileTimestampToKeepMVCC)) {
// when isAllFiles is true, all files are compacted so we can calculate the smallest
// MVCC value to keep
if (fd.minSeqIdToKeep < file.getMaxMemStoreTS()) {
fd.minSeqIdToKeep = file.getMaxMemStoreTS();
}
}
long seqNum = file.getMaxSequenceId();
fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
StoreFileReader r = file.getReader();
if (r == null) {
LOG.warn("Null reader for " + file.getPath());
continue;
}
// NOTE: use getEntries when compacting instead of getFilterEntries, otherwise under-sized
// blooms can cause progress to be miscalculated or if the user switches bloom
// type (e.g. from ROW to ROWCOL)
long keyCount = r.getEntries();
fd.maxKeyCount += keyCount;
// calculate the latest MVCC readpoint in any of the involved store files
Map<byte[], byte[]> fileInfo = r.loadFileInfo();
// calculate the total size of the compacted files
fd.totalCompactedFilesSize += r.length();
byte[] tmp = null;
// Get and set the real MVCCReadpoint for bulk loaded files, which is the
// SeqId number.
if (r.isBulkLoaded()) {
fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
} else {
tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
if (tmp != null) {
fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
}
}
tmp = fileInfo.get(HFileInfo.MAX_TAGS_LEN);
if (tmp != null) {
fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
}
// If required, calculate the earliest put timestamp of all involved storefiles.
// This is used to remove family delete marker during compaction.
long earliestPutTs = 0;
if (allFiles) {
tmp = fileInfo.get(EARLIEST_PUT_TS);
if (tmp == null) {
// There's a file with no information, must be an old one
// assume we have very old puts
fd.earliestPutTs = earliestPutTs = PrivateConstants.OLDEST_TIMESTAMP;
} else {
earliestPutTs = Bytes.toLong(tmp);
fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
}
}
tmp = fileInfo.get(TIMERANGE_KEY);
fd.latestPutTs =
tmp == null ? HConstants.LATEST_TIMESTAMP : TimeRangeTracker.parseFrom(tmp).getMax();
LOG.debug(
"Compacting {}, keycount={}, bloomtype={}, size={}, "
+ "encoding={}, compression={}, seqNum={}{}",
(file.getPath() == null ? null : file.getPath().getName()), keyCount,
r.getBloomFilterType().toString(), TraditionalBinaryPrefix.long2String(r.length(), "", 1),
r.getHFileReader().getDataBlockEncoding(),
major ? majorCompactionCompression : minorCompactionCompression, seqNum,
(allFiles ? ", earliestPutTs=" + earliestPutTs : ""));
}
return fd;
} | 3.68 |
framework_DataProvider_getId | /**
* Gets an identifier for the given item. This identifier is used by the
* framework to determine equality between two items.
* <p>
* Default is to use item itself as its own identifier. If the item has
* {@link Object#equals(Object)} and {@link Object#hashCode()} implemented
* in a way that it can be compared to other items, no changes are required.
* <p>
* <strong>Note:</strong> This method will be called often by the Framework.
* It should not do any expensive operations.
*
* @param item
* the item to get identifier for; not {@code null}
* @return the identifier for given item; not {@code null}
*/
public default Object getId(T item) {
Objects.requireNonNull(item, "Cannot provide an id for a null item.");
return item;
} | 3.68 |
framework_CSSInjectWithColorpicker_createFontSizeSelect | /**
* Creates a font size selection control.
*
* @return the font size selection component
*/
private Component createFontSizeSelect() {
final ComboBox<Integer> select = new ComboBox<>(null,
Arrays.asList(8, 9, 10, 12, 14, 16, 20, 25, 30, 40, 50));
select.setWidth("100px");
select.setValue(12);
select.setPlaceholder("Font size");
select.setDescription("Font size");
select.setEmptySelectionAllowed(false);
select.addValueChangeListener(event -> {
// Get the new font size
Integer fontSize = select.getValue();
// Get the stylesheet of the page
Styles styles = Page.getCurrent().getStyles();
// inject the new font size as a style. We need .v-app to
// override Vaadin's default styles here
styles.add(".v-app .v-textarea.text-label { font-size:"
+ String.valueOf(fontSize) + "px; }");
});
return select;
} | 3.68 |
pulsar_ProducerImpl_failPendingBatchMessages | /**
* Fail any pending batch messages that were enqueued but whose batch was not yet closed out.
*
*/
private void failPendingBatchMessages(PulsarClientException ex) {
if (batchMessageContainer.isEmpty()) {
return;
}
final int numMessagesInBatch = batchMessageContainer.getNumMessagesInBatch();
final long currentBatchSize = batchMessageContainer.getCurrentBatchSize();
final int batchAllocatedSizeBytes = batchMessageContainer.getBatchAllocatedSizeBytes();
semaphoreRelease(numMessagesInBatch);
client.getMemoryLimitController().releaseMemory(currentBatchSize + batchAllocatedSizeBytes);
batchMessageContainer.discard(ex);
} | 3.68 |
hadoop_IOStatisticsStoreImpl_trackDuration | /**
* If the store is tracking the given key, return the
* duration tracker for it. If not tracked, return the
* stub tracker.
* @param key statistic key prefix
* @param count #of times to increment the matching counter in this
* operation.
* @return a tracker.
*/
@Override
public DurationTracker trackDuration(final String key, final long count) {
if (counterMap.containsKey(key)) {
return new StatisticDurationTracker(this, key, count);
} else {
return stubDurationTracker();
}
} | 3.68 |
hadoop_PathLocation_getSourcePath | /**
* Get the source path in the global namespace for this path location.
*
* @return The path in the global namespace.
*/
public String getSourcePath() {
return this.sourcePath;
} | 3.68 |
pulsar_LinuxInfoUtils_isUsable | /**
* Determine whether nic is usable.
* @param nicPath Nic path
* @return whether nic is usable.
*/
private static boolean isUsable(Path nicPath) {
try {
String operstate = readTrimStringFromFile(nicPath.resolve("operstate"));
Operstate operState = Operstate.valueOf(operstate.toUpperCase(Locale.ROOT));
switch (operState) {
case UP:
case UNKNOWN:
case DORMANT:
return true;
default:
return false;
}
} catch (Exception e) {
log.warn("[LinuxInfo] Failed to read {} NIC operstate, the detail is: {}", nicPath, e.getMessage());
// Read operstate got error.
return false;
}
} | 3.68 |
graphhopper_ViaRouting_buildEdgeRestrictions | /**
* Determines restrictions for the start/target edges to account for the heading, pass_through and curbside parameters
* for a single via-route leg.
*
* @param fromHeading the heading at the start node of this leg, or NaN if no restriction should be applied
* @param toHeading the heading at the target node (the vehicle's heading when arriving at the target), or NaN if
* no restriction should be applied
* @param incomingEdge the last edge of the previous leg (or {@link EdgeIterator#NO_EDGE} if not available)
*/
private static EdgeRestrictions buildEdgeRestrictions(
QueryGraph queryGraph, Snap fromSnap, Snap toSnap,
double fromHeading, double toHeading, int incomingEdge, boolean passThrough,
String fromCurbside, String toCurbside, DirectedEdgeFilter edgeFilter) {
EdgeRestrictions edgeRestrictions = new EdgeRestrictions();
// curbsides
if (!fromCurbside.equals(CURBSIDE_ANY) || !toCurbside.equals(CURBSIDE_ANY)) {
DirectedEdgeFilter directedEdgeFilter = (edge, reverse) -> {
// todo: maybe find a cleaner way to obtain the original edge given a VirtualEdgeIterator (not VirtualEdgeIteratorState)
if (queryGraph.isVirtualEdge(edge.getEdge())) {
EdgeIteratorState virtualEdge = queryGraph.getEdgeIteratorStateForKey(edge.getEdgeKey());
EdgeIteratorState origEdge = queryGraph.getEdgeIteratorStateForKey(((VirtualEdgeIteratorState) virtualEdge).getOriginalEdgeKey());
return edgeFilter.accept(origEdge, reverse);
} else
return edgeFilter.accept(edge, reverse);
};
DirectionResolver directionResolver = new DirectionResolver(queryGraph, directedEdgeFilter);
DirectionResolverResult fromDirection = directionResolver.resolveDirections(fromSnap.getClosestNode(), fromSnap.getQueryPoint());
DirectionResolverResult toDirection = directionResolver.resolveDirections(toSnap.getClosestNode(), toSnap.getQueryPoint());
int sourceOutEdge = DirectionResolverResult.getOutEdge(fromDirection, fromCurbside);
int targetInEdge = DirectionResolverResult.getInEdge(toDirection, toCurbside);
if (fromSnap.getClosestNode() == toSnap.getClosestNode()) {
// special case where we go from one point back to itself. for example going from a point A
// with curbside right to the same point with curbside right is interpreted as 'being there
// already' -> empty path. Similarly if the curbside for the start/target is not even specified
// there is no need to drive a loop. However, going from point A/right to point A/left (or the
// other way around) means we need to drive some kind of loop to get back to the same location
// (arriving on the other side of the road).
if (Helper.isEmpty(fromCurbside) || Helper.isEmpty(toCurbside) ||
fromCurbside.equals(CURBSIDE_ANY) || toCurbside.equals(CURBSIDE_ANY) ||
fromCurbside.equals(toCurbside)) {
// we just disable start/target edge constraints to get an empty path
sourceOutEdge = ANY_EDGE;
targetInEdge = ANY_EDGE;
}
}
edgeRestrictions.setSourceOutEdge(sourceOutEdge);
edgeRestrictions.setTargetInEdge(targetInEdge);
}
// heading
if (!Double.isNaN(fromHeading) || !Double.isNaN(toHeading)) {
// todo: for heading/pass_through with edge-based routing (especially CH) we have to find the edge closest
// to the heading and use it as sourceOutEdge/targetInEdge here. the heading penalty will not be applied
// this way (unless we implement this), but this is more or less ok as we can use finite u-turn costs
// instead. maybe the hardest part is dealing with headings that cannot be fulfilled, like in one-way
// streets. see also #1765
HeadingResolver headingResolver = new HeadingResolver(queryGraph);
if (!Double.isNaN(fromHeading))
edgeRestrictions.getUnfavoredEdges().addAll(headingResolver.getEdgesWithDifferentHeading(fromSnap.getClosestNode(), fromHeading));
if (!Double.isNaN(toHeading)) {
toHeading += 180;
if (toHeading > 360)
toHeading -= 360;
edgeRestrictions.getUnfavoredEdges().addAll(headingResolver.getEdgesWithDifferentHeading(toSnap.getClosestNode(), toHeading));
}
}
// pass through
if (incomingEdge != NO_EDGE && passThrough)
edgeRestrictions.getUnfavoredEdges().add(incomingEdge);
return edgeRestrictions;
} | 3.68 |
hadoop_HsController_singleJobCounter | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#singleJobCounter()
*/
@Override
public void singleJobCounter() throws IOException{
super.singleJobCounter();
} | 3.68 |
hadoop_ResourceEstimatorService_getEstimatedResourceAllocation | /**
* Get estimated {@code Resource} allocation for the pipeline.
*
* @param pipelineId id of the pipeline.
* @return Json format of {@link RLESparseResourceAllocation}.
* @throws SkylineStoreException if it fails to get the estimated {@code Resource}
* allocation from {@link SkylineStore}.
*/
@GET @Path("/skylinestore/estimation/{pipelineId}")
@Produces(MediaType.APPLICATION_JSON)
public String getEstimatedResourceAllocation(
@PathParam("pipelineId") String pipelineId) throws SkylineStoreException {
RLESparseResourceAllocation result = skylineStore.getEstimation(pipelineId);
final String skyline = gson.toJson(result, rleType);
LOGGER.debug("Query the skyline store for pipelineId: {}.", pipelineId);
return skyline;
} | 3.68 |
framework_VAbstractTextualDate_updateBufferedResolutions | /**
* Updates {@link VDateField#bufferedResolutions bufferedResolutions} before
* sending a response to the server.
* <p>
* The method can be overridden by subclasses to provide a custom logic for
* date variables to avoid overriding the {@link #onChange(ChangeEvent)}
* method.
*
* <p>
* Note that this method should not send the buffered values. For that, use
* {@link #sendBufferedValues()}.
*
* @since 8.2
*/
protected void updateBufferedResolutions() {
Date currentDate = getDate();
if (currentDate != null) {
bufferedResolutions.put(
getResolutions().filter(this::isYear).findFirst().get(),
currentDate.getYear() + 1900);
}
} | 3.68 |
MagicPlugin_Targeting_getCurBlock | /**
* Returns the current block along the line of vision
*
* @return The block
*/
public Block getCurBlock()
{
return currentBlock;
} | 3.68 |
flink_InputSelection_fairSelectNextIndex | /**
* Fairly select one of the available inputs for reading.
*
* @param inputMask The mask of inputs that are selected. Note that -1 is interpreted as all
* of the 32 inputs being available.
* @param availableInputsMask The mask of all available inputs. Note that -1 is interpreted as
* all of the 32 inputs being available.
* @param lastReadInputIndex The index of last read input.
* @return the index of the input for reading or {@link InputSelection#NONE_AVAILABLE} (if
* {@code inputMask} is empty or the inputs in {@code inputMask} are unavailable).
*/
public static int fairSelectNextIndex(
long inputMask, long availableInputsMask, int lastReadInputIndex) {
long combineMask = availableInputsMask & inputMask;
if (combineMask == 0) {
return NONE_AVAILABLE;
}
int nextReadInputIndex = selectFirstBitRightFromNext(combineMask, lastReadInputIndex + 1);
if (nextReadInputIndex >= 0) {
return nextReadInputIndex;
}
return selectFirstBitRightFromNext(combineMask, 0);
} | 3.68 |
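A minimal, self-contained sketch of the wrap-around bit scan that `fairSelectNextIndex` relies on; this is not the Flink implementation, and the helper name, the 64-bit mask width and the example masks are illustrative assumptions.

```java
public class FairSelectSketch {
    // Scan for the next set bit in `mask`, starting at `start` and wrapping to bit 0,
    // which gives the fair round-robin behaviour described above.
    static int fairSelect(long mask, int start) {
        for (int i = 0; i < 64; i++) {
            int idx = (start + i) % 64;
            if ((mask & (1L << idx)) != 0) {
                return idx;
            }
        }
        return -1; // nothing available, analogous to NONE_AVAILABLE
    }

    public static void main(String[] args) {
        long available = 0b0110L;                     // inputs 1 and 2 are available
        System.out.println(fairSelect(available, 2)); // 2: continue after last read index 1
        System.out.println(fairSelect(available, 3)); // 1: wraps around to the lowest set bit
    }
}
```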
hbase_WALKey_toStringMap | /**
* Produces a string map for this key. Useful for programmatic use and manipulation of the data
* stored in a WALKeyImpl, for example, printing as JSON.
* @return a Map containing data from this key
*/
default Map<String, Object> toStringMap() {
Map<String, Object> stringMap = new HashMap<>();
stringMap.put("table", getTableName());
stringMap.put("region", Bytes.toStringBinary(getEncodedRegionName()));
stringMap.put("sequence", getSequenceId());
Map<String, byte[]> extendedAttributes = getExtendedAttributes();
if (extendedAttributes != null) {
for (Map.Entry<String, byte[]> entry : extendedAttributes.entrySet()) {
stringMap.put(entry.getKey(), Bytes.toStringBinary(entry.getValue()));
}
}
return stringMap;
} | 3.68 |
framework_TreeTable_setAnimationsEnabled | /**
* Animations can be enabled by passing true to this method. Currently
* expanding rows slide in from the top and collapsing rows slide out the
* same way. NOTE! not supported in Internet Explorer 6 or 7.
*
* @param animationsEnabled
* true or false whether to enable animations or not.
*/
public void setAnimationsEnabled(boolean animationsEnabled) {
this.animationsEnabled = animationsEnabled;
markAsDirty();
} | 3.68 |
hbase_LruBlockCache_getCachedFileNamesForTest | /**
* Used in testing. May be very inefficient.
* @return the set of cached file names
*/
SortedSet<String> getCachedFileNamesForTest() {
SortedSet<String> fileNames = new TreeSet<>();
for (BlockCacheKey cacheKey : map.keySet()) {
fileNames.add(cacheKey.getHfileName());
}
return fileNames;
} | 3.68 |
hadoop_GetGroupsBase_run | /**
* Get the groups for the users given and print formatted output to the
* {@link PrintStream} configured earlier.
*/
@Override
public int run(String[] args) throws Exception {
if (args.length == 0) {
args = new String[] { UserGroupInformation.getCurrentUser().getUserName() };
}
for (String username : args) {
StringBuilder sb = new StringBuilder();
sb.append(username + " :");
for (String group : getUgmProtocol().getGroupsForUser(username)) {
sb.append(" ")
.append(group);
}
out.println(sb);
}
return 0;
} | 3.68 |
flink_StreamGraphGenerator_transform | /**
* Transforms one {@code Transformation}.
*
* <p>This checks whether we already transformed it and exits early in that case. If not it
* delegates to one of the transformation specific methods.
*/
private Collection<Integer> transform(Transformation<?> transform) {
if (alreadyTransformed.containsKey(transform)) {
return alreadyTransformed.get(transform);
}
LOG.debug("Transforming " + transform);
if (transform.getMaxParallelism() <= 0) {
// if the max parallelism hasn't been set, then first use the job wide max parallelism
// from the ExecutionConfig.
int globalMaxParallelismFromConfig = executionConfig.getMaxParallelism();
if (globalMaxParallelismFromConfig > 0) {
transform.setMaxParallelism(globalMaxParallelismFromConfig);
}
}
transform
.getSlotSharingGroup()
.ifPresent(
slotSharingGroup -> {
final ResourceSpec resourceSpec =
SlotSharingGroupUtils.extractResourceSpec(slotSharingGroup);
if (!resourceSpec.equals(ResourceSpec.UNKNOWN)) {
slotSharingGroupResources.compute(
slotSharingGroup.getName(),
(name, profile) -> {
if (profile == null) {
return ResourceProfile.fromResourceSpec(
resourceSpec, MemorySize.ZERO);
} else if (!ResourceProfile.fromResourceSpec(
resourceSpec, MemorySize.ZERO)
.equals(profile)) {
throw new IllegalArgumentException(
"The slot sharing group "
+ slotSharingGroup.getName()
+ " has been configured with two different resource spec.");
} else {
return profile;
}
});
}
});
// call at least once to trigger exceptions about MissingTypeInfo
transform.getOutputType();
@SuppressWarnings("unchecked")
final TransformationTranslator<?, Transformation<?>> translator =
(TransformationTranslator<?, Transformation<?>>)
translatorMap.get(transform.getClass());
Collection<Integer> transformedIds;
if (translator != null) {
transformedIds = translate(translator, transform);
} else {
transformedIds = legacyTransform(transform);
}
// need this check because the iterate transformation adds itself before
// transforming the feedback edges
if (!alreadyTransformed.containsKey(transform)) {
alreadyTransformed.put(transform, transformedIds);
}
return transformedIds;
} | 3.68 |
framework_VAbstractCalendarPanel_isAcceptedByRangeEnd | /**
* Accepts dates earlier than or equal to rangeEnd, depending on the
* resolution. If the resolution is set to DAY, the range will compare on a
* day-basis. If the resolution is set to YEAR, only years are compared. So
* even if the range is set to one millisecond into the next year, the next
* year will still be included.
*
* @param date
* @param minResolution
* @return
*/
private boolean isAcceptedByRangeEnd(Date date, R minResolution) {
assert (date != null);
// rangeEnd == null means that we accept all values above rangeStart
if (rangeEnd == null) {
return true;
}
// If dateStrResolution has more year digits than rangeEnd, we need
// to pad it in order to be lexicographically compatible
String dateStrResolution = dateStrResolution(date, minResolution);
String paddedEnd = rangeEnd;
int yearDigits = dateStrResolution.indexOf("-");
if (yearDigits == -1) {
yearDigits = dateStrResolution.length();
}
while (paddedEnd.indexOf("-") < yearDigits) {
paddedEnd = "0" + paddedEnd;
}
return paddedEnd.substring(0, dateStrResolution.length())
.compareTo(dateStrResolution) >= 0;
} | 3.68 |
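A small illustration (not Vaadin code) of the truncate-and-compare step above: the range end string is cut to the resolution of the formatted date and compared lexicographically. The date strings are made-up examples and the padding step is omitted.

```java
public class RangeEndCompareSketch {
    public static void main(String[] args) {
        String dateStrResolution = "2024-03";  // date formatted at MONTH resolution
        String rangeEnd = "2024-06-15";        // range end formatted at DAY resolution
        boolean accepted = rangeEnd.substring(0, dateStrResolution.length())
                .compareTo(dateStrResolution) >= 0;
        System.out.println(accepted);          // true: March 2024 is on or before the range end
    }
}
```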
hadoop_AbstractDNSToSwitchMapping_getSwitchMap | /**
* Get a copy of the map (for diagnostics)
* @return a clone of the map or null for none known
*/
public Map<String, String> getSwitchMap() {
return null;
} | 3.68 |
framework_Table_getColumnFooter | /**
* Gets the footer caption beneath the rows.
*
* @param propertyId
* The propertyId of the column
* @return The caption of the footer or NULL if not set
*/
public String getColumnFooter(Object propertyId) {
return columnFooters.get(propertyId);
} | 3.68 |
hbase_MetricsSource_setTimeStampNextToReplicate | /**
* TimeStamp of next edit targeted for replication. Used for calculating lag: if this timestamp
* is greater than the timestamp of the last shipped edit, there is at least one edit pending
* replication.
* @param timeStampNextToReplicate timestamp of next edit in the queue that should be replicated.
*/
public void setTimeStampNextToReplicate(long timeStampNextToReplicate) {
this.timeStampNextToReplicate = timeStampNextToReplicate;
} | 3.68 |
AreaShop_GeneralRegion_saveRequired | /**
* Indicate this region needs to be saved, saving will happen by a repeating task.
*/
public void saveRequired() {
saveRequired = true;
} | 3.68 |
framework_VAbstractSplitPanel_constructDom | /**
* Constructs the DOM structure for this widget.
*/
protected void constructDom() {
DOM.appendChild(splitter, DOM.createDiv()); // for styling
DOM.appendChild(getElement(), wrapper);
wrapper.getStyle().setPosition(Position.RELATIVE);
wrapper.getStyle().setWidth(100, Unit.PCT);
wrapper.getStyle().setHeight(100, Unit.PCT);
DOM.appendChild(wrapper, firstContainer);
DOM.appendChild(wrapper, splitter);
DOM.appendChild(wrapper, secondContainer);
splitter.getStyle().setPosition(Position.ABSOLUTE);
secondContainer.getStyle().setPosition(Position.ABSOLUTE);
setStylenames();
} | 3.68 |
framework_Table_getItemIdsInRange | /**
* Gets item ids from a range of key values.
*
* @param itemId
* The start key
* @param length
* amount of items to be retrieved
* @return the item ids in the requested range
*/
private LinkedHashSet<Object> getItemIdsInRange(Object itemId,
final int length) {
LinkedHashSet<Object> ids = new LinkedHashSet<Object>();
for (int i = 0; i < length; i++) {
// should not be null unless client-server are out of sync
assert itemId != null;
ids.add(itemId);
itemId = nextItemId(itemId);
}
return ids;
} | 3.68 |
flink_SkipListUtils_getPrevIndexNode | /**
* Returns previous key pointer on the given index level.
*
* @param memorySegment memory segment for key space.
* @param offset offset of key space in the memory segment.
* @param totalLevel the level of the node.
* @param level on which level to get the previous key pointer of the node.
*/
public static long getPrevIndexNode(
MemorySegment memorySegment, int offset, int totalLevel, int level) {
int of = getIndexOffset(offset, totalLevel, level);
return memorySegment.getLong(of);
} | 3.68 |
hbase_MiniHBaseCluster_getLiveMasterThreads | /** Returns List of live master threads (skips the aborted and the killed) */
public List<JVMClusterUtil.MasterThread> getLiveMasterThreads() {
return this.hbaseCluster.getLiveMasters();
} | 3.68 |
flink_CopyOnWriteSkipListStateMapSnapshot_writeValue | /** Write value from bytes. */
private void writeValue(long valuePointer, DataOutputView outputView) throws IOException {
outputView.write(owningStateMap.helpGetBytesForState(valuePointer));
} | 3.68 |
flink_CoGroupOperatorBase_setGroupOrderForInputOne | /**
* Sets the order of the elements within a group for the first input.
*
* @param order The order for the elements in a group.
*/
public void setGroupOrderForInputOne(Ordering order) {
setGroupOrder(0, order);
} | 3.68 |
dubbo_NetUtils_isPortInUsed | /**
* Check whether the port is in use in the OS.
* @param port port to check
* @return true if it's occupied
*/
public static boolean isPortInUsed(int port) {
try (ServerSocket ignored = new ServerSocket(port)) {
return false;
} catch (IOException e) {
// continue
}
return true;
} | 3.68 |
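A standalone usage sketch of the same probe (the class name and port number are illustrative): binding a ServerSocket succeeds only when the port is free.

```java
import java.io.IOException;
import java.net.ServerSocket;

public class PortProbeSketch {
    // Same try-with-resources idea as isPortInUsed: if the bind succeeds, the port is free.
    static boolean isPortInUsed(int port) {
        try (ServerSocket ignored = new ServerSocket(port)) {
            return false;
        } catch (IOException e) {
            return true;
        }
    }

    public static void main(String[] args) {
        System.out.println("Port 65000 in use? " + isPortInUsed(65000));
    }
}
```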
hbase_KeyStoreFileType_fromPropertyValue | /**
* Converts a property value to a StoreFileType enum. If the property value is <code>null</code>
* or an empty string, returns <code>null</code>.
* @param propertyValue the property value.
* @return the KeyStoreFileType, or <code>null</code> if <code>propertyValue</code> is
* <code>null</code> or empty.
* @throws IllegalArgumentException if <code>propertyValue</code> is not one of "JKS", "PEM",
* "BCFKS", "PKCS12", or empty/null.
*/
public static KeyStoreFileType fromPropertyValue(String propertyValue) {
if (propertyValue == null || propertyValue.length() == 0) {
return null;
}
return KeyStoreFileType.valueOf(propertyValue.toUpperCase());
} | 3.68 |
hbase_TableSplit_toString | /**
* Returns the details about this instance as a string.
* @return The values of this instance as a string.
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Split(");
sb.append("tablename=").append(tableName);
// null scan input is represented by ""
String printScan = "";
if (!scan.equals("")) {
try {
// get the real scan here in toString, not the Base64 string
printScan = TableMapReduceUtil.convertStringToScan(scan).toString();
} catch (IOException e) {
printScan = "";
}
sb.append(", scan=").append(printScan);
}
sb.append(", startrow=").append(Bytes.toStringBinary(startRow));
sb.append(", endrow=").append(Bytes.toStringBinary(endRow));
sb.append(", regionLocation=").append(regionLocation);
sb.append(", regionname=").append(encodedRegionName);
sb.append(")");
return sb.toString();
} | 3.68 |
morf_ConnectionResourcesBean_setPassword | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setPassword(java.lang.String)
*/
@Override
public void setPassword(String password) {
this.password = password;
} | 3.68 |
framework_Window_removeResizeListener | /**
* Remove a resize listener.
*
* @param listener
*            the listener to remove
*/
@Deprecated
public void removeResizeListener(ResizeListener listener) {
removeListener(ResizeEvent.class, listener);
} | 3.68 |
hbase_AsyncTable_ifEquals | /**
* Check for equality.
* @param value the expected value
*/
default CheckAndMutateBuilder ifEquals(byte[] value) {
return ifMatches(CompareOperator.EQUAL, value);
} | 3.68 |
hudi_CleanPlanner_isFileSliceNeededForPendingMajorOrMinorCompaction | /**
 * Determine if a file slice needs to be preserved for a pending compaction or log compaction.
* @param fileSlice File slice
* @return true if file slice needs to be preserved, false otherwise.
*/
private boolean isFileSliceNeededForPendingMajorOrMinorCompaction(FileSlice fileSlice) {
return isFileSliceNeededForPendingCompaction(fileSlice) || isFileSliceNeededForPendingLogCompaction(fileSlice);
} | 3.68 |
graphhopper_StringUtils_getLevenshteinDistance | /**
* <p>Find the Levenshtein distance between two Strings.</p>
* <p>
* <p>This is the number of changes needed to change one String into
* another, where each change is a single character modification (deletion,
* insertion or substitution).</p>
* <p>
* <p>The implementation uses a single-dimensional array of length s.length() + 1. See
* <a href="http://blog.softwx.net/2014/12/optimizing-levenshtein-algorithm-in-c.html">
* http://blog.softwx.net/2014/12/optimizing-levenshtein-algorithm-in-c.html</a> for details.</p>
* <p>
* <pre>
* StringUtils.getLevenshteinDistance(null, *) = IllegalArgumentException
* StringUtils.getLevenshteinDistance(*, null) = IllegalArgumentException
* StringUtils.getLevenshteinDistance("","") = 0
* StringUtils.getLevenshteinDistance("","a") = 1
* StringUtils.getLevenshteinDistance("aaapppp", "") = 7
* StringUtils.getLevenshteinDistance("frog", "fog") = 1
* StringUtils.getLevenshteinDistance("fly", "ant") = 3
* StringUtils.getLevenshteinDistance("elephant", "hippo") = 7
* StringUtils.getLevenshteinDistance("hippo", "elephant") = 7
* StringUtils.getLevenshteinDistance("hippo", "zzzzzzzz") = 8
* StringUtils.getLevenshteinDistance("hello", "hallo") = 1
* </pre>
*
* @param s the first String, must not be null
* @param t the second String, must not be null
* @return result distance
* @throws IllegalArgumentException if either String input {@code null}
* @since 3.0 Changed signature from getLevenshteinDistance(String, String) to
* getLevenshteinDistance(CharSequence, CharSequence)
*/
public static int getLevenshteinDistance(CharSequence s, CharSequence t) {
if (s == null || t == null) {
throw new IllegalArgumentException("Strings must not be null");
}
int n = s.length();
int m = t.length();
if (n == 0) {
return m;
} else if (m == 0) {
return n;
}
if (n > m) {
// swap the input strings to consume less memory
final CharSequence tmp = s;
s = t;
t = tmp;
n = m;
m = t.length();
}
final int p[] = new int[n + 1];
// indexes into strings s and t
int i; // iterates through s
int j; // iterates through t
int upper_left;
int upper;
char t_j; // jth character of t
int cost;
for (i = 0; i <= n; i++) {
p[i] = i;
}
for (j = 1; j <= m; j++) {
upper_left = p[0];
t_j = t.charAt(j - 1);
p[0] = j;
for (i = 1; i <= n; i++) {
upper = p[i];
cost = s.charAt(i - 1) == t_j ? 0 : 1;
// minimum of cell to the left+1, to the top+1, diagonally left and up +cost
p[i] = Math.min(Math.min(p[i - 1] + 1, p[i] + 1), upper_left + cost);
upper_left = upper;
}
}
return p[n];
} | 3.68 |
zxing_MultiFormatReader_decodeWithState | /**
* Decode an image using the state set up by calling setHints() previously. Continuous scan
* clients will get a <b>large</b> speed increase by using this instead of decode().
*
* @param image The pixel data to decode
* @return The contents of the image
* @throws NotFoundException Any errors which occurred
*/
public Result decodeWithState(BinaryBitmap image) throws NotFoundException {
// Make sure to set up the default state so we don't crash
if (readers == null) {
setHints(null);
}
return decodeInternal(image);
} | 3.68 |
flink_GroupCombineNode_getOperator | /**
* Gets the operator represented by this optimizer node.
*
* @return The operator represented by this optimizer node.
*/
@Override
public GroupCombineOperatorBase<?, ?, ?> getOperator() {
return (GroupCombineOperatorBase<?, ?, ?>) super.getOperator();
} | 3.68 |
hadoop_SuccessData_getDescription | /**
* @return any description text.
*/
public String getDescription() {
return description;
} | 3.68 |
zxing_Detector_extractParameters | /**
* Extracts the number of data layers and data blocks from the layer around the bull's eye.
*
* @param bullsEyeCorners the array of bull's eye corners
* @return the number of errors corrected during parameter extraction
* @throws NotFoundException in case of too many errors or invalid parameters
*/
private int extractParameters(ResultPoint[] bullsEyeCorners) throws NotFoundException {
if (!isValid(bullsEyeCorners[0]) || !isValid(bullsEyeCorners[1]) ||
!isValid(bullsEyeCorners[2]) || !isValid(bullsEyeCorners[3])) {
throw NotFoundException.getNotFoundInstance();
}
int length = 2 * nbCenterLayers;
// Get the bits around the bull's eye
int[] sides = {
sampleLine(bullsEyeCorners[0], bullsEyeCorners[1], length), // Right side
sampleLine(bullsEyeCorners[1], bullsEyeCorners[2], length), // Bottom
sampleLine(bullsEyeCorners[2], bullsEyeCorners[3], length), // Left side
sampleLine(bullsEyeCorners[3], bullsEyeCorners[0], length) // Top
};
// bullsEyeCorners[shift] is the corner of the bulls'eye that has three
// orientation marks.
// sides[shift] is the row/column that goes from the corner with three
// orientation marks to the corner with two.
shift = getRotation(sides, length);
// Flatten the parameter bits into a single 28- or 40-bit long
long parameterData = 0;
for (int i = 0; i < 4; i++) {
int side = sides[(shift + i) % 4];
if (compact) {
// Each side of the form ..XXXXXXX. where Xs are parameter data
parameterData <<= 7;
parameterData += (side >> 1) & 0x7F;
} else {
// Each side of the form ..XXXXX.XXXXX. where Xs are parameter data
parameterData <<= 10;
parameterData += ((side >> 2) & (0x1f << 5)) + ((side >> 1) & 0x1F);
}
}
// Corrects parameter data using RS. Returns just the data portion
// without the error correction.
CorrectedParameter correctedParam = getCorrectedParameterData(parameterData, compact);
int correctedData = correctedParam.getData();
if (compact) {
// 8 bits: 2 bits layers and 6 bits data blocks
nbLayers = (correctedData >> 6) + 1;
nbDataBlocks = (correctedData & 0x3F) + 1;
} else {
// 16 bits: 5 bits layers and 11 bits data blocks
nbLayers = (correctedData >> 11) + 1;
nbDataBlocks = (correctedData & 0x7FF) + 1;
}
return correctedParam.getErrorsCorrected();
} | 3.68 |
hbase_SaslClientAuthenticationProviders_getSimpleProvider | /**
* Returns the provider and token pair for SIMPLE authentication. This method is a "hack" while
* SIMPLE authentication for HBase does not flow through the SASL codepath.
*/
public Pair<SaslClientAuthenticationProvider, Token<? extends TokenIdentifier>>
getSimpleProvider() {
Optional<SaslClientAuthenticationProvider> optional = providers.stream()
.filter((p) -> p instanceof SimpleSaslClientAuthenticationProvider).findFirst();
return new Pair<>(optional.get(), null);
} | 3.68 |
flink_RemoteStorageScanner_run | /** Iterate the watched segment ids and check related file status. */
@Override
public void run() {
try {
Iterator<
Map.Entry<
Tuple2<TieredStoragePartitionId, TieredStorageSubpartitionId>,
Integer>>
iterator = requiredSegmentIds.entrySet().iterator();
boolean scanned = false;
while (iterator.hasNext()) {
Map.Entry<Tuple2<TieredStoragePartitionId, TieredStorageSubpartitionId>, Integer>
ids = iterator.next();
TieredStoragePartitionId partitionId = ids.getKey().f0;
TieredStorageSubpartitionId subpartitionId = ids.getKey().f1;
int requiredSegmentId = ids.getValue();
int maxSegmentId = scannedMaxSegmentIds.getOrDefault(ids.getKey(), -1);
if (maxSegmentId >= requiredSegmentId
&& checkSegmentExist(partitionId, subpartitionId, requiredSegmentId)) {
scanned = true;
iterator.remove();
checkNotNull(notifier).notifyAvailable(partitionId, subpartitionId);
} else {
// The segment should be watched again because it's not found.
// If the segment belongs to other tiers and has been consumed, the segment will
// be replaced by newly watched segment with larger segment id. This logic is
// ensured by the method {@code watchSegment}.
scanMaxSegmentId(partitionId, subpartitionId);
}
}
lastInterval =
scanned ? INITIAL_SCAN_INTERVAL_MS : scanStrategy.getInterval(lastInterval);
start();
} catch (Throwable throwable) {
// handle un-expected exception as unhandledExceptionHandler is not
// worked for ScheduledExecutorService.
FatalExitExceptionHandler.INSTANCE.uncaughtException(Thread.currentThread(), throwable);
}
} | 3.68 |
framework_Table_setTableFieldFactory | /**
* Sets the TableFieldFactory that is used to create editors for table cells.
*
* The TableFieldFactory is only used if the Table is editable. By default
* the DefaultFieldFactory is used.
*
* @param fieldFactory
* the field factory to set.
* @see #isEditable
* @see DefaultFieldFactory
*/
public void setTableFieldFactory(TableFieldFactory fieldFactory) {
this.fieldFactory = fieldFactory;
// Assure visual refresh
refreshRowCache();
} | 3.68 |
hadoop_TimelineAuthenticationFilterInitializer_initFilter | /**
* Initializes {@link TimelineAuthenticationFilter}.
* <p>
* Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
* configuration properties prefixed with
* {@value org.apache.hadoop.yarn.conf.YarnConfiguration#TIMELINE_HTTP_AUTH_PREFIX}.
*
* @param container
* The filter container.
* @param conf
* Configuration for run-time parameters.
*/
@Override
public void initFilter(FilterContainer container, Configuration conf) {
setAuthFilterConfig(conf);
String authType = filterConfig.get(AuthenticationFilter.AUTH_TYPE);
if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
filterConfig.put(AuthenticationFilter.AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
} else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
filterConfig.put(AuthenticationFilter.AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
}
filterConfig.put(DelegationTokenAuthenticationHandler.TOKEN_KIND,
TimelineDelegationTokenIdentifier.KIND_NAME.toString());
container.addGlobalFilter("Timeline Authentication Filter",
TimelineAuthenticationFilter.class.getName(),
filterConfig);
} | 3.68 |
querydsl_StringExpression_length | /**
* Create a {@code this.length()} expression
*
* <p>Return the length of this String</p>
*
* @return this.length()
* @see java.lang.String#length()
*/
public NumberExpression<Integer> length() {
if (length == null) {
length = Expressions.numberOperation(Integer.class, Ops.STRING_LENGTH, mixin);
}
return length;
} | 3.68 |
flink_TaskSlot_add | /**
* Add the given task to the task slot. This is only possible if there is not already another
* task with the same execution attempt id added to the task slot. In this case, the method
* returns true. Otherwise the task slot is left unchanged and false is returned.
*
* <p>In case that the task slot state is not active an {@link IllegalStateException} is thrown.
* In case that the task's job id and allocation id don't match with the job id and allocation
* id for which the task slot has been allocated, an {@link IllegalArgumentException} is thrown.
*
* @param task to be added to the task slot
* @throws IllegalStateException if the task slot is not in state active
* @return true if the task was added to the task slot; otherwise false
*/
public boolean add(T task) {
// Check that this slot has been assigned to the job sending this task
Preconditions.checkArgument(
task.getJobID().equals(jobId),
"The task's job id does not match the "
+ "job id for which the slot has been allocated.");
Preconditions.checkArgument(
task.getAllocationId().equals(allocationId),
"The task's allocation "
+ "id does not match the allocation id for which the slot has been allocated.");
Preconditions.checkState(
TaskSlotState.ACTIVE == state, "The task slot is not in state active.");
T oldTask = tasks.put(task.getExecutionId(), task);
if (oldTask != null) {
tasks.put(task.getExecutionId(), oldTask);
return false;
} else {
return true;
}
} | 3.68 |
hbase_MasterCoprocessorHost_preBalanceSwitch | // This hook allows a coprocessor to change the value of the balance switch.
public void preBalanceSwitch(final boolean b) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return;
}
execOperation(new MasterObserverOperation() {
@Override
public void call(MasterObserver observer) throws IOException {
observer.preBalanceSwitch(this, b);
}
});
} | 3.68 |
hbase_RequestConverter_buildClearRegionBlockCacheRequest | /**
* Creates a protocol buffer ClearRegionBlockCacheRequest
* @return a ClearRegionBlockCacheRequest
*/
public static ClearRegionBlockCacheRequest
buildClearRegionBlockCacheRequest(List<RegionInfo> hris) {
ClearRegionBlockCacheRequest.Builder builder = ClearRegionBlockCacheRequest.newBuilder();
hris.forEach(hri -> builder
.addRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hri.getRegionName())));
return builder.build();
} | 3.68 |
druid_IPAddress_isClassC | /**
* Check if the IP address belongs to a Class C IP address.
*
* @return <code>true</code> if the encapsulated IP address belongs to a class C IP address,
* <code>false</code> otherwise.
*/
public final boolean isClassC() {
return (ipAddress & 0x00000007) == 3;
} | 3.68 |
hbase_Reference_getFileRegion | /**
* Returns the region (top or bottom) covered by this reference.
*/
public Range getFileRegion() {
return this.region;
} | 3.68 |
hbase_HBaseConfiguration_create | /**
* Creates a Configuration with HBase resources
* @param that Configuration to clone.
* @return a Configuration created with the hbase-*.xml files plus the given configuration.
*/
public static Configuration create(final Configuration that) {
Configuration conf = create();
merge(conf, that);
return conf;
} | 3.68 |
AreaShop_GeneralRegion_getFriendsFeature | /**
* Get the friends feature to query and manipulate friends of this region.
* @return The FriendsFeature of this region
*/
public FriendsFeature getFriendsFeature() {
return getFeature(FriendsFeature.class);
} | 3.68 |
hbase_ForeignExceptionDispatcher_dispatch | /**
* Sends an exception to all listeners.
* @param e {@link ForeignException} containing the cause. Can be null.
*/
private void dispatch(ForeignException e) {
// update all the listeners with the passed error
for (ForeignExceptionListener l : listeners) {
l.receive(e);
}
} | 3.68 |
hbase_ScanResultConsumerBase_onScanMetricsCreated | /**
* If {@code scan.isScanMetricsEnabled()} returns true, then this method will be called prior to
* all other methods in this interface to give you the {@link ScanMetrics} instance for this scan
* operation. The {@link ScanMetrics} instance will be updated on-the-fly during the scan; you can
* store it somewhere to get the metrics at any time if you want.
*/
default void onScanMetricsCreated(ScanMetrics scanMetrics) {
} | 3.68 |
morf_AbstractSelectStatementBuilder_leftOuterJoin | /**
* Specifies a left outer join to a subselect:
*
* <blockquote><pre>
* TableReference sale = tableRef("Sale");
* TableReference customer = tableRef("Customer");
*
* // Define the subselect - a group by showing total sales by age in the
* // previous month.
* SelectStatement amountsByAgeLastMonth = select(field("age"), sum(field("amount")))
* .from(sale)
* .innerJoin(customer, sale.field("customerId").eq(customer.field("id")))
* .where(sale.field("month").eq(5))
* .groupBy(customer.field("age")
* .alias("amountByAge");
*
* // The outer select, showing each sale this month as a percentage of the sales
* // to that age the previous month
* SelectStatement outer = select(
* sale.field("id"),
* sale.field("amount")
* // May cause division by zero (!)
* .divideBy(isNull(amountsByAgeLastMonth.asTable().field("amount"), 0))
* .multiplyBy(literal(100))
* )
* .from(sale)
* .innerJoin(customer, sale.field("customerId").eq(customer.field("id")))
* .leftOuterJoin(amountsByAgeLastMonth, amountsByAgeLastMonth.asTable().field("age").eq(customer.field("age")));
* </pre></blockquote>
*
* @param subSelect the sub select statement to join on to
* @param criterion the criteria on which to join the tables
* @return this, for method chaining.
*/
public T leftOuterJoin(SelectStatement subSelect, Criterion criterion) {
joins.add(new Join(JoinType.LEFT_OUTER_JOIN, subSelect, criterion));
return castToChild(this);
} | 3.68 |
zxing_LocaleManager_getProductSearchCountryTLD | /**
* The same as above, but specifically for Google Product Search.
*
* @param context application's {@link Context}
* @return The top-level domain to use.
*/
public static String getProductSearchCountryTLD(Context context) {
return doGetTLD(GOOGLE_PRODUCT_SEARCH_COUNTRY_TLD, context);
} | 3.68 |
AreaShop_AreaShop_registerDynamicPermissions | /**
* Register dynamic permissions controlled by config settings.
*/
private void registerDynamicPermissions() {
// Register limit groups of amount of regions a player can have
ConfigurationSection section = getConfig().getConfigurationSection("limitGroups");
if(section == null) {
return;
}
for(String group : section.getKeys(false)) {
if(!"default".equals(group)) {
Permission perm = new Permission("areashop.limits." + group);
try {
Bukkit.getPluginManager().addPermission(perm);
} catch(IllegalArgumentException e) {
warn("Could not add the following permission to be used as limit: " + perm.getName());
}
}
}
Bukkit.getPluginManager().recalculatePermissionDefaults(Bukkit.getPluginManager().getPermission("playerwarps.limits"));
} | 3.68 |
hadoop_SchedulerHealth_getAggregateFulFilledReservationsCount | /**
* Get the aggregate of all the fulfilled reservations count.
*
* @return aggregate fulfilled reservations count
*/
public Long getAggregateFulFilledReservationsCount() {
return getAggregateOperationCount(Operation.FULFILLED_RESERVATION);
} | 3.68 |
hadoop_ItemInfo_getRetryCount | /**
* Get the attempted retry count of the block for satisfying the policy.
*/
public int getRetryCount() {
return retryCount;
} | 3.68 |
dubbo_GenericBeanPostProcessorAdapter_doPostProcessBeforeInitialization | /**
* Adapts the BeanPostProcessor#postProcessBeforeInitialization(Object, String) method; sub-types
* may override this method.
*
* @param bean Bean Object
* @param beanName Bean Name
* @return Bean Object
* @see BeanPostProcessor#postProcessBeforeInitialization(Object, String)
*/
protected T doPostProcessBeforeInitialization(T bean, String beanName) throws BeansException {
processBeforeInitialization(bean, beanName);
return bean;
} | 3.68 |
hbase_Bytes_binarySearch | /**
* Binary search for keys in indexes.
* @param arr array of byte arrays to search for
* @param key the key you want to find
* @param comparator a comparator to compare.
* @return zero-based index of the key, if the key is present in the array. Otherwise, a value -(i
* + 1) such that the key is between arr[i - 1] and arr[i] non-inclusively, where i is in
* [0, N], if we define arr[-1] = -Inf and arr[N] = Inf for an N-element array. The above
* means that this function can return 2N + 1 different values ranging from -(N + 1) to N
* - 1.
*/
public static int binarySearch(Cell[] arr, Cell key, CellComparator comparator) {
int low = 0;
int high = arr.length - 1;
while (low <= high) {
int mid = low + ((high - low) >> 1);
// we have to compare in this order, because the comparator order
// has special logic when the 'left side' is a special key.
int cmp = comparator.compare(key, arr[mid]);
// key lives above the midpoint
if (cmp > 0) low = mid + 1;
// key lives below the midpoint
else if (cmp < 0) high = mid - 1;
// BAM. how often does this really happen?
else return mid;
}
return -(low + 1);
} | 3.68 |
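The -(i + 1) convention documented above is the same one used by java.util.Arrays.binarySearch; a small sketch of how a caller decodes it, using plain ints instead of Cells.

```java
import java.util.Arrays;

public class InsertionPointSketch {
    public static void main(String[] args) {
        int[] sorted = {10, 20, 30};
        System.out.println(Arrays.binarySearch(sorted, 20)); // 1  -> found at index 1
        int r = Arrays.binarySearch(sorted, 25);
        System.out.println(r);                               // -3 -> not found
        System.out.println(-(r + 1));                        // 2  -> would be inserted at index 2
    }
}
```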
hbase_MetricsHeapMemoryManager_setCurMemStoreSizeGauge | /**
* Set the current global memstore size used gauge
* @param memStoreSize the current memory usage in memstore, in bytes.
*/
public void setCurMemStoreSizeGauge(final long memStoreSize) {
source.setCurMemStoreSizeGauge(memStoreSize);
} | 3.68 |
morf_SqlUtils_bracket | /**
* Method that wraps the first elements of a (sub)expression with brackets.
* <p>
* For example, in order to generate "(a + b) / c" SQL Math expression, we
* need to put first two elements (first subexpression) into a bracket. That
* could be achieved by the following DSL statement.
* </p>
*
* <pre>
* bracket(field("a").plus(field("b"))).divideBy(field("c"))
* </pre>
*
*
* @param expression the input Math expression that will be wrapped with
* brackets in output SQL
* @return new expression containing the input expression wrapped with
* brackets
*/
public static AliasedField bracket(MathsField expression) {
return new BracketedExpression(expression);
} | 3.68 |
hbase_User_getTokens | /**
* Returns all the tokens stored in the user's credentials.
*/
public Collection<Token<? extends TokenIdentifier>> getTokens() {
return ugi.getTokens();
} | 3.68 |
flink_StreamProjection_projectTupleX | /**
* Chooses a projectTupleX according to the length of {@link
* org.apache.flink.streaming.api.datastream.StreamProjection#fieldIndexes}.
*
* @return The projected DataStream.
* @see org.apache.flink.api.java.operators.ProjectOperator.Projection
*/
@SuppressWarnings("unchecked")
public <OUT extends Tuple> SingleOutputStreamOperator<OUT> projectTupleX() {
SingleOutputStreamOperator<OUT> projOperator = null;
switch (fieldIndexes.length) {
case 1:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple1();
break;
case 2:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple2();
break;
case 3:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple3();
break;
case 4:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple4();
break;
case 5:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple5();
break;
case 6:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple6();
break;
case 7:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple7();
break;
case 8:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple8();
break;
case 9:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple9();
break;
case 10:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple10();
break;
case 11:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple11();
break;
case 12:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple12();
break;
case 13:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple13();
break;
case 14:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple14();
break;
case 15:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple15();
break;
case 16:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple16();
break;
case 17:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple17();
break;
case 18:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple18();
break;
case 19:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple19();
break;
case 20:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple20();
break;
case 21:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple21();
break;
case 22:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple22();
break;
case 23:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple23();
break;
case 24:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple24();
break;
case 25:
projOperator = (SingleOutputStreamOperator<OUT>) projectTuple25();
break;
default:
throw new IllegalStateException("Excessive arity in tuple.");
}
return projOperator;
} | 3.68 |
graphhopper_PbfFieldDecoder_decodeLongitude | /**
* Decodes a raw longitude value into degrees.
* <p>
*
* @param rawLongitude The PBF encoded value.
* @return The longitude in degrees.
*/
public double decodeLongitude(long rawLongitude) {
return COORDINATE_SCALING_FACTOR * (coordLongitudeOffset + (coordGranularity * rawLongitude));
} | 3.68 |
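A minimal sketch of the same formula with the common OSM PBF defaults (granularity of 100 nanodegrees, zero offset, scaling factor 1e-9); the class and parameter names are illustrative, not the GraphHopper API.

```java
public class PbfLongitudeSketch {
    // degrees = 1e-9 * (offset + granularity * raw), as in decodeLongitude above
    static double decodeLongitude(long raw, long offset, int granularity) {
        return 0.000000001 * (offset + granularity * raw);
    }

    public static void main(String[] args) {
        // With offset = 0 and granularity = 100, a raw value of 520_000_000 is 52.0 degrees.
        System.out.println(decodeLongitude(520_000_000L, 0L, 100)); // 52.0
    }
}
```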
flink_Tuple20_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8,
* f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19), where the individual fields are the
* value returned by calling {@link Object#toString} on that field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ","
+ StringUtils.arrayAwareToString(this.f5)
+ ","
+ StringUtils.arrayAwareToString(this.f6)
+ ","
+ StringUtils.arrayAwareToString(this.f7)
+ ","
+ StringUtils.arrayAwareToString(this.f8)
+ ","
+ StringUtils.arrayAwareToString(this.f9)
+ ","
+ StringUtils.arrayAwareToString(this.f10)
+ ","
+ StringUtils.arrayAwareToString(this.f11)
+ ","
+ StringUtils.arrayAwareToString(this.f12)
+ ","
+ StringUtils.arrayAwareToString(this.f13)
+ ","
+ StringUtils.arrayAwareToString(this.f14)
+ ","
+ StringUtils.arrayAwareToString(this.f15)
+ ","
+ StringUtils.arrayAwareToString(this.f16)
+ ","
+ StringUtils.arrayAwareToString(this.f17)
+ ","
+ StringUtils.arrayAwareToString(this.f18)
+ ","
+ StringUtils.arrayAwareToString(this.f19)
+ ")";
} | 3.68 |
pulsar_Transactions_getPendingAckStatsAsync | /**
* Get transaction pending ack stats.
*
* @param topic the topic of this transaction pending ack stats
* @param subName the subscription name of this transaction pending ack stats
* @return the stats of transaction pending ack.
*/
default CompletableFuture<TransactionPendingAckStats> getPendingAckStatsAsync(String topic, String subName) {
return getPendingAckStatsAsync(topic, subName, false);
} | 3.68 |
hadoop_ActiveAuditManagerS3A_removeActiveSpanFromMap | /**
* Remove the span from the reference map, shrinking the map in the process.
* If/when a new span is activated in the thread, a new entry will be created,
* and if queried for a span, the unbounded span will be automatically
* added to the map for this thread ID.
*
*/
@VisibleForTesting
boolean removeActiveSpanFromMap() {
// remove from the map
activeSpanMap.removeForCurrentThread();
if (deactivationsBeforePrune.decrementAndGet() == 0) {
// trigger a prune
activeSpanMap.prune();
deactivationsBeforePrune.set(pruneThreshold);
return true;
}
return false;
} | 3.68 |
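A self-contained sketch of the countdown idiom used above (not the Hadoop class): every N deactivations trigger one prune and reset the counter. The threshold value is an assumption for illustration.

```java
import java.util.concurrent.atomic.AtomicInteger;

public class PruneCountdownSketch {
    static final int PRUNE_THRESHOLD = 3;
    static final AtomicInteger deactivationsBeforePrune = new AtomicInteger(PRUNE_THRESHOLD);

    // Returns true when this call triggered a prune, mirroring removeActiveSpanFromMap.
    static boolean deactivate() {
        if (deactivationsBeforePrune.decrementAndGet() == 0) {
            deactivationsBeforePrune.set(PRUNE_THRESHOLD);
            return true; // a map prune would run here
        }
        return false;
    }

    public static void main(String[] args) {
        for (int i = 1; i <= 7; i++) {
            System.out.println("deactivation " + i + " pruned=" + deactivate());
        }
    }
}
```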
flink_TimeUtils_singular | /**
* @param label the original label
* @return the singular format of the original label
*/
private static String[] singular(String label) {
return new String[] {label};
} | 3.68 |
hbase_GroupingTableMap_extractKeyValues | /**
* Extract column values from the current record. This method returns null if any of the columns
* are not found. Override this method if you want to deal with nulls differently.
* @return array of byte values
*/
protected byte[][] extractKeyValues(Result r) {
byte[][] keyVals = null;
ArrayList<byte[]> foundList = new ArrayList<>();
int numCols = columns.length;
if (numCols > 0) {
for (Cell value : r.listCells()) {
byte[] column =
CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value));
for (int i = 0; i < numCols; i++) {
if (Bytes.equals(column, columns[i])) {
foundList.add(CellUtil.cloneValue(value));
break;
}
}
}
if (foundList.size() == numCols) {
keyVals = foundList.toArray(new byte[numCols][]);
}
}
return keyVals;
} | 3.68 |
hbase_IndividualBytesFieldCell_getTimestamp | // 4) Timestamp
@Override
public long getTimestamp() {
return timestamp;
} | 3.68 |
flink_JobExecutionResult_getAccumulatorResult | /**
* Gets the accumulator with the given name. Returns {@code null}, if no accumulator with that
* name was produced.
*
* @param accumulatorName The name of the accumulator.
* @param <T> The generic type of the accumulator value.
* @return The value of the accumulator with the given name.
*/
@SuppressWarnings("unchecked")
public <T> T getAccumulatorResult(String accumulatorName) {
OptionalFailure<Object> result = this.accumulatorResults.get(accumulatorName);
if (result != null) {
return (T) result.getUnchecked();
} else {
return null;
}
} | 3.68 |
morf_TableOutputter_getBoldFormat | /**
* @return the format to use for bold cells
* @throws WriteException if the format could not be created
*/
private WritableCellFormat getBoldFormat() throws WriteException {
WritableFont boldFont = new WritableFont(WritableFont.ARIAL, 8, WritableFont.BOLD);
WritableCellFormat boldHeading = new WritableCellFormat(boldFont);
boldHeading.setBorder(Border.BOTTOM, BorderLineStyle.MEDIUM);
boldHeading.setVerticalAlignment(VerticalAlignment.CENTRE);
boldHeading.setBackground(Colour.GRAY_25);
WritableCellFormat boldFormat = new WritableCellFormat(boldFont);
boldFormat.setVerticalAlignment(VerticalAlignment.TOP);
return boldFormat;
} | 3.68 |
framework_VFilterSelect_updateReadOnly | /** For internal use only. May be removed or replaced in the future. */
public void updateReadOnly() {
debug("VFS: updateReadOnly()");
tb.setReadOnly(readonly || !textInputEnabled);
} | 3.68 |
framework_SpaceSelectHandler_setDeselectAllowed | /**
* Sets whether pressing space for the currently selected row should
* deselect the row.
*
* @param deselectAllowed
* <code>true</code> to allow deselecting the selected row;
* otherwise <code>false</code>
*/
public void setDeselectAllowed(boolean deselectAllowed) {
this.deselectAllowed = deselectAllowed;
} | 3.68 |
graphhopper_MaxSpeedCalculator_fillMaxSpeed | /**
* This method sets max_speed values where the value is UNSET_SPEED to a value determined by
* the default speed library which is country-dependent.
*/
public void fillMaxSpeed(Graph graph, EncodingManager em) {
// In DefaultMaxSpeedParser and in OSMMaxSpeedParser we don't have the rural/urban info,
// but now we have and can fill the country-dependent max_speed value where missing.
EnumEncodedValue<UrbanDensity> udEnc = em.getEnumEncodedValue(UrbanDensity.KEY, UrbanDensity.class);
fillMaxSpeed(graph, em, edge -> edge.get(udEnc) != UrbanDensity.RURAL);
} | 3.68 |
framework_Panel_getActionManager | /*
* ACTIONS
*/
@Override
protected ActionManager getActionManager() {
if (actionManager == null) {
actionManager = new ActionManager(this);
}
return actionManager;
} | 3.68 |
hbase_SaslClientAuthenticationProvider_relogin | /**
* Executes any necessary logic to re-login the client. Not all implementations will have any
* logic that needs to be executed.
*/
default void relogin() throws IOException {
} | 3.68 |
flink_DistinctType_newBuilder | /** Creates a builder for a {@link DistinctType}. */
public static DistinctType.Builder newBuilder(
ObjectIdentifier objectIdentifier, LogicalType sourceType) {
return new DistinctType.Builder(objectIdentifier, sourceType);
} | 3.68 |
framework_AbstractRendererConnector_createRenderer | /**
* Creates a new Renderer instance associated with this renderer connector.
* <p>
* You should typically not override this method since the framework by
* default generates an implementation that uses
* {@link com.google.gwt.core.client.GWT#create(Class)} to create a renderer
* of the same type as returned by the most specific override of
* {@link #getRenderer()}. If you do override the method, you can't call
* <code>super.createRenderer()</code> since the metadata needed for that
* implementation is not generated if there's an override of the method.
*
* @return a new renderer to be used with this connector
*/
protected Renderer<T> createRenderer() {
// TODO generate type data
Type type = TypeData.getType(getClass());
try {
Type rendererType = type.getMethod("getRenderer").getReturnType();
@SuppressWarnings("unchecked")
Renderer<T> instance = (Renderer<T>) rendererType.createInstance();
return instance;
} catch (NoDataException e) {
throw new IllegalStateException(
"Default implementation of createRenderer() does not work for "
+ getClass().getSimpleName()
+ ". This might be caused by explicitely using "
+ "super.createRenderer() or some unspecified "
+ "problem with the widgetset compilation.",
e);
}
} | 3.68 |
MagicPlugin_Messages_getSpace | /**
     * This relies on the negative space font resource pack:
* https://github.com/AmberWat/NegativeSpaceFont
*/
@Nonnull
@Override
public String getSpace(int pixels) {
if (pixels == 0) {
return "";
}
if (spaceAmounts.containsKey(pixels)) {
return spaceAmounts.get(pixels);
}
int totalPixels = pixels;
int absPixels = Math.abs(pixels);
List<Integer> spaceValues = pixels > 0 ? positiveSpace : negativeSpace;
StringBuilder output = new StringBuilder();
for (Integer spaceValue : spaceValues) {
int absValue = Math.abs(spaceValue);
// See if we can fit this space in
if (absPixels < absValue) continue;
// Append as many of these as we can
String entryGlyph = spaceAmounts.get(spaceValue);
int amount = absPixels / absValue;
for (int i = 0; i < amount; i++) {
output.append(entryGlyph);
}
// Subtract off the amount of space we just added
pixels = pixels - (amount * spaceValue);
// See if we are done
absPixels = Math.abs(pixels);
if (absPixels == 0) break;
}
// Cache this string so we don't have to recompute it later
String result = output.toString();
spaceAmounts.put(totalPixels, result);
return result;
} | 3.68 |
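The method greedily decomposes the requested pixel width into the font's space glyphs, largest first, and caches the result. A self-contained sketch of the same loop, with made-up glyph widths and placeholder strings standing in for the actual font characters:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Standalone sketch of the greedy pixel-to-glyph decomposition with caching.
public class SpaceGlyphs {
    // Placeholder glyphs; the real negative-space font maps each width to a dedicated character.
    private static final Map<Integer, String> GLYPHS = new HashMap<>(Map.of(
            1, "<+1>", 8, "<+8>", 64, "<+64>",
            -1, "<-1>", -8, "<-8>", -64, "<-64>"));
    private static final List<Integer> POSITIVE = List.of(64, 8, 1); // largest first
    private static final List<Integer> NEGATIVE = List.of(-64, -8, -1);

    public static String getSpace(int pixels) {
        if (pixels == 0) return "";
        String cached = GLYPHS.get(pixels);
        if (cached != null) return cached;             // single glyph or previously computed string
        int remaining = Math.abs(pixels);
        StringBuilder output = new StringBuilder();
        for (int width : pixels > 0 ? POSITIVE : NEGATIVE) {
            int absWidth = Math.abs(width);
            int count = remaining / absWidth;          // append as many of this glyph as fit
            output.append(GLYPHS.get(width).repeat(count));
            remaining -= count * absWidth;
            if (remaining == 0) break;
        }
        String result = output.toString();
        GLYPHS.put(pixels, result);                    // cache so later calls skip the loop
        return result;
    }

    public static void main(String[] args) {
        System.out.println(getSpace(73));  // <+64><+8><+1>
        System.out.println(getSpace(-9));  // <-8><-1>
    }
}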
hbase_KeyValueHeap_getComparator | /**
   * @return the comparator used to order the cells in this heap
   */
public CellComparator getComparator() {
return this.kvComparator;
} | 3.68 |
flink_MultipleInputNodeCreationProcessor_canBeInSameGroupWithOutputs | /**
 * A node can only be assigned to the multiple input group of its outputs if all of its outputs
 * have a group and that group is the same for every output.
*
* @return the {@link MultipleInputGroup} of the outputs if all outputs have a group and are the
* same, null otherwise
*/
private MultipleInputGroup canBeInSameGroupWithOutputs(ExecNodeWrapper wrapper) {
if (wrapper.outputs.isEmpty()) {
return null;
}
MultipleInputGroup outputGroup = wrapper.outputs.get(0).group;
if (outputGroup == null) {
return null;
}
for (ExecNodeWrapper outputWrapper : wrapper.outputs) {
if (outputWrapper.group != outputGroup) {
return null;
}
}
return outputGroup;
} | 3.68 |
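Stripped of the planner types, the rule is simply: do all outputs carry the same non-null group? A small standalone sketch of that check; Node and Group here are placeholders, not Flink classes.

import java.util.List;

// Standalone sketch: a node may join its outputs' group only if every output
// has a group and all of those groups are the same instance.
public class GroupCheck {
    static class Group {
        final String name;
        Group(String name) { this.name = name; }
    }

    static class Node {
        final List<Node> outputs;
        Group group;
        Node(List<Node> outputs) { this.outputs = outputs; }
    }

    static Group canBeInSameGroupWithOutputs(Node node) {
        if (node.outputs.isEmpty()) return null;
        Group first = node.outputs.get(0).group;
        if (first == null) return null;
        for (Node output : node.outputs) {
            if (output.group != first) return null; // a differing or missing group disqualifies
        }
        return first;
    }

    public static void main(String[] args) {
        Group g = new Group("g1");
        Node a = new Node(List.of()); a.group = g;
        Node b = new Node(List.of()); b.group = g;
        Node c = new Node(List.of(a, b));
        System.out.println(canBeInSameGroupWithOutputs(c).name); // g1
        b.group = new Group("g2");
        System.out.println(canBeInSameGroupWithOutputs(c));      // null
    }
}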
hadoop_RenameOperation_endOfLoopActions | /**
* Operations to perform at the end of every loop iteration.
* <p>
* This may block the thread waiting for copies to complete
* and/or delete a page of data.
*/
private void endOfLoopActions() throws IOException {
if (keysToDelete.size() == pageSize) {
// finish ongoing copies then delete all queued keys.
completeActiveCopiesAndDeleteSources("paged delete");
} else {
if (activeCopies.size() == RENAME_PARALLEL_LIMIT) {
// the limit of active copies has been reached;
// wait for completion or errors to surface.
LOG.debug("Waiting for active copies to complete");
completeActiveCopies("batch threshold reached");
}
}
} | 3.68 |
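The bookkeeping above combines two throttles: flush queued deletes once a full page has accumulated, and block on in-flight copies once their count reaches the parallelism limit. A self-contained sketch of that pattern using CompletableFuture; the page size, limit and task bodies are invented for illustration.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

// Standalone sketch: bounded parallel "copies" plus paged "deletes".
public class PagedRename {
    private static final int PAGE_SIZE = 3;
    private static final int PARALLEL_LIMIT = 2;

    private final List<CompletableFuture<Void>> activeCopies = new ArrayList<>();
    private final List<String> keysToDelete = new ArrayList<>();

    void copyThenQueueDelete(String key) {
        activeCopies.add(CompletableFuture.runAsync(
                () -> System.out.println("copying " + key)));
        keysToDelete.add(key);
        endOfLoopActions();
    }

    private void endOfLoopActions() {
        if (keysToDelete.size() == PAGE_SIZE) {
            // finish ongoing copies, then delete the queued page of keys
            completeActiveCopies();
            System.out.println("deleting page: " + keysToDelete);
            keysToDelete.clear();
        } else if (activeCopies.size() == PARALLEL_LIMIT) {
            // too many copies in flight; wait before submitting more work
            completeActiveCopies();
        }
    }

    private void completeActiveCopies() {
        CompletableFuture.allOf(activeCopies.toArray(new CompletableFuture[0])).join();
        activeCopies.clear();
    }

    public static void main(String[] args) {
        PagedRename rename = new PagedRename();
        for (int i = 0; i < 7; i++) {
            rename.copyThenQueueDelete("key-" + i);
        }
        rename.completeActiveCopies(); // drain whatever is left at the end
        System.out.println("deleting final page: " + rename.keysToDelete);
    }
}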
flink_StateTable_put | /**
* Maps the composite of active key and given namespace to the specified state.
*
* @param namespace the namespace. Not null.
* @param state the state. Can be null.
*/
public void put(N namespace, S state) {
put(keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace, state);
} | 3.68 |
hadoop_SequentialBlockGroupIdGenerator_hasValidBlockInRange | /**
   * @param b A block object whose id is set to the starting point of the check
   * @return true if any ID in the range
   *      [id, id + HdfsConstants.MAX_BLOCKS_IN_GROUP) is pointed to by a stored
   *      block.
*/
private boolean hasValidBlockInRange(Block b) {
final long id = b.getBlockId();
for (int i = 0; i < MAX_BLOCKS_IN_GROUP; i++) {
b.setBlockId(id + i);
if (blockManager.getStoredBlock(b) != null) {
return true;
}
}
return false;
} | 3.68 |
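The helper probes every ID in a fixed-size group so that a newly generated block-group ID cannot collide with a stored block. A standalone sketch of the same probe, with an in-memory set standing in for the block manager and an invented group size:

import java.util.HashSet;
import java.util.Set;

// Standalone sketch: skip candidate ID ranges that overlap an existing block ID.
public class BlockGroupIdGenerator {
    private static final int MAX_BLOCKS_IN_GROUP = 16; // illustrative group size
    private final Set<Long> storedBlockIds = new HashSet<>();
    private long lastId = 0;

    boolean hasValidBlockInRange(long startId) {
        for (int i = 0; i < MAX_BLOCKS_IN_GROUP; i++) {
            if (storedBlockIds.contains(startId + i)) {
                return true; // some ID in [startId, startId + MAX_BLOCKS_IN_GROUP) is taken
            }
        }
        return false;
    }

    long nextGroupId() {
        do {
            lastId += MAX_BLOCKS_IN_GROUP;
        } while (hasValidBlockInRange(lastId));
        return lastId;
    }

    public static void main(String[] args) {
        BlockGroupIdGenerator gen = new BlockGroupIdGenerator();
        gen.storedBlockIds.add(18L);                 // pretend a block already uses ID 18
        System.out.println(gen.nextGroupId());       // 32: the range [16, 32) is skipped
        System.out.println(gen.nextGroupId());       // 48
    }
}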
hbase_ZKUtil_listChildrenAndWatchThem | /**
* List all the children of the specified znode, setting a watch for children changes and also
* setting a watch on every individual child in order to get the NodeCreated and NodeDeleted
* events.
* @param zkw zookeeper reference
* @param znode node to get children of and watch
* @return list of znode names, null if the node doesn't exist
* @throws KeeperException if a ZooKeeper operation fails
*/
public static List<String> listChildrenAndWatchThem(ZKWatcher zkw, String znode)
throws KeeperException {
List<String> children = listChildrenAndWatchForNewChildren(zkw, znode);
if (children == null) {
return null;
}
for (String child : children) {
watchAndCheckExists(zkw, ZNodePaths.joinZNode(znode, child));
}
return children;
} | 3.68 |
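The same list-then-watch-each-child pattern can be sketched against the plain ZooKeeper client rather than HBase's ZKWatcher wrapper: getChildren with the watch flag covers child additions and removals under the parent, and a per-child exists call registers the watches that fire NodeCreated/NodeDeleted for individual children. The connection string and paths below are placeholders.

import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

// Sketch using the raw ZooKeeper client: list children and watch each one.
public class WatchChildren {
    static List<String> listChildrenAndWatchThem(ZooKeeper zk, String znode)
            throws KeeperException, InterruptedException {
        List<String> children;
        try {
            children = zk.getChildren(znode, true);   // watch the parent for child changes
        } catch (KeeperException.NoNodeException e) {
            return null;                               // parent does not exist
        }
        for (String child : children) {
            zk.exists(znode + "/" + child, true);      // watch each child for create/delete
        }
        return children;
    }

    public static void main(String[] args) throws Exception {
        // Placeholder connection string and session timeout.
        ZooKeeper zk = new ZooKeeper("localhost:2181", 30_000,
                event -> System.out.println("event: " + event));
        System.out.println(listChildrenAndWatchThem(zk, "/hbase/rs"));
        zk.close();
    }
}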
hbase_AbstractRpcBasedConnectionRegistry_transformMetaRegionLocations | /**
* Simple helper to transform the result of getMetaRegionLocations() rpc.
*/
private static RegionLocations transformMetaRegionLocations(GetMetaRegionLocationsResponse resp) {
List<HRegionLocation> regionLocations = new ArrayList<>();
resp.getMetaLocationsList()
.forEach(location -> regionLocations.add(ProtobufUtil.toRegionLocation(location)));
return new RegionLocations(regionLocations);
} | 3.68 |
hadoop_CommitContext_switchToIOStatisticsContext | /**
   * Switch the current thread to this commit context's IOStatistics context,
   * if needed.
*/
public void switchToIOStatisticsContext() {
IOStatisticsContext.setThreadIOStatisticsContext(ioStatisticsContext);
} | 3.68 |
hudi_HoodieTableConfig_setMetadataPartitionsInflight | /**
   * Marks the specified metadata table partitions as inflight.
   *
   * @param metaClient     The meta client used to persist the updated table config.
   * @param partitionTypes The list of partition types to mark as inflight.
   */
public void setMetadataPartitionsInflight(HoodieTableMetaClient metaClient, List<MetadataPartitionType> partitionTypes) {
Set<String> partitionsInflight = getMetadataPartitionsInflight();
partitionTypes.forEach(t -> {
ValidationUtils.checkArgument(!t.getPartitionPath().contains(CONFIG_VALUES_DELIMITER),
"Metadata Table partition path cannot contain a comma: " + t.getPartitionPath());
partitionsInflight.add(t.getPartitionPath());
});
setValue(TABLE_METADATA_PARTITIONS_INFLIGHT, partitionsInflight.stream().sorted().collect(Collectors.joining(CONFIG_VALUES_DELIMITER)));
update(metaClient.getFs(), new Path(metaClient.getMetaPath()), getProps());
LOG.info(String.format("MDT %s partitions %s have been set to inflight", metaClient.getBasePathV2(), partitionTypes));
} | 3.68 |
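Persisting that config boils down to: validate each new entry against the delimiter, merge it into the existing set, and store the sorted, delimiter-joined result. A standalone sketch of that step; the config-holding class is hypothetical and the partition names are only examples.

import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;

// Standalone sketch: merge new entries into a delimiter-joined config value.
public class InflightPartitionsConfig {
    private static final String CONFIG_VALUES_DELIMITER = ",";
    private String tableMetadataPartitionsInflight = "";

    void setMetadataPartitionsInflight(List<String> partitionPaths) {
        Set<String> inflight = new TreeSet<>();        // sorted, like the snippet's sorted() join
        if (!tableMetadataPartitionsInflight.isEmpty()) {
            inflight.addAll(List.of(tableMetadataPartitionsInflight.split(CONFIG_VALUES_DELIMITER)));
        }
        for (String path : partitionPaths) {
            if (path.contains(CONFIG_VALUES_DELIMITER)) {
                throw new IllegalArgumentException(
                        "Metadata table partition path cannot contain a comma: " + path);
            }
            inflight.add(path);
        }
        tableMetadataPartitionsInflight =
                inflight.stream().collect(Collectors.joining(CONFIG_VALUES_DELIMITER));
    }

    public static void main(String[] args) {
        InflightPartitionsConfig config = new InflightPartitionsConfig();
        config.setMetadataPartitionsInflight(List.of("files"));
        config.setMetadataPartitionsInflight(List.of("column_stats", "bloom_filters"));
        System.out.println(config.tableMetadataPartitionsInflight); // bloom_filters,column_stats,files
    }
}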
hbase_QuotaObserverChore_getTableQuotaSnapshots | /**
* Returns an unmodifiable view over the current {@link SpaceQuotaSnapshot} objects for each HBase
* table with a quota defined.
*/
public Map<TableName, SpaceQuotaSnapshot> getTableQuotaSnapshots() {
return readOnlyTableQuotaSnapshots;
} | 3.68 |
hadoop_DatanodeVolumeInfo_getReservedSpace | /**
   * Gets the reserved space of this volume.
*/
public long getReservedSpace() {
return reservedSpace;
} | 3.68 |
flink_RexNodeJsonDeserializer_deserializeSqlOperator | /** Logic shared with {@link AggregateCallJsonDeserializer}. */
static SqlOperator deserializeSqlOperator(JsonNode jsonNode, SerdeContext serdeContext) {
final SqlSyntax syntax;
if (jsonNode.has(FIELD_NAME_SYNTAX)) {
syntax =
serializableToCalcite(
SqlSyntax.class, jsonNode.required(FIELD_NAME_SYNTAX).asText());
} else {
syntax = SqlSyntax.FUNCTION;
}
if (jsonNode.has(FIELD_NAME_INTERNAL_NAME)) {
return deserializeInternalFunction(
jsonNode.required(FIELD_NAME_INTERNAL_NAME).asText(), syntax, serdeContext);
} else if (jsonNode.has(FIELD_NAME_CATALOG_NAME)) {
return deserializeCatalogFunction(jsonNode, syntax, serdeContext);
} else if (jsonNode.has(FIELD_NAME_CLASS)) {
return deserializeFunctionClass(jsonNode, serdeContext);
} else if (jsonNode.has(FIELD_NAME_SYSTEM_NAME)) {
return deserializeSystemFunction(
jsonNode.required(FIELD_NAME_SYSTEM_NAME).asText(), syntax, serdeContext);
} else if (jsonNode.has(FIELD_NAME_SQL_KIND)) {
return deserializeInternalFunction(
syntax, SqlKind.valueOf(jsonNode.get(FIELD_NAME_SQL_KIND).asText()));
} else {
throw new TableException("Invalid function call.");
}
} | 3.68 |
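The method is a presence-based dispatch over a handful of JSON fields. A cut-down Jackson sketch of the same shape, with made-up field names and plain strings in place of SqlOperator, purely to show the dispatch structure:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

// Sketch: dispatch deserialization on whichever identifying field is present.
public class OperatorDispatch {
    static String deserializeOperator(JsonNode node) {
        // Optional field with a default, like the "syntax" handling in the snippet.
        String syntax = node.has("syntax") ? node.get("syntax").asText() : "FUNCTION";
        if (node.has("internalName")) {
            return "internal:" + node.get("internalName").asText() + " (" + syntax + ")";
        } else if (node.has("catalogName")) {
            return "catalog:" + node.get("catalogName").asText();
        } else if (node.has("class")) {
            return "class:" + node.get("class").asText();
        } else if (node.has("systemName")) {
            return "system:" + node.get("systemName").asText();
        }
        throw new IllegalArgumentException("Invalid function call: " + node);
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        System.out.println(deserializeOperator(
                mapper.readTree("{\"internalName\":\"$UPPER$1\",\"syntax\":\"FUNCTION\"}")));
        System.out.println(deserializeOperator(
                mapper.readTree("{\"systemName\":\"upper\"}")));
    }
}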
framework_Table_setColumnIcon | /**
* Sets the icon Resource for the specified column.
* <p>
* Throws IllegalArgumentException if the specified column is not visible.
* </p>
*
* @param propertyId
* the propertyId identifying the column.
* @param icon
* the icon Resource to set.
*/
public void setColumnIcon(Object propertyId, Resource icon) {
if (icon == null) {
columnIcons.remove(propertyId);
} else {
columnIcons.put(propertyId, icon);
}
markAsDirty();
} | 3.68 |
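A brief usage sketch, assuming Vaadin 7 package names and a theme that actually contains the referenced image; the property id and icon path are made up.

import com.vaadin.server.ThemeResource;
import com.vaadin.ui.Table;

// Sketch: attach an icon to a visible column of a Table.
public class ColumnIconExample {
    Table buildTable() {
        Table table = new Table("People");
        table.addContainerProperty("name", String.class, "");
        table.setColumnHeader("name", "Name");
        // Resolved against the active theme, e.g. VAADIN/themes/<theme>/icons/user.png
        table.setColumnIcon("name", new ThemeResource("icons/user.png"));
        return table;
    }
}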
hbase_PrivateCellUtil_compareKeyBasedOnColHint | /**
   * Used to compare two cells based on the column hint provided. This is specifically used when we
   * need to optimize seeks based on the next indexed key. It is an advanced API needed only for
   * certain scan optimizations.
* @param nextIndexedCell the next indexed cell
* @param currentCell the cell to be compared
* @param foff the family offset of the currentCell
* @param flen the family length of the currentCell
* @param colHint the column hint provided - could be null
* @param coff the offset of the column hint if provided, if not offset of the
* currentCell's qualifier
* @param clen the length of the column hint if provided, if not length of the
* currentCell's qualifier
* @param ts the timestamp to be seeked
* @param type the type to be seeked
   * @return an int based on the given column hint
   *         TODO: To be moved out of here because this is a special API used in scan optimization.
*/
// compare a key against row/fam/qual/ts/type
public static final int compareKeyBasedOnColHint(CellComparator comparator, Cell nextIndexedCell,
Cell currentCell, int foff, int flen, byte[] colHint, int coff, int clen, long ts, byte type) {
int compare = comparator.compareRows(nextIndexedCell, currentCell);
if (compare != 0) {
return compare;
}
// If the column is not specified, the "minimum" key type appears the
// latest in the sorted order, regardless of the timestamp. This is used
// for specifying the last key/value in a given row, because there is no
// "lexicographically last column" (it would be infinitely long). The
// "maximum" key type does not need this behavior.
if (
nextIndexedCell.getFamilyLength() + nextIndexedCell.getQualifierLength() == 0
&& nextIndexedCell.getTypeByte() == KeyValue.Type.Minimum.getCode()
) {
// left is "bigger", i.e. it appears later in the sorted order
return 1;
}
if (flen + clen == 0 && type == KeyValue.Type.Minimum.getCode()) {
return -1;
}
compare = comparator.compareFamilies(nextIndexedCell, currentCell);
if (compare != 0) {
return compare;
}
if (colHint == null) {
compare = comparator.compareQualifiers(nextIndexedCell, currentCell);
} else {
compare = CellUtil.compareQualifiers(nextIndexedCell, colHint, coff, clen);
}
if (compare != 0) {
return compare;
}
// Next compare timestamps.
compare = comparator.compareTimestamps(nextIndexedCell.getTimestamp(), ts);
if (compare != 0) {
return compare;
}
// Compare types. Let the delete types sort ahead of puts; i.e. types
// of higher numbers sort before those of lesser numbers. Maximum (255)
// appears ahead of everything, and minimum (0) appears after
// everything.
return (0xff & type) - (0xff & nextIndexedCell.getTypeByte());
} | 3.68 |
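Abstracting away the HBase Cell plumbing, this is a tiered comparison (row, then family, then qualifier-or-hint, then timestamp in descending order, then type) with a special case that lets an empty-column "minimum type" key sort after everything else in its row. A simplified standalone sketch over a small Key class; the field layout and type codes are invented for illustration.

// Standalone sketch of a tiered key comparison with an optional column hint.
public class HintedKeyComparator {
    static final byte TYPE_MINIMUM = 0;   // illustrative type codes, not HBase's actual values
    static final byte TYPE_PUT = 4;

    static class Key {
        final String row, family, qualifier;
        final long ts;
        final byte type;
        Key(String row, String family, String qualifier, long ts, byte type) {
            this.row = row; this.family = family; this.qualifier = qualifier;
            this.ts = ts; this.type = type;
        }
    }

    static int compareKeyBasedOnColHint(Key nextIndexed, Key current, String colHint,
                                        long ts, byte type) {
        int cmp = nextIndexed.row.compareTo(current.row);
        if (cmp != 0) return cmp;
        // An empty-column "minimum" key stands for the last key of its row.
        if (nextIndexed.family.isEmpty() && nextIndexed.qualifier.isEmpty()
                && nextIndexed.type == TYPE_MINIMUM) {
            return 1;
        }
        String seekColumn = colHint == null ? current.qualifier : colHint;
        if (current.family.isEmpty() && seekColumn.isEmpty() && type == TYPE_MINIMUM) {
            return -1;
        }
        cmp = nextIndexed.family.compareTo(current.family);
        if (cmp != 0) return cmp;
        cmp = nextIndexed.qualifier.compareTo(seekColumn);
        if (cmp != 0) return cmp;
        cmp = Long.compare(ts, nextIndexed.ts);            // larger (newer) timestamps sort first
        if (cmp != 0) return cmp;
        return (0xff & type) - (0xff & nextIndexed.type);  // higher type codes (deletes) sort first
    }

    public static void main(String[] args) {
        Key next = new Key("row2", "cf", "a", 5L, TYPE_PUT);
        Key current = new Key("row1", "cf", "a", 5L, TYPE_PUT);
        // The next indexed key lies on a later row, so it compares greater than the seek target.
        System.out.println(compareKeyBasedOnColHint(next, current, "b", 5L, TYPE_PUT)); // > 0
    }
}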
framework_AriaHelper_clearCaption | /**
* Removes a binding to a caption added with bindCaption() from the provided
* Widget.
*
* @param widget
* Widget, that was bound to a caption before
*/
private static void clearCaption(Widget widget) {
Roles.getTextboxRole()
.removeAriaLabelledbyProperty(widget.getElement());
} | 3.68 |
Activiti_AbstractOperation_executeExecutionListeners | /**
* Executes the execution listeners defined on the given element, with the given event type,
* and passing the provided execution to the {@link ExecutionListener} instances.
*/
protected void executeExecutionListeners(HasExecutionListeners elementWithExecutionListeners,
ExecutionEntity executionEntity, String eventType) {
commandContext.getProcessEngineConfiguration().getListenerNotificationHelper()
.executeExecutionListeners(elementWithExecutionListeners, executionEntity, eventType);
} | 3.68 |