name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
framework_DropTargetExtensionConnector_onDrop | /**
* Event handler for the {@code drop} event.
* <p>
* Override this method if custom handling of the drop event is
* required. If the drop is allowed, the handler should call preventDefault() on the event.
*
* @param event
* browser event to be handled
*/
protected void onDrop(Event event) {
NativeEvent nativeEvent = (NativeEvent) event;
if (isDropAllowed(nativeEvent)) {
JsArrayString typesJsArray = getTypes(
nativeEvent.getDataTransfer());
/*
* Handle event if transfer doesn't contain files.
*
* Spec: "Dragging files can currently only happen from outside a
* browsing context, for example from a file system manager
* application." Thus files and other data cannot be dragged at the
* same time.
*/
if (!containsFiles(typesJsArray)) {
nativeEvent.preventDefault();
nativeEvent.stopPropagation();
List<String> types = new ArrayList<>();
Map<String, String> data = new HashMap<>();
for (int i = 0; i < typesJsArray.length(); i++) {
String type = typesJsArray.get(i);
types.add(type);
data.put(type, nativeEvent.getDataTransfer().getData(type));
}
sendDropEventToServer(types, data, DragSourceExtensionConnector
.getDropEffect(nativeEvent.getDataTransfer()),
nativeEvent);
}
}
removeDragOverStyle(nativeEvent);
} | 3.68 |
flink_HiveTableSource_toHiveTablePartition | /** Convert partition to HiveTablePartition. */
public HiveTablePartition toHiveTablePartition(Partition partition) {
return HivePartitionUtils.toHiveTablePartition(partitionKeys, tableProps, partition);
} | 3.68 |
framework_BeanUtil_getPropertyDescriptor | /**
* Returns the property descriptor for the property of the given name and
* declaring class. The property name may refer to a nested property, e.g.
* "property.subProperty" or "property.subProperty1.subProperty2". The
* property must have a public read method (or a chain of read methods in
* case of a nested property).
*
* @param beanType
* the type declaring the property
* @param propertyName
* the name of the property
* @return the corresponding descriptor, or {@code null} if no such property is found
* @throws IntrospectionException
* if the introspection fails
*/
public static PropertyDescriptor getPropertyDescriptor(Class<?> beanType,
String propertyName) throws IntrospectionException {
if (propertyName.contains(".")) {
String[] parts = propertyName.split("\\.", 2);
// Get the type of the field in the bean class
Class<?> propertyBean = getPropertyType(beanType, parts[0]);
// Find the rest from the sub type
return getPropertyDescriptor(propertyBean, parts[1]);
} else {
List<PropertyDescriptor> descriptors = getBeanPropertyDescriptors(
beanType);
for (PropertyDescriptor descriptor : descriptors) {
final Method getMethod = descriptor.getReadMethod();
if (descriptor.getName().equals(propertyName)
&& getMethod != null
&& getMethod.getDeclaringClass() != Object.class) {
return descriptor;
}
}
return null;
}
} | 3.68 |
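The nested-property resolution in the getPropertyDescriptor snippet above can be illustrated with plain java.beans. The sketch below is a hypothetical standalone re-implementation (not Vaadin's BeanUtil itself) that resolves "address.street" by splitting on the first dot and recursing into the type returned by the previous read method, with the same Object.class filter:

```java
import java.beans.IntrospectionException;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;

public class NestedPropertyDemo {

    public static class Address {
        public String getStreet() { return "Main St"; }
    }

    public static class Person {
        public Address getAddress() { return new Address(); }
    }

    // Hypothetical standalone version of the nested lookup above, built only on
    // java.beans: split on the first dot, resolve the head, then recurse into the
    // type returned by its read method.
    static PropertyDescriptor descriptorFor(Class<?> beanType, String propertyName)
            throws IntrospectionException {
        if (propertyName.contains(".")) {
            String[] parts = propertyName.split("\\.", 2);
            PropertyDescriptor head = descriptorFor(beanType, parts[0]);
            return descriptorFor(head.getPropertyType(), parts[1]);
        }
        for (PropertyDescriptor d : Introspector.getBeanInfo(beanType).getPropertyDescriptors()) {
            if (d.getName().equals(propertyName) && d.getReadMethod() != null
                    && d.getReadMethod().getDeclaringClass() != Object.class) {
                return d;
            }
        }
        return null;
    }

    public static void main(String[] args) throws IntrospectionException {
        PropertyDescriptor pd = descriptorFor(Person.class, "address.street");
        System.out.println(pd.getName());       // street
        System.out.println(pd.getReadMethod()); // the Address.getStreet() read method
    }
}
```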
hadoop_HAState_setState | /**
* Move from the existing state to a new state
* @param context HA context
* @param s new state
* @throws ServiceFailedException on failure to transition to new state.
*/
public void setState(HAContext context, HAState s) throws ServiceFailedException {
if (this == s) { // Already in the new state
return;
}
throw new ServiceFailedException("Transition from state " + this + " to "
+ s + " is not allowed.");
} | 3.68 |
framework_VDragAndDropManager_startDrag | /**
* This method is used to start Vaadin client side drag and drop operation.
* Operation may be started by virtually any Widget.
* <p>
* Cancels a possibly existing drag. TODO figure out if an active drag at
* this point is always a bug. The cancel may be a good and cheap lifesaver regardless.
* <p>
* If possible, the method automatically detects the current {@link VDropHandler}
* and fires the {@link VDropHandler#dragEnter(VDragEvent)} event on it.
* <p>
* May also be used to control the drag and drop operation. If this option
* is used, the {@link VDropHandler} is looked up on mouse events and the
* appropriate methods on it are called automatically.
*
* @param transferable
* the VTransferable instance that represents the original
* dragged element
* @param startEvent
* the native event that starts the drag
* @param handleDragEvents
* if true, {@link VDragAndDropManager} handles the drag and drop
* operation via GWT event preview.
* @return the drag event
*/
public VDragEvent startDrag(VTransferable transferable,
final NativeEvent startEvent, final boolean handleDragEvents) {
interruptDrag();
isStarted = false;
currentDrag = new VDragEvent(transferable, startEvent);
currentDrag.setCurrentGwtEvent(startEvent);
final Command startDrag = new Command() {
@Override
public void execute() {
isStarted = true;
addActiveDragSourceStyleName();
VDropHandler dh = null;
if (startEvent != null) {
dh = findDragTarget(Element.as(
currentDrag.getCurrentGwtEvent().getEventTarget()));
}
if (dh != null) {
// drag has started on a DropHandler, kind of drag over
// happens
currentDropHandler = dh;
dh.dragEnter(currentDrag);
}
if (handleDragEvents) {
handlerRegistration = Event.addNativePreviewHandler(
defaultDragAndDropEventHandler);
if (dragElement != null
&& dragElement.getParentElement() == null) {
attachDragElement();
}
}
// just capture something to prevent text selection in IE
Event.setCapture(RootPanel.getBodyElement());
}
private void addActiveDragSourceStyleName() {
ComponentConnector dragSource = currentDrag.getTransferable()
.getDragSource();
dragSource.getWidget()
.addStyleName(ACTIVE_DRAG_SOURCE_STYLENAME);
}
};
final int eventType = Event.as(startEvent).getTypeInt();
if (handleDragEvents && (eventType == Event.ONMOUSEDOWN
|| eventType == Event.ONTOUCHSTART)) {
// only really start drag event on mousemove
deferredStartRegistration = Event
.addNativePreviewHandler(new NativePreviewHandler() {
private int startX = WidgetUtil.getTouchOrMouseClientX(
currentDrag.getCurrentGwtEvent());
private int startY = WidgetUtil.getTouchOrMouseClientY(
currentDrag.getCurrentGwtEvent());
@Override
public void onPreviewNativeEvent(
NativePreviewEvent event) {
int typeInt = event.getTypeInt();
if (typeInt == -1 && event.getNativeEvent()
.getType().toLowerCase(Locale.ROOT)
.contains("pointer")) {
/*
* Ignore PointerEvents since IE10 and IE11 send
* also MouseEvents for backwards compatibility.
*/
return;
}
switch (typeInt) {
case Event.ONMOUSEOVER:
if (dragElement == null) {
break;
}
EventTarget currentEventTarget = event
.getNativeEvent()
.getCurrentEventTarget();
if (Node.is(currentEventTarget)
&& !dragElement.isOrHasChild(
Node.as(currentEventTarget))) {
// drag image appeared below, ignore
break;
}
case Event.ONKEYDOWN:
case Event.ONKEYPRESS:
case Event.ONKEYUP:
case Event.ONBLUR:
case Event.ONFOCUS:
// don't cancel possible drag start
break;
case Event.ONMOUSEOUT:
if (dragElement == null) {
break;
}
EventTarget relatedEventTarget = event
.getNativeEvent()
.getRelatedEventTarget();
if (Node.is(relatedEventTarget)
&& !dragElement.isOrHasChild(
Node.as(relatedEventTarget))) {
// drag image appeared below, ignore
break;
}
case Event.ONMOUSEMOVE:
case Event.ONTOUCHMOVE:
int currentX = WidgetUtil
.getTouchOrMouseClientX(
event.getNativeEvent());
int currentY = WidgetUtil
.getTouchOrMouseClientY(
event.getNativeEvent());
if (Math.abs(startX
- currentX) > MINIMUM_DISTANCE_TO_START_DRAG
|| Math.abs(startY
- currentY) > MINIMUM_DISTANCE_TO_START_DRAG) {
ensureDeferredRegistrationCleanup();
currentDrag.setCurrentGwtEvent(
event.getNativeEvent());
startDrag.execute();
}
break;
default:
ensureDeferredRegistrationCleanup();
currentDrag = null;
clearDragElement();
break;
}
}
});
} else {
startDrag.execute();
}
return currentDrag;
} | 3.68 |
hbase_RegionSplitCalculator_findBigRanges | /**
* Find the specified number of top ranges in a big overlap group. It may return fewer if there are
* not that many top ranges. Once these top ranges are excluded, the big overlap group will be
* broken into non-overlapping ranges or smaller overlapped groups, and most likely some
* holes.
* @param bigOverlap a list of ranges that overlap with each other
* @param count the max number of ranges to find
* @return a list of ranges that overlap with most others
*/
public static <R extends KeyRange> List<R> findBigRanges(Collection<R> bigOverlap, int count) {
List<R> bigRanges = new ArrayList<>();
// The key is the count of overlaps,
// The value is a list of ranges that have that many overlaps
TreeMap<Integer, List<R>> overlapRangeMap = new TreeMap<>();
for (R r : bigOverlap) {
// Calculates the # of overlaps for each region
// and populates overlapRangeMap
byte[] startKey = r.getStartKey();
byte[] endKey = specialEndKey(r);
int overlappedRegions = 0;
for (R rr : bigOverlap) {
byte[] start = rr.getStartKey();
byte[] end = specialEndKey(rr);
if (
BYTES_COMPARATOR.compare(startKey, end) < 0 && BYTES_COMPARATOR.compare(endKey, start) > 0
) {
overlappedRegions++;
}
}
// One region always overlaps with itself,
// so overlappedRegions should be more than 1
// for actual overlaps.
if (overlappedRegions > 1) {
Integer key = Integer.valueOf(overlappedRegions);
List<R> ranges = overlapRangeMap.get(key);
if (ranges == null) {
ranges = new ArrayList<>();
overlapRangeMap.put(key, ranges);
}
ranges.add(r);
}
}
int toBeAdded = count;
for (Integer key : overlapRangeMap.descendingKeySet()) {
List<R> chunk = overlapRangeMap.get(key);
int chunkSize = chunk.size();
if (chunkSize <= toBeAdded) {
bigRanges.addAll(chunk);
toBeAdded -= chunkSize;
if (toBeAdded > 0) continue;
} else {
// Try to use the middle chunk in case the overlapping is
// chained, for example: [a, c), [b, e), [d, g), [f, h)...
// In such a case, sidelining the middle chunk will break
// the group efficiently.
int start = (chunkSize - toBeAdded) / 2;
int end = start + toBeAdded;
for (int i = start; i < end; i++) {
bigRanges.add(chunk.get(i));
}
}
break;
}
return bigRanges;
} | 3.68 |
framework_UnsupportedBrowserHandler_writeBrowserTooOldPage | /**
* Writes a page encouraging the user to upgrade to a more current browser.
*
* @param request
*            the Vaadin request to handle
* @param response
*            the Vaadin response to write the page into
* @throws IOException
*             if writing the response fails
*/
protected void writeBrowserTooOldPage(VaadinRequest request,
VaadinResponse response) throws IOException {
try (Writer page = response.getWriter()) {
WebBrowser b = VaadinSession.getCurrent().getBrowser();
// @formatter:off
page.write(
"<html>"
+ "<head>"
+ " <style>"
+ " html {"
+ " background: #fff;"
+ " color: #444;"
+ " font: 400 1em/1.5 \"Helvetica Neue\", Roboto, \"Segoe UI\", sans-serif;"
+ " padding: 2em;"
+ " }"
+ " body {"
+ " margin: 2em auto;"
+ " width: 27em;"
+ " max-width: 100%;"
+ " }"
+ " h1 {"
+ " line-height: 1.1;"
+ " margin: 2em 0 1em;"
+ " color: #000;"
+ " font-weight: 400;"
+ " }"
+ " em {"
+ " font-size: 1.2em;"
+ " font-style: normal;"
+ " display: block;"
+ " margin-bottom: 1.2em;"
+ " }"
+ " p {"
+ " margin: 0.5em 0 0;"
+ " }"
+ " a {"
+ " text-decoration: none;"
+ " color: #007df0;"
+ " }"
+ " sub {"
+ " display: block;"
+ " margin-top: 2.5em;"
+ " text-align: center;"
+ " border-top: 1px solid #eee;"
+ " padding-top: 2em;"
+ " }"
+ " sub,"
+ " small {"
+ " color: #999;"
+ " }"
+ " </style>"
+ "</head>"
+ "<body><h1>I'm sorry, but your browser is not supported</h1>"
+ "<p>The version (" + b.getBrowserMajorVersion()
+ "." + b.getBrowserMinorVersion()
+ ") of the browser you are using "
+ " is outdated and not supported.</p>"
+ "<p>You should <b>consider upgrading</b> to a more up-to-date browser.</p> "
+ "<p>The most popular browsers are <b>"
+ " <a href=\"https://www.google.com/chrome\">Chrome</a>,"
+ " <a href=\"http://www.mozilla.com/firefox\">Firefox</a>,"
+ (b.isWindows()
? " <a href=\"http://windows.microsoft.com/en-US/internet-explorer/downloads/ie\">Internet Explorer</a>,"
: "")
+ " <a href=\"http://www.opera.com/browser\">Opera</a>"
+ " and <a href=\"http://www.apple.com/safari\">Safari</a>.</b><br/>"
+ "Upgrading to the latest version of one of these <b>will make the web safer, faster and better looking.</b></p>"
+ (b.isIE()
? "<script type=\"text/javascript\" src=\"http://ajax.googleapis.com/ajax/libs/chrome-frame/1/CFInstall.min.js\"></script>"
+ "<p>If you can not upgrade your browser, please consider trying <a onclick=\"CFInstall.check({mode:'overlay'});return false;\" href=\"http://www.google.com/chromeframe\">Chrome Frame</a>.</p>"
: "")
+ "<p><sub><a onclick=\"document.cookie='"
+ FORCE_LOAD_COOKIE
+ "';window.location.reload();return false;\" href=\"#\">Continue without updating</a> (not recommended)</sub></p>"
+ "</body>\n" + "</html>");
// @formatter:on
}
} | 3.68 |
hadoop_AllocationFileQueueParser_loadQueue | /**
* Loads a queue from a queue element in the configuration file.
*/
private void loadQueue(String parentName, Element element,
QueueProperties.Builder builder) throws AllocationConfigurationException {
String queueName =
FairSchedulerUtilities.trimQueueName(element.getAttribute("name"));
if (queueName.contains(".")) {
throw new AllocationConfigurationException("Bad fair scheduler config "
+ "file: queue name (" + queueName + ") shouldn't contain period.");
}
if (queueName.isEmpty()) {
throw new AllocationConfigurationException("Bad fair scheduler config "
+ "file: queue name shouldn't be empty or "
+ "consist only of whitespace.");
}
if (parentName != null) {
queueName = parentName + "." + queueName;
}
NodeList fields = element.getChildNodes();
boolean isLeaf = true;
boolean isReservable = false;
boolean isMaxAMShareSet = false;
for (int j = 0; j < fields.getLength(); j++) {
Node fieldNode = fields.item(j);
if (!(fieldNode instanceof Element)) {
continue;
}
Element field = (Element) fieldNode;
if (MIN_RESOURCES.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
ConfigurableResource val =
FairSchedulerConfiguration.parseResourceConfigValue(text, 0L);
builder.minQueueResources(queueName, val.getResource());
} else if (MAX_RESOURCES.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
ConfigurableResource val =
FairSchedulerConfiguration.parseResourceConfigValue(text);
builder.maxQueueResources(queueName, val);
} else if (MAX_CHILD_RESOURCES.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
ConfigurableResource val =
FairSchedulerConfiguration.parseResourceConfigValue(text);
builder.maxChildQueueResources(queueName, val);
} else if (MAX_RUNNING_APPS.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
int val = Integer.parseInt(text);
builder.queueMaxApps(queueName, val);
} else if (MAX_AMSHARE.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
float val = Float.parseFloat(text);
val = Math.min(val, 1.0f);
builder.queueMaxAMShares(queueName, val);
isMaxAMShareSet = true;
} else if (MAX_CONTAINER_ALLOCATION.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
ConfigurableResource val =
FairSchedulerConfiguration.parseResourceConfigValue(text);
builder.queueMaxContainerAllocation(queueName, val.getResource());
} else if (WEIGHT.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
double val = Double.parseDouble(text);
builder.queueWeights(queueName, (float) val);
} else if (MIN_SHARE_PREEMPTION_TIMEOUT.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
long val = Long.parseLong(text) * 1000L;
builder.minSharePreemptionTimeouts(queueName, val);
} else if (FAIR_SHARE_PREEMPTION_TIMEOUT.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
long val = Long.parseLong(text) * 1000L;
builder.fairSharePreemptionTimeouts(queueName, val);
} else if (FAIR_SHARE_PREEMPTION_THRESHOLD.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
float val = Float.parseFloat(text);
val = Math.max(Math.min(val, 1.0f), 0.0f);
builder.fairSharePreemptionThresholds(queueName, val);
} else if (SCHEDULING_POLICY.equals(field.getTagName())
|| SCHEDULING_MODE.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
SchedulingPolicy policy = SchedulingPolicy.parse(text);
builder.queuePolicies(queueName, policy);
} else if (ACL_SUBMIT_APPS.equals(field.getTagName())) {
String text = ((Text) field.getFirstChild()).getData();
builder.queueAcls(queueName, AccessType.SUBMIT_APP,
new AccessControlList(text));
} else if (ACL_ADMINISTER_APPS.equals(field.getTagName())) {
String text = ((Text) field.getFirstChild()).getData();
builder.queueAcls(queueName, AccessType.ADMINISTER_QUEUE,
new AccessControlList(text));
} else if (ACL_ADMINISTER_RESERVATIONS.equals(field.getTagName())) {
String text = ((Text) field.getFirstChild()).getData();
builder.reservationAcls(queueName,
ReservationACL.ADMINISTER_RESERVATIONS,
new AccessControlList(text));
} else if (ACL_LIST_RESERVATIONS.equals(field.getTagName())) {
String text = ((Text) field.getFirstChild()).getData();
builder.reservationAcls(queueName, ReservationACL.LIST_RESERVATIONS,
new AccessControlList(text));
} else if (ACL_SUBMIT_RESERVATIONS.equals(field.getTagName())) {
String text = ((Text) field.getFirstChild()).getData();
builder.reservationAcls(queueName, ReservationACL.SUBMIT_RESERVATIONS,
new AccessControlList(text));
} else if (RESERVATION.equals(field.getTagName())) {
isReservable = true;
builder.reservableQueues(queueName);
builder.configuredQueues(FSQueueType.PARENT, queueName);
} else if (ALLOW_PREEMPTION_FROM.equals(field.getTagName())) {
String text = getTrimmedTextData(field);
if (!Boolean.parseBoolean(text)) {
builder.nonPreemptableQueues(queueName);
}
} else if (QUEUE.endsWith(field.getTagName())
|| POOL.equals(field.getTagName())) {
loadQueue(queueName, field, builder);
isLeaf = false;
}
}
// if a leaf in the alloc file is marked as type='parent'
// then store it as a parent queue
if (isLeaf && !"parent".equals(element.getAttribute("type"))) {
// reservable queue has been already configured as parent
if (!isReservable) {
builder.configuredQueues(FSQueueType.LEAF, queueName);
}
} else {
if (isReservable) {
throw new AllocationConfigurationException(
getErrorString(queueName, RESERVATION));
} else if (isMaxAMShareSet) {
throw new AllocationConfigurationException(
getErrorString(queueName, MAX_AMSHARE));
}
builder.configuredQueues(FSQueueType.PARENT, queueName);
}
// Set default acls if not defined
// The root queue defaults to all access
for (QueueACL acl : QueueACL.values()) {
AccessType accessType = SchedulerUtils.toAccessType(acl);
if (!builder.isAclDefinedForAccessType(queueName, accessType)) {
AccessControlList defaultAcl =
queueName.equals(ROOT) ? EVERYBODY_ACL : NOBODY_ACL;
builder.queueAcls(queueName, accessType, defaultAcl);
}
}
checkMinAndMaxResource(builder.getMinQueueResources(),
builder.getMaxQueueResources(), queueName);
} | 3.68 |
graphhopper_VectorTile_addFeatures | /**
* <pre>
* The actual features in this tile.
* </pre>
*
* <code>repeated .vector_tile.Tile.Feature features = 2;</code>
*/
public Builder addFeatures(
int index, vector_tile.VectorTile.Tile.Feature.Builder builderForValue) {
if (featuresBuilder_ == null) {
ensureFeaturesIsMutable();
features_.add(index, builderForValue.build());
onChanged();
} else {
featuresBuilder_.addMessage(index, builderForValue.build());
}
return this;
} | 3.68 |
hbase_TableSplit_getScan | /**
* Returns a Scan object from the stored string representation.
* @return a Scan object based on the stored scanner.
* @throws IOException if deserialization fails
*/
public Scan getScan() throws IOException {
return TableMapReduceUtil.convertStringToScan(this.scan);
} | 3.68 |
hudi_HoodieTableConfig_getLogFileFormat | /**
* Get the log storage format.
*
* @return HoodieFileFormat for the log storage format
*/
public HoodieFileFormat getLogFileFormat() {
return HoodieFileFormat.valueOf(getStringOrDefault(LOG_FILE_FORMAT));
} | 3.68 |
graphhopper_WaySegmentParser_setSplitNodeFilter | /**
* @param splitNodeFilter return true if the given OSM node should be duplicated to create an artificial edge
*/
public Builder setSplitNodeFilter(Predicate<ReaderNode> splitNodeFilter) {
waySegmentParser.splitNodeFilter = splitNodeFilter;
return this;
} | 3.68 |
dubbo_AbstractServiceDiscovery_update | /**
* Update assumes that DefaultServiceInstance and its attributes will never get updated once created.
* Checking hasExportedServices() before registration guarantees that at least one service is ready for creating the
* instance.
*/
@Override
public synchronized void update() throws RuntimeException {
if (isDestroy) {
return;
}
if (this.serviceInstance == null) {
register();
}
if (!isValidInstance(this.serviceInstance)) {
return;
}
ServiceInstance oldServiceInstance = this.serviceInstance;
DefaultServiceInstance newServiceInstance =
new DefaultServiceInstance((DefaultServiceInstance) oldServiceInstance);
boolean revisionUpdated = calOrUpdateInstanceRevision(newServiceInstance);
if (revisionUpdated) {
logger.info(String.format(
"Metadata of instance changed, updating instance with revision %s.",
newServiceInstance.getServiceMetadata().getRevision()));
doUpdate(oldServiceInstance, newServiceInstance);
this.serviceInstance = newServiceInstance;
}
} | 3.68 |
framework_VAbstractTextualDate_hasChildFocus | /**
* Returns whether any of the child components has focus.
*
* @return {@code true} if any of the child component has focus,
* {@code false} otherwise
* @since 8.3
*/
protected boolean hasChildFocus() {
return false;
} | 3.68 |
hbase_Procedure_setNonceKey | /**
* Called by the ProcedureExecutor to set the nonce key on the newly created procedure.
*/
protected void setNonceKey(NonceKey nonceKey) {
this.nonceKey = nonceKey;
} | 3.68 |
dubbo_RpcStatus_getSucceededMaxElapsed | /**
* Get the maximum elapsed time among succeeded invocations.
*
* @return the maximum elapsed time among succeeded invocations.
*/
public long getSucceededMaxElapsed() {
return succeededMaxElapsed.get();
} | 3.68 |
framework_GridDragSourceConnector_getSelectedRowsInRange | /**
* Get all selected rows from a subset of rows defined by {@code range}.
*
* @param range
* Range of indexes.
* @return List of data of all selected rows in the given range.
*/
private List<JsonObject> getSelectedRowsInRange(Range range) {
List<JsonObject> selectedRows = new ArrayList<>();
for (int i = range.getStart(); i < range.getEnd(); i++) {
JsonObject row = gridConnector.getDataSource().getRow(i);
if (SelectionModel.isItemSelected(row)) {
selectedRows.add(row);
}
}
return selectedRows;
} | 3.68 |
graphhopper_VectorTile_hasVersion | /**
* <pre>
* Any compliant implementation must first read the version
* number encoded in this message and choose the correct
* implementation for this version number before proceeding to
* decode other parts of this message.
* </pre>
*
* <code>required uint32 version = 15 [default = 1];</code>
*/
public boolean hasVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
} | 3.68 |
hbase_MultiRowMutationEndpoint_start | /**
* Stores a reference to the coprocessor environment provided by the
* {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this
* coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on
* a table region, so always expects this to be an instance of
* {@link RegionCoprocessorEnvironment}.
* @param env the environment provided by the coprocessor host
* @throws IOException if the provided environment is not an instance of
* {@code RegionCoprocessorEnvironment}
*/
@Override
public void start(CoprocessorEnvironment env) throws IOException {
if (env instanceof RegionCoprocessorEnvironment) {
this.env = (RegionCoprocessorEnvironment) env;
} else {
throw new CoprocessorException("Must be loaded on a table region!");
}
} | 3.68 |
hbase_LogRollBackupSubprocedurePool_close | /**
* Attempt to cleanly shut down any running tasks - allows currently running tasks to cleanly
* finish.
*/
@Override
public void close() {
executor.shutdown();
} | 3.68 |
AreaShop_FileManager_checkRegionAdd | /**
* Check if a player can add a certain region as rent or buy region.
* @param sender The player/console that wants to add a region
* @param region The WorldGuard region to add
* @param world The world the ProtectedRegion is located in
* @param type The type the region should have in AreaShop
* @return The result of checking whether the sender may add this region
*/
public AddResult checkRegionAdd(CommandSender sender, ProtectedRegion region, World world, RegionType type) {
Player player = null;
if(sender instanceof Player) {
player = (Player)sender;
}
// Determine if the player is an owner or member of the region
boolean isMember = player != null && plugin.getWorldGuardHandler().containsMember(region, player.getUniqueId());
boolean isOwner = player != null && plugin.getWorldGuardHandler().containsOwner(region, player.getUniqueId());
AreaShop.debug("checkRegionAdd: isOwner=" + isOwner + ", isMember=" + isMember);
String typeString;
if(type == RegionType.RENT) {
typeString = "rent";
} else {
typeString = "buy";
}
AreaShop.debug(" permissions: .create=" + sender.hasPermission("areashop.create" + typeString) + ", .create.owner=" + sender.hasPermission("areashop.create" + typeString + ".owner") + ", .create.member=" + sender.hasPermission("areashop.create" + typeString + ".member"));
if(!(sender.hasPermission("areashop.create" + typeString)
|| (sender.hasPermission("areashop.create" + typeString + ".owner") && isOwner)
|| (sender.hasPermission("areashop.create" + typeString + ".member") && isMember))) {
return AddResult.NOPERMISSION;
}
GeneralRegion asRegion = plugin.getFileManager().getRegion(region.getId());
if(asRegion != null) {
if(asRegion.getWorld().equals(world)) {
return AddResult.ALREADYADDED;
} else {
return AddResult.ALREADYADDEDOTHERWORLD;
}
} else if(plugin.getFileManager().isBlacklisted(region.getId())) {
return AddResult.BLACKLISTED;
} else {
return AddResult.SUCCESS;
}
} | 3.68 |
dubbo_AbstractConfig_checkDefault | /**
* Check and set default value for some fields.
* <p>
* This method will be called at the end of {@link #refresh()}, as a post-initializer.
* </p>
* <p>NOTE: </p>
* <p>
* To distinguish between user-set property values and default property values,
* do not initialize the default value at the field declaration. <b>If the field has a default value,
* it should be set in the checkDefault() method</b>, which will be called at the end of {@link #refresh()},
* so that it will not affect the behavior of attribute overrides.</p>
*
* @see AbstractConfig#getMetaData()
* @see AbstractConfig#appendAttributes(Map, Object)
*/
protected void checkDefault() {} | 3.68 |
zxing_PDF417_determineDimensions | /**
* Determine the optimal number of columns and rows for the specified number of
* codewords.
*
* @param sourceCodeWords number of code words
* @param errorCorrectionCodeWords number of error correction code words
* @return dimension object containing cols as width and rows as height
* @throws WriterException if the message cannot be fitted into any valid dimensions
*/
private int[] determineDimensions(int sourceCodeWords, int errorCorrectionCodeWords) throws WriterException {
float ratio = 0.0f;
int[] dimension = null;
for (int cols = minCols; cols <= maxCols; cols++) {
int rows = calculateNumberOfRows(sourceCodeWords, errorCorrectionCodeWords, cols);
if (rows < minRows) {
break;
}
if (rows > maxRows) {
continue;
}
float newRatio = ((float) (17 * cols + 69) * DEFAULT_MODULE_WIDTH) / (rows * HEIGHT);
// ignore if previous ratio is closer to preferred ratio
if (dimension != null && Math.abs(newRatio - PREFERRED_RATIO) > Math.abs(ratio - PREFERRED_RATIO)) {
continue;
}
ratio = newRatio;
dimension = new int[] {cols, rows};
}
// Handle case when min values were larger than necessary
if (dimension == null) {
int rows = calculateNumberOfRows(sourceCodeWords, errorCorrectionCodeWords, minCols);
if (rows < minRows) {
dimension = new int[]{minCols, minRows};
}
}
if (dimension == null) {
throw new WriterException("Unable to fit message in columns");
}
return dimension;
} | 3.68 |
hadoop_FlowRunRowKey_parseRowKey | /**
* Given the raw row key as bytes, returns the row key as an object.
* @param rowKey Byte representation of row key.
* @return A <cite>FlowRunRowKey</cite> object.
*/
public static FlowRunRowKey parseRowKey(byte[] rowKey) {
return new FlowRunRowKeyConverter().decode(rowKey);
} | 3.68 |
hadoop_ConnectionPool_getConnectionPoolId | /**
* Get the connection pool identifier.
*
* @return Connection pool identifier.
*/
protected ConnectionPoolId getConnectionPoolId() {
return this.connectionPoolId;
} | 3.68 |
framework_Escalator_getScrollPos | /**
* ScrollDestination case-specific handling logic.
*/
private static double getScrollPos(final ScrollDestination destination,
final double targetStartPx, final double targetEndPx,
final double viewportStartPx, final double viewportEndPx,
final double padding) {
final double viewportLength = viewportEndPx - viewportStartPx;
switch (destination) {
/*
* Scroll as little as possible to show the target element. If the
* element fits into view, this works as START or END depending on the
* current scroll position. If the element does not fit into view, this
* works as START.
*/
case ANY: {
final double startScrollPos = targetStartPx - padding;
final double endScrollPos = targetEndPx + padding - viewportLength;
if (startScrollPos < viewportStartPx) {
return startScrollPos;
} else if (targetEndPx + padding > viewportEndPx) {
return endScrollPos;
} else {
// NOOP, it's already visible
return viewportStartPx;
}
}
/*
* Scrolls so that the element is shown at the end of the viewport. The
* viewport will, however, not scroll before its first element.
*/
case END: {
return targetEndPx + padding - viewportLength;
}
/*
* Scrolls so that the element is shown in the middle of the viewport.
* The viewport will, however, not scroll beyond its contents, given
* more elements than what the viewport is able to show at once. Under
* no circumstances will the viewport scroll before its first element.
*/
case MIDDLE: {
final double targetMiddle = targetStartPx
+ (targetEndPx - targetStartPx) / 2;
return targetMiddle - viewportLength / 2;
}
/*
* Scrolls so that the element is shown at the start of the viewport.
* The viewport will, however, not scroll beyond its contents.
*/
case START: {
return targetStartPx - padding;
}
/*
* Throw an error if we're here. This can only mean that
* ScrollDestination has been carelessly amended.
*/
default: {
throw new IllegalArgumentException(
"Internal: ScrollDestination has been modified, "
+ "but Escalator.getScrollPos has not been updated "
+ "to match new values.");
}
}
} | 3.68 |
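To make the MIDDLE branch of getScrollPos above concrete, here is a small worked example with assumed numbers (a 40 px target row starting at 1000 px, shown in a 400 px viewport); the arithmetic mirrors the targetMiddle calculation in the snippet:

```java
public class ScrollPosDemo {
    public static void main(String[] args) {
        // Assumed geometry: a 40 px row at 1000 px, a 400 px tall viewport.
        double targetStartPx = 1000, targetEndPx = 1040;
        double viewportLength = 400;
        // Same arithmetic as the MIDDLE case above.
        double targetMiddle = targetStartPx + (targetEndPx - targetStartPx) / 2; // 1020
        double scrollPos = targetMiddle - viewportLength / 2;                    // 820
        System.out.println(scrollPos); // 820.0 -> the row ends up centered in the viewport
    }
}
```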
flink_CopyOnWriteStateMap_isRehashing | /** Returns true, if an incremental rehash is in progress. */
@VisibleForTesting
boolean isRehashing() {
// if we rehash, the secondary table is not empty
return EMPTY_TABLE != incrementalRehashTable;
} | 3.68 |
hbase_MiniHBaseCluster_waitForActiveAndReadyMaster | /**
* Blocks until there is an active master and that master has completed initialization.
* @return true if an active master becomes available. false if there are no masters left.
*/
@Override
public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
List<JVMClusterUtil.MasterThread> mts;
long start = EnvironmentEdgeManager.currentTime();
while (
!(mts = getMasterThreads()).isEmpty()
&& (EnvironmentEdgeManager.currentTime() - start) < timeout
) {
for (JVMClusterUtil.MasterThread mt : mts) {
if (mt.getMaster().isActiveMaster() && mt.getMaster().isInitialized()) {
return true;
}
}
Threads.sleep(100);
}
return false;
} | 3.68 |
framework_DesignContext_twoWayMap | /**
* Creates a two-way mapping between key and value, i.e. adds key -> value
* to keyToValue and value -> key to valueToKey. If key was mapped to a
* value v different from the given value, the mapping from v to key is
* removed. Similarly, if value was mapped to some key k different from key,
* the mapping from k to value is removed.
*
* Returns true if there already was a mapping from key to some value v or
* if there was a mapping from value to some key k. Otherwise returns false.
*
* @param key
* The new key in keyToValue.
* @param value
* The new value in keyToValue.
* @param keyToValue
* A map from keys to values.
* @param valueToKey
* A map from values to keys.
* @return whether there already was some mapping from key to a value or
* from value to a key.
*/
private <S, T> boolean twoWayMap(S key, T value, Map<S, T> keyToValue,
Map<T, S> valueToKey) {
T oldValue = keyToValue.put(key, value);
if (oldValue != null && !oldValue.equals(value)) {
valueToKey.remove(oldValue);
}
S oldKey = valueToKey.put(value, key);
if (oldKey != null && !oldKey.equals(key)) {
keyToValue.remove(oldKey);
}
return oldValue != null || oldKey != null;
} | 3.68 |
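The contract described in the twoWayMap javadoc above is easy to exercise with two plain HashMaps. The sketch below is a hypothetical standalone copy of the same logic (the original is a private helper of DesignContext), showing how a remapping returns true and cleans up the stale reverse entry:

```java
import java.util.HashMap;
import java.util.Map;

public class TwoWayMapDemo {

    // Standalone copy of the two-way mapping logic documented above.
    static <S, T> boolean twoWayMap(S key, T value, Map<S, T> keyToValue, Map<T, S> valueToKey) {
        T oldValue = keyToValue.put(key, value);
        if (oldValue != null && !oldValue.equals(value)) {
            valueToKey.remove(oldValue);
        }
        S oldKey = valueToKey.put(value, key);
        if (oldKey != null && !oldKey.equals(key)) {
            keyToValue.remove(oldKey);
        }
        return oldValue != null || oldKey != null;
    }

    public static void main(String[] args) {
        Map<String, Integer> idToValue = new HashMap<>();
        Map<Integer, String> valueToId = new HashMap<>();
        System.out.println(twoWayMap("a", 1, idToValue, valueToId)); // false: fresh mapping
        System.out.println(twoWayMap("a", 2, idToValue, valueToId)); // true: "a" was already mapped
        System.out.println(valueToId.containsKey(1));                // false: stale reverse entry removed
    }
}
```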
flink_AvroParquetRecordFormat_createReader | /**
* Creates a new reader to read avro {@link GenericRecord} from Parquet input stream.
*
* <p>Several wrapper classes have been created to make the Flink abstraction compatible with the
* parquet abstraction. Please refer to the inner classes {@link AvroParquetRecordReader},
* {@link ParquetInputFile}, {@code FSDataInputStreamAdapter} for details.
*/
@Override
public Reader<E> createReader(
Configuration config, FSDataInputStream stream, long fileLen, long splitEnd)
throws IOException {
// current version does not support splitting.
checkNotSplit(fileLen, splitEnd);
return new AvroParquetRecordReader<E>(
AvroParquetReader.<E>builder(new ParquetInputFile(stream, fileLen))
.withDataModel(getDataModel())
.build());
} | 3.68 |
streampipes_DataStreamBuilder_protocol | /**
* Assigns a new {@link org.apache.streampipes.model.grounding.TransportProtocol} to the stream definition.
*
* @param protocol The transport protocol of the stream at runtime (e.g., Kafka or MQTT).
* Use {@link org.apache.streampipes.sdk.helpers.Protocols} to use some pre-defined protocols
* (or create a new protocol as described in the developer guide).
* @return this
*/
public DataStreamBuilder protocol(TransportProtocol protocol) {
this.eventGrounding.setTransportProtocol(protocol);
return this;
} | 3.68 |
hudi_HoodieCompactor_validateRunningMode | // make sure that cfg.runningMode cannot be null
private static void validateRunningMode(Config cfg) {
// --mode has a higher priority than --schedule
// If we remove --schedule option in the future we need to change runningMode default value to EXECUTE
if (StringUtils.isNullOrEmpty(cfg.runningMode)) {
cfg.runningMode = cfg.runSchedule ? SCHEDULE : EXECUTE;
}
} | 3.68 |
hbase_HFileBlock_getBufferWithoutHeader | /**
* Returns a buffer that does not include the header and checksum.
* @return the buffer with header skipped and checksum omitted.
*/
public ByteBuff getBufferWithoutHeader() {
ByteBuff dup = getBufferReadOnly();
return dup.position(headerSize()).slice();
} | 3.68 |
flink_NetUtils_ipAddressAndPortToUrlString | /**
* Encodes an IP address and port to be included in a URL. In particular, this method makes sure
* that IPv6 addresses have the proper formatting to be included in URLs.
*
* @param address The address to be included in the URL.
* @param port The port for the URL address.
* @return The proper URL string encoded IP address and port.
*/
public static String ipAddressAndPortToUrlString(InetAddress address, int port) {
return ipAddressToUrlString(address) + ':' + port;
} | 3.68 |
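The "proper formatting" mentioned above refers to wrapping IPv6 literals in brackets so the trailing ":port" stays unambiguous. Below is a simplified standalone sketch of that idea (a stand-in for the ipAddressToUrlString call, not Flink's actual implementation):

```java
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.UnknownHostException;

public class UrlAddressDemo {

    // Hypothetical equivalent: bracket IPv6 literals (RFC 2732 style) before
    // appending the port, so the port separator cannot be confused with the address.
    static String ipAddressAndPortToUrlString(InetAddress address, int port) {
        String host = (address instanceof Inet6Address)
                ? "[" + address.getHostAddress() + "]"
                : address.getHostAddress();
        return host + ":" + port;
    }

    public static void main(String[] args) throws UnknownHostException {
        System.out.println(ipAddressAndPortToUrlString(InetAddress.getByName("127.0.0.1"), 8081));
        // -> 127.0.0.1:8081
        System.out.println(ipAddressAndPortToUrlString(InetAddress.getByName("::1"), 8081));
        // -> [0:0:0:0:0:0:0:1]:8081
    }
}
```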
hbase_RegionCoprocessorHost_preClose | /**
* Invoked before a region is closed
* @param abortRequested true if the server is aborting
*/
public void preClose(final boolean abortRequested) throws IOException {
execOperation(new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.preClose(this, abortRequested);
}
});
} | 3.68 |
morf_TableOutputter_record | /**
* @param row to add the record at
* @param worksheet to add the record to
* @param table that the record comes from
* @param record Record to serialise. This method is part of the old Cryo API.
*/
private void record(final int row, final WritableSheet worksheet, final Table table, Record record) {
int columnNumber = 0;
WritableFont standardFont = new WritableFont(WritableFont.ARIAL, 8);
WritableCellFormat standardFormat = new WritableCellFormat(standardFont);
WritableCellFormat exampleFormat = new WritableCellFormat(standardFont);
try {
exampleFormat.setBackground(Colour.ICE_BLUE);
} catch (WriteException e) {
throw new RuntimeException("Failed to set example background colour", e);
}
for (Column column : table.columns()) {
if(columnNumber < MAX_EXCEL_COLUMNS && !column.getName().equals("id") && !column.getName().equals("version")) {
createCell(worksheet, column, columnNumber, row, record, standardFormat);
columnNumber++;
}
}
} | 3.68 |
hbase_MobUtils_isReadEmptyValueOnMobCellMiss | /**
* Indicates whether to return a null value when the mob file is missing or corrupt. The information is
* set in the attribute "empty.value.on.mobcell.miss" of scan.
* @param scan The current scan.
* @return True if the readEmptyValueOnMobCellMiss is enabled.
*/
public static boolean isReadEmptyValueOnMobCellMiss(Scan scan) {
byte[] readEmptyValueOnMobCellMiss =
scan.getAttribute(MobConstants.EMPTY_VALUE_ON_MOBCELL_MISS);
try {
return readEmptyValueOnMobCellMiss != null && Bytes.toBoolean(readEmptyValueOnMobCellMiss);
} catch (IllegalArgumentException e) {
return false;
}
} | 3.68 |
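As a usage sketch for the attribute described above, a client can opt in per scan. This assumes the HBase client API is on the classpath and uses the attribute name quoted in the javadoc (the value behind MobConstants.EMPTY_VALUE_ON_MOBCELL_MISS):

```java
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class MobScanExample {
    public static Scan newMobTolerantScan() {
        // Opt this scan in to receiving empty values instead of errors when a
        // MOB file is missing or corrupt.
        Scan scan = new Scan();
        scan.setAttribute("empty.value.on.mobcell.miss", Bytes.toBytes(true));
        // MobUtils.isReadEmptyValueOnMobCellMiss(scan) would then return true.
        return scan;
    }
}
```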
hadoop_FilterFileSystem_getUsed | /** Return the total size of all files from a specified path.*/
@Override
public long getUsed(Path path) throws IOException {
return fs.getUsed(path);
} | 3.68 |
hadoop_AzureBlobFileSystem_makeQualified | /**
* Qualify a path to one which uses this FileSystem and, if relative,
* is made absolute.
* @param path to qualify.
* @return this path if it contains a scheme and authority and is absolute, or
* a new path that includes a path and authority and is fully qualified
* @see Path#makeQualified(URI, Path)
* @throws IllegalArgumentException if the path has a schema/URI different
* from this FileSystem.
*/
@Override
public Path makeQualified(Path path) {
// To support format: abfs://{dfs.nameservices}/file/path,
// path need to be first converted to URI, then get the raw path string,
// during which {dfs.nameservices} will be omitted.
if (path != null) {
String uriPath = path.toUri().getPath();
path = uriPath.isEmpty() ? path : new Path(uriPath);
}
return super.makeQualified(path);
} | 3.68 |
hadoop_ContainerReapContext_getUser | /**
* Get the user set for the context.
*
* @return the user set in the context.
*/
public String getUser() {
return user;
} | 3.68 |
framework_ValueChangeHandler_setValueChangeMode | /**
* Sets the value change mode to use.
*
* @see ValueChangeMode
*
* @param valueChangeMode
* the value change mode to use
*/
public void setValueChangeMode(ValueChangeMode valueChangeMode) {
this.valueChangeMode = valueChangeMode;
} | 3.68 |
querydsl_AbstractOracleQuery_startWith | /**
* START WITH specifies the root row(s) of the hierarchy.
*
* @param cond condition
* @return the current object
*/
public <A> C startWith(Predicate cond) {
return addFlag(Position.BEFORE_ORDER, START_WITH, cond);
} | 3.68 |
hadoop_TimelineDomain_getCreatedTime | /**
* Get the created time of the domain
*
* @return the created time of the domain
*/
@XmlElement(name = "createdtime")
public Long getCreatedTime() {
return createdTime;
} | 3.68 |
graphhopper_Path_forEveryEdge | /**
* Iterates over all edges in this path sorted from start to end and calls the visitor callback
* for every edge.
* <p>
*
* @param visitor callback to handle every edge. The edge is decoupled from the iterator and can
* be stored.
*/
public void forEveryEdge(EdgeVisitor visitor) {
int tmpNode = getFromNode();
int len = edgeIds.size();
int prevEdgeId = EdgeIterator.NO_EDGE;
for (int i = 0; i < len; i++) {
EdgeIteratorState edgeBase = graph.getEdgeIteratorState(edgeIds.get(i), tmpNode);
if (edgeBase == null)
throw new IllegalStateException("Edge " + edgeIds.get(i) + " was empty when requested with node " + tmpNode
+ ", array index:" + i + ", edges:" + edgeIds.size());
tmpNode = edgeBase.getBaseNode();
// more efficient swap, currently not implemented for virtual edges: visitor.next(edgeBase.detach(true), i);
edgeBase = graph.getEdgeIteratorState(edgeBase.getEdge(), tmpNode);
visitor.next(edgeBase, i, prevEdgeId);
prevEdgeId = edgeBase.getEdge();
}
visitor.finish();
} | 3.68 |
hadoop_FutureIO_unwrapInnerException | /**
* From the inner cause of an execution exception, extract the inner cause
* to an IOException, raising RuntimeExceptions and Errors immediately.
* <ol>
* <li> If it is an IOE: Return.</li>
* <li> If it is a {@link UncheckedIOException}: return the cause</li>
* <li> Completion/Execution Exceptions: extract and repeat</li>
* <li> If it is an RTE or Error: throw.</li>
* <li> Any other type: wrap in an IOE</li>
* </ol>
*
* Recursively handles wrapped Execution and Completion Exceptions in
* case something very complicated has happened.
* @param e exception.
* @return an IOException extracted or built from the cause.
* @throws RuntimeException if that is the inner cause.
* @throws Error if that is the inner cause.
*/
@SuppressWarnings("ChainOfInstanceofChecks")
public static IOException unwrapInnerException(final Throwable e) {
Throwable cause = e.getCause();
if (cause instanceof IOException) {
return (IOException) cause;
} else if (cause instanceof UncheckedIOException) {
// this is always an IOException
return ((UncheckedIOException) cause).getCause();
} else if (cause instanceof CompletionException) {
return unwrapInnerException(cause);
} else if (cause instanceof ExecutionException) {
return unwrapInnerException(cause);
} else if (cause instanceof RuntimeException) {
throw (RuntimeException) cause;
} else if (cause instanceof Error) {
throw (Error) cause;
} else if (cause != null) {
// other type: wrap with a new IOE
return new IOException(cause);
} else {
// this only happens if there was no cause.
return new IOException(e);
}
} | 3.68 |
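The unwrapping order documented above can be seen with a CompletableFuture whose task fails with an UncheckedIOException. The sketch below is a trimmed standalone copy of the same pattern (illustration only, not Hadoop's FutureIO class itself), showing the recursion bottoming out at the underlying IOException:

```java
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;

public class UnwrapDemo {

    // Trimmed standalone copy of the unwrapping pattern documented above.
    static IOException unwrapInnerException(Throwable e) {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
            return (IOException) cause;
        } else if (cause instanceof UncheckedIOException) {
            return ((UncheckedIOException) cause).getCause();
        } else if (cause instanceof CompletionException
                || cause instanceof ExecutionException) {
            return unwrapInnerException(cause);
        } else if (cause instanceof RuntimeException) {
            throw (RuntimeException) cause;
        } else if (cause instanceof Error) {
            throw (Error) cause;
        } else if (cause != null) {
            return new IOException(cause);
        } else {
            return new IOException(e);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
            throw new UncheckedIOException(new IOException("disk gone"));
        });
        try {
            future.get();
        } catch (ExecutionException e) {
            // get() wraps the failure; the helper digs out the underlying IOException.
            System.out.println(unwrapInnerException(e).getMessage()); // prints "disk gone"
        }
    }
}
```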
framework_TreeTable_getPreOrder | /**
* Preorder of the item ids currently visible.
*
* @return the list of currently visible item ids in preorder
*/
private List<Object> getPreOrder() {
if (preOrder == null) {
preOrder = new ArrayList<Object>();
Collection<?> rootItemIds = getContainerDataSource()
.rootItemIds();
for (Object id : rootItemIds) {
preOrder.add(id);
addVisibleChildTree(id);
}
}
return preOrder;
} | 3.68 |
flink_TaskExecutorManager_allocateWorker | /**
* Tries to allocate a worker that can provide a slot with the given resource profile.
*
* @param requestedSlotResourceProfile desired slot profile
* @return an upper bound resource requirement that can be fulfilled by the new worker, if one
* was allocated
*/
public Optional<ResourceRequirement> allocateWorker(
ResourceProfile requestedSlotResourceProfile) {
if (!resourceAllocator.isSupported()) {
// resource cannot be allocated
return Optional.empty();
}
final int numRegisteredSlots = getNumberRegisteredSlots();
final int numPendingSlots = getNumberPendingTaskManagerSlots();
if (isMaxSlotNumExceededAfterAdding(numSlotsPerWorker)) {
LOG.warn(
"Could not allocate {} more slots. The number of registered and pending slots is {}, while the maximum is {}.",
numSlotsPerWorker,
numPendingSlots + numRegisteredSlots,
maxSlotNum);
return Optional.empty();
}
if (!defaultSlotResourceProfile.isMatching(requestedSlotResourceProfile)) {
// requested resource profile is unfulfillable
return Optional.empty();
}
for (int i = 0; i < numSlotsPerWorker; ++i) {
PendingTaskManagerSlot pendingTaskManagerSlot =
new PendingTaskManagerSlot(defaultSlotResourceProfile);
pendingSlots.put(pendingTaskManagerSlot.getTaskManagerSlotId(), pendingTaskManagerSlot);
}
declareNeededResourcesWithDelay();
return Optional.of(
ResourceRequirement.create(defaultSlotResourceProfile, numSlotsPerWorker));
} | 3.68 |
hadoop_FSDataOutputStreamBuilder_replication | /**
* Set replication factor.
*
* @param replica the replication factor to set.
* @return Generics Type B.
*/
public B replication(short replica) {
replication = replica;
return getThisBuilder();
} | 3.68 |
flink_RemoteStorageScanner_watchSegment | /**
* Watch the segment for a specific subpartition in the {@link RemoteStorageScanner}.
*
* <p>If a segment with a larger or equal id already exists, the current segment won't be
* watched.
*
* <p>If a segment with a smaller segment id is still being watched, the current segment will
* replace it because the smaller segment should have been consumed. This method ensures that
* only one segment file can be watched for each subpartition.
*
* @param partitionId is the id of partition.
* @param subpartitionId is the id of subpartition.
* @param segmentId is the id of segment.
*/
public void watchSegment(
TieredStoragePartitionId partitionId,
TieredStorageSubpartitionId subpartitionId,
int segmentId) {
Tuple2<TieredStoragePartitionId, TieredStorageSubpartitionId> key =
Tuple2.of(partitionId, subpartitionId);
scannedMaxSegmentIds.compute(
key,
(segmentKey, maxSegmentId) -> {
if (maxSegmentId == null || maxSegmentId < segmentId) {
requiredSegmentIds.put(segmentKey, segmentId);
}
return maxSegmentId;
});
} | 3.68 |
hbase_BucketCache_bucketSizesAboveThresholdCount | /**
* Return the count of bucketSizeInfos that still need free space
*/
private int bucketSizesAboveThresholdCount(float minFactor) {
BucketAllocator.IndexStatistics[] stats = bucketAllocator.getIndexStatistics();
int fullCount = 0;
for (int i = 0; i < stats.length; i++) {
long freeGoal = (long) Math.floor(stats[i].totalCount() * (1 - minFactor));
freeGoal = Math.max(freeGoal, 1);
if (stats[i].freeCount() < freeGoal) {
fullCount++;
}
}
return fullCount;
} | 3.68 |
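The freeGoal arithmetic above is worth a quick worked example with assumed numbers: with 2048 slots of one bucket size and a minFactor of 0.85, that bucket size keeps counting toward fullCount until at least 307 of its slots are free:

```java
public class FreeGoalDemo {
    public static void main(String[] args) {
        long totalCount = 2048;   // assumed slot count for one bucket size
        float minFactor = 0.85f;  // assumed configured minimum occupancy factor
        // Same two steps as the snippet above.
        long freeGoal = (long) Math.floor(totalCount * (1 - minFactor));
        freeGoal = Math.max(freeGoal, 1);
        // A bucket size counts toward fullCount while freeCount() < freeGoal.
        System.out.println(freeGoal); // 307
    }
}
```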
flink_SharedBufferAccessor_materializeMatch | /**
* Extracts the real events from the sharedBuffer for the pre-extracted event ids.
*
* @param match map from pattern name to the event ids of the matched events.
* @return map from pattern name to the events associated with those event ids.
*/
public Map<String, List<V>> materializeMatch(Map<String, List<EventId>> match) {
Map<String, List<V>> materializedMatch =
CollectionUtil.newLinkedHashMapWithExpectedSize(match.size());
for (Map.Entry<String, List<EventId>> pattern : match.entrySet()) {
List<V> events = new ArrayList<>(pattern.getValue().size());
for (EventId eventId : pattern.getValue()) {
try {
V event = sharedBuffer.getEvent(eventId).getElement();
events.add(event);
} catch (Exception ex) {
throw new WrappingRuntimeException(ex);
}
}
materializedMatch.put(pattern.getKey(), events);
}
return materializedMatch;
} | 3.68 |
hibernate-validator_UUIDValidator_extractVariant | /**
* Get the 3 bit UUID variant from the current value
*
* @param variant The old variant (in case the variant has already been extracted)
* @param index The index of the current value to find the variant to extract
* @param value The numeric value at the character position
*/
private static int extractVariant(int variant, int index, int value) {
if ( index == 19 ) {
// 0xxx
if ( value >> 3 == 0 ) {
return 0;
}
// 10xx
if ( value >> 2 == 2 ) {
return 1;
}
// 110x
if ( value >> 1 == 6 ) {
return 2;
}
}
return variant;
} | 3.68 |
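The bit tests above map directly onto the UUID variant nibble (the character at index 19 of the string form). A tiny worked example with an assumed RFC 4122 style UUID:

```java
public class UuidVariantDemo {
    public static void main(String[] args) {
        // For "f47ac10b-58cc-4372-a567-0e02b2c3d479" the character at index 19
        // is 'a', i.e. 0xA = 0b1010.
        int value = 0xA;
        System.out.println(value >> 3); // 1 -> not variant 0 (pattern 0xxx)
        System.out.println(value >> 2); // 2 -> variant 1 (pattern 10xx), the RFC 4122 layout
    }
}
```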
flink_ExceptionUtils_tryRethrowIOException | /**
* Tries to throw the given {@code Throwable} in scenarios where the signature allows only
* IOExceptions (and RuntimeException and Error). Throws this exception directly, if it is an
* IOException, a RuntimeException, or an Error. Otherwise does nothing.
*
* @param t The Throwable to be thrown.
*/
public static void tryRethrowIOException(Throwable t) throws IOException {
if (t instanceof IOException) {
throw (IOException) t;
} else if (t instanceof RuntimeException) {
throw (RuntimeException) t;
} else if (t instanceof Error) {
throw (Error) t;
}
} | 3.68 |
morf_DatabaseSchemaManager_dropViewIfExists | /**
* Removes the specified view from the database, if it exists. Otherwise
* do nothing. To allow for JDBC implementations that do not support
* conditional dropping of views, any resulting errors are trapped and ignored.
*
* @param view the view to drop
*/
private Collection<String> dropViewIfExists(View view) {
if (log.isDebugEnabled()) log.debug("Dropping any existing view [" + view.getName() + "]");
views.get().remove(view.getName().toUpperCase());
return dialect.get().dropStatements(view);
} | 3.68 |
framework_GenericFontIcon_getCodepoint | /*
* (non-Javadoc)
*
* @see com.vaadin.server.FontIcon#getCodepoint()
*/
@Override
public int getCodepoint() {
return codePoint;
} | 3.68 |
hudi_MetadataMessage_isOverwriteOfExistingFile | /**
* Whether message represents an overwrite of an existing file.
* Ref: https://cloud.google.com/storage/docs/pubsub-notifications#replacing_objects
*/
private boolean isOverwriteOfExistingFile() {
return !isNullOrEmpty(getOverwroteGeneration());
} | 3.68 |
hudi_SparkHoodieIndexFactory_isGlobalIndex | /**
* Whether index is global or not.
* @param config HoodieWriteConfig to use.
* @return {@code true} if index is a global one, else {@code false}.
*/
public static boolean isGlobalIndex(HoodieWriteConfig config) {
switch (config.getIndexType()) {
case HBASE:
return true;
case INMEMORY:
return true;
case BLOOM:
return false;
case GLOBAL_BLOOM:
return true;
case SIMPLE:
return false;
case GLOBAL_SIMPLE:
return true;
case BUCKET:
return false;
case RECORD_INDEX:
return true;
default:
return createIndex(config).isGlobal();
}
} | 3.68 |
hudi_HoodieIndexUtils_mergeIncomingWithExistingRecord | /**
* Merge the incoming record with the matching existing record loaded via {@link HoodieMergedReadHandle}. The existing record is the latest version in the table.
*/
private static <R> Option<HoodieRecord<R>> mergeIncomingWithExistingRecord(
HoodieRecord<R> incoming,
HoodieRecord<R> existing,
Schema writeSchema,
HoodieWriteConfig config,
HoodieRecordMerger recordMerger) throws IOException {
Schema existingSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(config.getSchema()), config.allowOperationMetadataField());
Schema writeSchemaWithMetaFields = HoodieAvroUtils.addMetadataFields(writeSchema, config.allowOperationMetadataField());
// prepend the hoodie meta fields as the incoming record does not have them
HoodieRecord incomingPrepended = incoming
.prependMetaFields(writeSchema, writeSchemaWithMetaFields, new MetadataValues().setRecordKey(incoming.getRecordKey()).setPartitionPath(incoming.getPartitionPath()), config.getProps());
// after prepend the meta fields, convert the record back to the original payload
HoodieRecord incomingWithMetaFields = incomingPrepended
.wrapIntoHoodieRecordPayloadWithParams(writeSchema, config.getProps(), Option.empty(), config.allowOperationMetadataField(), Option.empty(), false, Option.empty());
Option<Pair<HoodieRecord, Schema>> mergeResult = recordMerger
.merge(existing, existingSchema, incomingWithMetaFields, writeSchemaWithMetaFields, config.getProps());
if (mergeResult.isPresent()) {
// the merged record needs to be converted back to the original payload
HoodieRecord<R> merged = mergeResult.get().getLeft().wrapIntoHoodieRecordPayloadWithParams(
writeSchemaWithMetaFields, config.getProps(), Option.empty(),
config.allowOperationMetadataField(), Option.empty(), false, Option.of(writeSchema));
return Option.of(merged);
} else {
return Option.empty();
}
} | 3.68 |
framework_Label_stripTags | /**
* Strips the tags from the XML.
*
* @param xml
* the String containing a XML snippet.
* @return the original XML without tags.
*/
private String stripTags(String xml) {
final StringBuilder res = new StringBuilder();
int processed = 0;
final int xmlLen = xml.length();
while (processed < xmlLen) {
int next = xml.indexOf('<', processed);
if (next < 0) {
next = xmlLen;
}
res.append(xml.substring(processed, next));
if (processed < xmlLen) {
next = xml.indexOf('>', processed);
if (next < 0) {
next = xmlLen;
}
processed = next + 1;
}
}
return res.toString();
} | 3.68 |
morf_UpdateStatement_useParallelDml | /**
* Request that this statement is executed with a parallel execution plan for data manipulation language (DML). This request will have no effect unless the database implementation supports it and the feature is enabled.
*
* <p>For statements that will affect a high percentage of rows in the table, a parallel execution plan may reduce the execution time, although the exact effect depends on
* the underlying database, the nature of the data and the nature of the query.</p>
*
* <p>Note that the use of parallel DML comes with restrictions, in particular, a table may not be accessed in the same transaction following a parallel DML execution. Please consult the Oracle manual section <em>Restrictions on Parallel DML</em> to check whether this hint is suitable.</p>
*
* @param degreeOfParallelism Degree of parallelism to be specified in the hint.
* @return this, for method chaining.
*/
public UpdateStatement useParallelDml(int degreeOfParallelism) {
if (AliasedField.immutableDslEnabled()) {
return shallowCopy().useParallelDml(degreeOfParallelism).build();
} else {
hints.add(new UseParallelDml(degreeOfParallelism));
return this;
}
} | 3.68 |
flink_DeltaIteration_registerAggregator | /**
* Registers an {@link Aggregator} for the iteration. Aggregators can be used to maintain simple
* statistics during the iteration, such as number of elements processed. The aggregators
* compute global aggregates: After each iteration step, the values are globally aggregated to
* produce one aggregate that represents statistics across all parallel instances. The value of
* an aggregator can be accessed in the next iteration.
*
* <p>Aggregators can be accessed inside a function via the {@link
* org.apache.flink.api.common.functions.AbstractRichFunction#getIterationRuntimeContext()}
* method.
*
* @param name The name under which the aggregator is registered.
* @param aggregator The aggregator class.
* @return The DeltaIteration itself, to allow chaining function calls.
*/
@PublicEvolving
public DeltaIteration<ST, WT> registerAggregator(String name, Aggregator<?> aggregator) {
this.aggregators.registerAggregator(name, aggregator);
return this;
} | 3.68 |
morf_Deployment_getPath | /**
* Return an "upgrade" path corresponding to a full database deployment, matching
* the given schema.
*
* <p>This method adds all upgrade steps after creating the tables/views.</p>
*
* @param targetSchema Schema that is to be deployed.
* @param upgradeSteps All available upgrade steps.
* @return A path which can be executed to make {@code database} match {@code targetSchema}.
*/
public UpgradePath getPath(Schema targetSchema, Collection<Class<? extends UpgradeStep>> upgradeSteps) {
final UpgradePath path = upgradePathFactory.create(connectionResources);
writeStatements(targetSchema, path);
writeUpgradeSteps(upgradeSteps, path);
return path;
} | 3.68 |
morf_AbstractSqlDialectTest_testFormatSqlStatement | /**
* Tests the {@link SqlDialect#formatSqlStatement(String)} performs
* correctly.
*/
@Test
public void testFormatSqlStatement() {
expectedSqlStatementFormat();
} | 3.68 |
hbase_SplitTableRegionProcedure_prepareSplitRegion | /**
* Prepare to Split region.
* @param env MasterProcedureEnv
*/
public boolean prepareSplitRegion(final MasterProcedureEnv env) throws IOException {
// Fail if we are taking snapshot for the given table
if (
env.getMasterServices().getSnapshotManager().isTakingSnapshot(getParentRegion().getTable())
) {
setFailure(new IOException("Skip splitting region " + getParentRegion().getShortNameToLog()
+ ", because we are taking snapshot for the table " + getParentRegion().getTable()));
return false;
}
// Check whether the region is splittable
RegionStateNode node =
env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion());
if (node == null) {
throw new UnknownRegionException(getParentRegion().getRegionNameAsString());
}
RegionInfo parentHRI = node.getRegionInfo();
if (parentHRI == null) {
LOG.info("Unsplittable; parent region is null; node={}", node);
return false;
}
// Lookup the parent HRI state from the AM, which has the latest updated info.
// Protect against the case where concurrent SPLIT requests came in and succeeded
// just before us.
if (node.isInState(State.SPLIT)) {
LOG.info("Split of " + parentHRI + " skipped; state is already SPLIT");
return false;
}
if (parentHRI.isSplit() || parentHRI.isOffline()) {
LOG.info("Split of " + parentHRI + " skipped because offline/split.");
return false;
}
// expected parent to be online or closed
if (!node.isInState(EXPECTED_SPLIT_STATES)) {
// We may have SPLIT already?
setFailure(
new IOException("Split " + parentHRI.getRegionNameAsString() + " FAILED because state="
+ node.getState() + "; expected " + Arrays.toString(EXPECTED_SPLIT_STATES)));
return false;
}
// Mostly this check is not used because we already check the switch before submit a split
// procedure. Just for safe, check the switch again. This procedure can be rollbacked if
// the switch was set to false after submit.
if (!env.getMasterServices().isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
LOG.warn("pid=" + getProcId() + " split switch is off! skip split of " + parentHRI);
setFailure(new IOException(
"Split region " + parentHRI.getRegionNameAsString() + " failed due to split switch off"));
return false;
}
if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isSplitEnabled()) {
LOG.warn("pid={}, split is disabled for the table! Skipping split of {}", getProcId(),
parentHRI);
setFailure(new IOException("Split region " + parentHRI.getRegionNameAsString()
+ " failed as region split is disabled for the table"));
return false;
}
// set node state as SPLITTING
node.setState(State.SPLITTING);
// Since we have the lock and the master is coordinating the operation
// we are always able to split the region
return true;
} | 3.68 |
hbase_ZKUtil_setWatchIfNodeExists | /**
   * Watch the specified znode, but only if it exists. Useful when watching for deletions. Uses
* .getData() (and handles NoNodeException) instead of .exists() to accomplish this, as .getData()
* will only set a watch if the znode exists.
* @param zkw zk reference
* @param znode path of node to watch
   * @return true if the watch is set, false if the node does not exist
* @throws KeeperException if unexpected zookeeper exception
*/
public static boolean setWatchIfNodeExists(ZKWatcher zkw, String znode) throws KeeperException {
try {
zkw.getRecoverableZooKeeper().getData(znode, true, null);
return true;
} catch (NoNodeException e) {
return false;
} catch (InterruptedException e) {
LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e);
zkw.interruptedException(e);
return false;
}
} | 3.68 |
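A short usage sketch of the helper above, assuming an existing ZKWatcher instance and an illustrative znode path:

    // The watch is only set when the znode currently exists, so a later deletion fires the watcher;
    // false means the node was absent and no watch was registered.
    boolean watching = ZKUtil.setWatchIfNodeExists(zkWatcher, "/hbase/example-znode");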
shardingsphere-elasticjob_PropertiesPreconditions_checkRequired | /**
     * Check that a required property value is present.
*
* @param props properties to be checked
* @param key property key to be checked
*/
public static void checkRequired(final Properties props, final String key) {
Preconditions.checkArgument(props.containsKey(key), "The property `%s` is required.", key);
} | 3.68 |
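A minimal usage sketch; the property key "cron" is only an illustrative assumption:

    Properties props = new Properties();
    props.setProperty("cron", "0/5 * * * * ?");
    // Passes because the key is present; a missing key would raise IllegalArgumentException
    // with the message "The property `cron` is required."
    PropertiesPreconditions.checkRequired(props, "cron");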
flink_SortPartitionOperator_useKeySelector | /** Returns whether a key selector is used or not. */
public boolean useKeySelector() {
return useKeySelector;
} | 3.68 |
morf_DataSetHomology_subtractTable | /**
* Subtract the common tables from the tables provided
*/
private Set<String> subtractTable(Set<String> tables, Set<String> commonTables) {
return Sets.difference(tables, commonTables);
} | 3.68 |
morf_SqlDialect_checkSelectStatementHasNoHints | /**
* Throws {@link IllegalArgumentException} if the select statement has hints.
*
* @param statement The select statement.
* @param errorMessage The message for the exception.
*/
protected void checkSelectStatementHasNoHints(SelectStatement statement, String errorMessage) {
if (!statement.getHints().isEmpty()) {
throw new IllegalArgumentException(errorMessage);
}
} | 3.68 |
hbase_MasterObserver_preDeleteTableAction | /**
* Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a table. Called as part of
   * the delete table procedure and is asynchronous to the delete RPC call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
*/
default void preDeleteTableAction(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName) throws IOException {
} | 3.68 |
hadoop_ExcessRedundancyMap_remove | /**
* Remove the redundancy corresponding to the given datanode and the given
* block.
*
* @return true if the block is removed.
*/
synchronized boolean remove(DatanodeDescriptor dn, BlockInfo blk) {
final LightWeightHashSet<BlockInfo> set = map.get(dn.getDatanodeUuid());
if (set == null) {
return false;
}
final boolean removed = set.remove(blk);
if (removed) {
size.decrementAndGet();
blockLog.debug("BLOCK* ExcessRedundancyMap.remove({}, {})", dn, blk);
if (set.isEmpty()) {
map.remove(dn.getDatanodeUuid());
}
}
return removed;
} | 3.68 |
flink_TypeInformationSerializationSchema_isEndOfStream | /**
 * This schema never considers an element to signal end-of-stream, so this method always returns
* false.
*
* @param nextElement The element to test for the end-of-stream signal.
* @return Returns false.
*/
@Override
public boolean isEndOfStream(T nextElement) {
return false;
} | 3.68 |
hadoop_ReconfigurableBase_run | // See {@link ReconfigurationServlet#applyChanges}
public void run() {
LOG.info("Starting reconfiguration task.");
final Configuration oldConf = parent.getConf();
final Configuration newConf = parent.getNewConf();
final Collection<PropertyChange> changes =
parent.getChangedProperties(newConf, oldConf);
Map<PropertyChange, Optional<String>> results = Maps.newHashMap();
ConfigRedactor oldRedactor = new ConfigRedactor(oldConf);
ConfigRedactor newRedactor = new ConfigRedactor(newConf);
for (PropertyChange change : changes) {
String errorMessage = null;
String oldValRedacted = oldRedactor.redact(change.prop, change.oldVal);
String newValRedacted = newRedactor.redact(change.prop, change.newVal);
if (!parent.isPropertyReconfigurable(change.prop)) {
LOG.info(String.format(
"Property %s is not configurable: old value: %s, new value: %s",
change.prop,
oldValRedacted,
newValRedacted));
continue;
}
LOG.info("Change property: " + change.prop + " from \""
+ ((change.oldVal == null) ? "<default>" : oldValRedacted)
+ "\" to \""
+ ((change.newVal == null) ? "<default>" : newValRedacted)
+ "\".");
try {
String effectiveValue =
parent.reconfigurePropertyImpl(change.prop, change.newVal);
if (change.newVal != null) {
oldConf.set(change.prop, effectiveValue);
} else {
oldConf.unset(change.prop);
}
} catch (ReconfigurationException e) {
Throwable cause = e.getCause();
errorMessage = cause == null ? e.getMessage() : cause.getMessage();
}
results.put(change, Optional.ofNullable(errorMessage));
}
synchronized (parent.reconfigLock) {
parent.endTime = Time.now();
parent.status = Collections.unmodifiableMap(results);
parent.reconfigThread = null;
}
} | 3.68 |
MagicPlugin_BlockSpell_goLeft | /**
* A helper function to go change a given direction to the direction "to the right".
*
* <p>There's probably some better matrix-y, math-y way to do this.
* It'd be nice if this was in BlockFace.
*
* @param direction The current direction
* @return The direction to the left
*/
public static BlockFace goLeft(BlockFace direction)
{
switch (direction)
{
case EAST:
return BlockFace.NORTH;
case NORTH:
return BlockFace.WEST;
case WEST:
return BlockFace.SOUTH;
case SOUTH:
return BlockFace.EAST;
default:
return direction;
}
} | 3.68 |
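For illustration, the switch above rotates the compass counter-clockwise and leaves every other face untouched:

    BlockFace left = goLeft(BlockFace.EAST);      // NORTH
    BlockFace unchanged = goLeft(BlockFace.UP);   // falls through to default: UP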
dubbo_ReferenceAnnotationBeanPostProcessor_getInjectedMethodReferenceBeanMap | /**
 * Get the {@link Map} of {@link ReferenceBean}s for injected methods.
*
* @return non-null {@link Map}
* @since 2.5.11
*/
public Map<InjectionMetadata.InjectedElement, ReferenceBean<?>> getInjectedMethodReferenceBeanMap() {
Map<InjectionMetadata.InjectedElement, ReferenceBean<?>> map = new HashMap<>();
for (Map.Entry<InjectionMetadata.InjectedElement, String> entry : injectedMethodReferenceBeanCache.entrySet()) {
map.put(entry.getKey(), referenceBeanManager.getById(entry.getValue()));
}
return Collections.unmodifiableMap(map);
} | 3.68 |
flink_KvStateInfo_getKeySerializer | /** @return The serializer for the key the state is associated with. */
public TypeSerializer<K> getKeySerializer() {
return keySerializer;
} | 3.68 |
flink_NoFetchingInput_require | /**
 * Require makes sure that at least the required number of bytes is kept in the buffer. If not,
 * it loads exactly the difference between the required and the currently available number of
 * bytes. Thus, it only loads the data that is required and never prefetches data.
 *
 * @param required the number of bytes that must be available in the buffer
* @return the number of bytes remaining, which is equal to required
* @throws KryoException
*/
@Override
protected int require(int required) throws KryoException {
if (required > capacity) {
throw new KryoException(
"Buffer too small: capacity: " + capacity + ", " + "required: " + required);
}
position = 0;
int bytesRead = 0;
int count;
while (true) {
count = fill(buffer, bytesRead, required - bytesRead);
if (count == -1) {
throw new KryoException(new EOFException("No more bytes left."));
}
bytesRead += count;
if (bytesRead == required) {
break;
}
}
limit = required;
return required;
} | 3.68 |
flink_TableFactoryService_normalizeContext | /** Prepares the properties of a context to be used for match operations. */
private static Map<String, String> normalizeContext(TableFactory factory) {
Map<String, String> requiredContext = factory.requiredContext();
if (requiredContext == null) {
throw new TableException(
String.format(
"Required context of factory '%s' must not be null.",
factory.getClass().getName()));
}
return requiredContext.keySet().stream()
.collect(Collectors.toMap(String::toLowerCase, requiredContext::get));
} | 3.68 |
open-banking-gateway_TppTokenConfig_loadPublicKey | /**
* See {@code de.adorsys.opba.tppauthapi.TokenSignVerifyTest#generateNewTppKeyPair()} for details of how to
* generate the encoded key.
*/
@SneakyThrows
private RSAPublicKey loadPublicKey(TppTokenProperties tppTokenProperties) {
byte[] publicKeyBytes = Base64.getDecoder().decode(tppTokenProperties.getPublicKey());
X509EncodedKeySpec ks = new X509EncodedKeySpec(publicKeyBytes);
KeyFactory kf = KeyFactory.getInstance(tppTokenProperties.getSignAlgo());
return (RSAPublicKey) kf.generatePublic(ks);
} | 3.68 |
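A hedged sketch of producing the Base64, X.509-encoded public key that loadPublicKey expects, using only the standard JCA; the 2048-bit key size is an assumption:

    KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
    generator.initialize(2048);
    KeyPair pair = generator.generateKeyPair();
    // getEncoded() on an RSA public key returns the X.509/SubjectPublicKeyInfo form
    // that the X509EncodedKeySpec above can parse back
    String encodedPublicKey = Base64.getEncoder().encodeToString(pair.getPublic().getEncoded());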
framework_VScrollTable_getNavigationUpKey | /**
* Get the key that moves the selection head upwards. By default it is the
* up arrow key but by overriding this you can change the key to whatever
* you want.
*
* @return The keycode of the key
*/
protected int getNavigationUpKey() {
return KeyCodes.KEY_UP;
} | 3.68 |
flink_LeaderRetrievalUtils_retrieveLeaderInformation | /**
 * Retrieves the leader's Pekko URL and the current leader session ID. The values are stored in a
* {@link LeaderInformation} instance.
*
* @param leaderRetrievalService Leader retrieval service to retrieve the leader connection
* information
* @param timeout Timeout when to give up looking for the leader
* @return LeaderInformation containing the leader's rpc URL and the current leader session ID
* @throws LeaderRetrievalException
*/
public static LeaderInformation retrieveLeaderInformation(
LeaderRetrievalService leaderRetrievalService, Duration timeout)
throws LeaderRetrievalException {
LeaderInformationListener listener = new LeaderInformationListener();
try {
leaderRetrievalService.start(listener);
return listener.getLeaderInformationFuture()
.get(timeout.toMillis(), TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new LeaderRetrievalException(
"Could not retrieve the leader address and leader " + "session ID.", e);
} finally {
try {
leaderRetrievalService.stop();
} catch (Exception fe) {
LOG.warn("Could not stop the leader retrieval service.", fe);
}
}
} | 3.68 |
framework_Table_accept | /*
* (non-Javadoc)
*
* @see
* com.vaadin.event.dd.acceptcriteria.AcceptCriterion#accepts(com.vaadin
* .event.dd.DragAndDropEvent)
*/
@Override
@SuppressWarnings("unchecked")
public boolean accept(DragAndDropEvent dragEvent) {
AbstractSelectTargetDetails dropTargetData = (AbstractSelectTargetDetails) dragEvent
.getTargetDetails();
table = (Table) dragEvent.getTargetDetails().getTarget();
Collection<?> visibleItemIds = table.getVisibleItemIds();
allowedItemIds = getAllowedItemIds(dragEvent, table,
(Collection<Object>) visibleItemIds);
return allowedItemIds.contains(dropTargetData.getItemIdOver());
} | 3.68 |
pulsar_MessageIdAdv_getBatchIndex | /**
* Get the batch index.
*
* @return -1 if the message is not in a batch
*/
default int getBatchIndex() {
return -1;
} | 3.68 |
hadoop_RetryReasonCategory_checkExceptionMessage | /**
* Checks if a required search-string is in the exception's message.
*/
Boolean checkExceptionMessage(final Exception exceptionCaptured,
final String search) {
if (search == null) {
return false;
}
if (exceptionCaptured != null
&& exceptionCaptured.getMessage() != null
&& exceptionCaptured.getMessage()
.toLowerCase(Locale.US)
.contains(search.toLowerCase(Locale.US))) {
return true;
}
return false;
} | 3.68 |
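Two illustrative calls tracing the case-insensitive containment check above (the exception messages are made up):

    // true: "timed out" is contained in the message, ignoring case
    checkExceptionMessage(new SocketTimeoutException("Read TIMED OUT"), "timed out");
    // false: a null search string is rejected up front
    checkExceptionMessage(new IOException("connection reset"), null);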
hbase_Procedure_isFinished | /**
* @return true if the procedure is finished. The Procedure may be completed successfully or
   * rolled back.
*/
public synchronized boolean isFinished() {
return isSuccess() || state == ProcedureState.ROLLEDBACK;
} | 3.68 |
flink_TimeWindow_cover | /** Returns the minimal window that covers both this window and the given window. */
public TimeWindow cover(TimeWindow other) {
return new TimeWindow(Math.min(start, other.start), Math.max(end, other.end));
} | 3.68 |
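Given the min/max logic above, covering two overlapping windows yields their union span, for example:

    TimeWindow merged = new TimeWindow(0L, 10L).cover(new TimeWindow(5L, 20L));
    // merged spans [0, 20): start = min(0, 5), end = max(10, 20)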
flink_BinaryHashPartition_finalizeProbePhase | /**
* @param keepUnprobedSpilledPartitions If true then partitions that were spilled but received
* no further probe requests will be retained; used for build-side outer joins.
*/
void finalizeProbePhase(
LazyMemorySegmentPool pool,
List<BinaryHashPartition> spilledPartitions,
boolean keepUnprobedSpilledPartitions)
throws IOException {
if (isInMemory()) {
this.bucketArea.returnMemory(pool);
this.bucketArea = null;
// return the partition buffers
pool.returnAll(Arrays.asList(partitionBuffers));
this.partitionBuffers = null;
} else {
if (bloomFilter != null) {
freeBloomFilter();
}
if (this.probeSideRecordCounter == 0 && !keepUnprobedSpilledPartitions) {
// delete the spill files
this.probeSideBuffer.close();
this.buildSideChannel.deleteChannel();
this.probeSideBuffer.getChannel().deleteChannel();
} else {
// flush the last probe side buffer and register this partition as pending
probeNumBytesInLastSeg = this.probeSideBuffer.close();
spilledPartitions.add(this);
}
}
} | 3.68 |
pulsar_ResourceUnitRanking_getAllocatedLoadPercentageBandwidthIn | /**
* Percentage of inbound bandwidth allocated to bundle's quota.
*/
public double getAllocatedLoadPercentageBandwidthIn() {
return this.allocatedLoadPercentageBandwidthIn;
} | 3.68 |
framework_Escalator_getScrollTop | /**
* Returns the vertical scroll offset. Note that this is not necessarily the
* same as the {@code scrollTop} attribute in the DOM.
*
* @return the logical vertical scroll offset
*/
public double getScrollTop() {
return verticalScrollbar.getScrollPos();
} | 3.68 |
pulsar_NettyFutureUtil_toCompletableFuture | /**
* Converts a Netty {@link Future} to {@link CompletableFuture}.
*
* @param future Netty future
* @param <V> value type
* @return converted future instance
*/
public static <V> CompletableFuture<V> toCompletableFuture(Future<V> future) {
Objects.requireNonNull(future, "future cannot be null");
CompletableFuture<V> adapter = new CompletableFuture<>();
if (future.isDone()) {
if (future.isSuccess()) {
adapter.complete(future.getNow());
} else {
adapter.completeExceptionally(future.cause());
}
} else {
future.addListener((Future<V> f) -> {
if (f.isSuccess()) {
adapter.complete(f.getNow());
} else {
adapter.completeExceptionally(f.cause());
}
});
}
return adapter;
} | 3.68 |
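A hedged usage sketch, assuming an open Netty Channel; closeFuture() is a standard Netty Future<Void>:

    CompletableFuture<Void> closed = NettyFutureUtil.toCompletableFuture(channel.closeFuture());
    closed.thenRun(() -> System.out.println("channel closed"));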
hadoop_AssumedRoleCredentialProvider_operationRetried | /**
* Callback from {@link Invoker} when an operation is retried.
* @param text text of the operation
* @param ex exception
* @param retries number of retries
* @param idempotent is the method idempotent
*/
public void operationRetried(
String text,
Exception ex,
int retries,
boolean idempotent) {
if (retries == 0) {
// log on the first retry attempt of the credential access.
// At worst, this means one log entry every intermittent renewal
// time.
LOG.info("Retried {}", text);
}
} | 3.68 |
hbase_MetricsConnection_incrHedgedReadOps | /** Increment the number of hedged read that have occurred. */
public void incrHedgedReadOps() {
hedgedReadOps.inc();
} | 3.68 |
flink_FieldParser_nextStringLength | /**
* Returns the length of a string. Throws an exception if the column is empty.
*
* @return the length of the string
*/
protected static final int nextStringLength(
byte[] bytes, int startPos, int length, char delimiter) {
if (length <= 0) {
throw new IllegalArgumentException("Invalid input: Empty string");
}
int limitedLength = 0;
final byte delByte = (byte) delimiter;
while (limitedLength < length && bytes[startPos + limitedLength] != delByte) {
limitedLength++;
}
return limitedLength;
} | 3.68 |
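Tracing the scan above on a small input (a sketch only; the method is protected, so real callers are the FieldParser subclasses):

    byte[] bytes = "abc|def".getBytes(StandardCharsets.US_ASCII);
    // Scans from position 0 until the '|' delimiter is hit at index 3, so the result is 3
    int length = nextStringLength(bytes, 0, bytes.length, '|');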
hbase_HBaseTestingUtility_compact | /**
   * Compact all of a table's regions in the mini HBase cluster.
*/
public void compact(TableName tableName, boolean major) throws IOException {
getMiniHBaseCluster().compact(tableName, major);
} | 3.68 |
hudi_HoodieCompactionAdminTool_run | /**
   * Executes one of the compaction admin operations.
*/
public void run(JavaSparkContext jsc) throws Exception {
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(jsc.hadoopConfiguration()).setBasePath(cfg.basePath).build();
try (CompactionAdminClient admin = new CompactionAdminClient(new HoodieSparkEngineContext(jsc), cfg.basePath)) {
final FileSystem fs = FSUtils.getFs(cfg.basePath, jsc.hadoopConfiguration());
if (cfg.outputPath != null && fs.exists(new Path(cfg.outputPath))) {
throw new IllegalStateException("Output File Path already exists");
}
switch (cfg.operation) {
case VALIDATE:
List<ValidationOpResult> res =
admin.validateCompactionPlan(metaClient, cfg.compactionInstantTime, cfg.parallelism);
if (cfg.printOutput) {
printOperationResult("Result of Validation Operation :", res);
}
serializeOperationResult(fs, res);
break;
case UNSCHEDULE_FILE:
List<RenameOpResult> r = admin.unscheduleCompactionFileId(
new HoodieFileGroupId(cfg.partitionPath, cfg.fileId), cfg.skipValidation, cfg.dryRun);
if (cfg.printOutput) {
System.out.println(r);
}
serializeOperationResult(fs, r);
break;
case UNSCHEDULE_PLAN:
List<RenameOpResult> r2 = admin.unscheduleCompactionPlan(cfg.compactionInstantTime, cfg.skipValidation,
cfg.parallelism, cfg.dryRun);
if (cfg.printOutput) {
printOperationResult("Result of Unscheduling Compaction Plan :", r2);
}
serializeOperationResult(fs, r2);
break;
case REPAIR:
List<RenameOpResult> r3 = admin.repairCompaction(cfg.compactionInstantTime, cfg.parallelism, cfg.dryRun);
if (cfg.printOutput) {
printOperationResult("Result of Repair Operation :", r3);
}
serializeOperationResult(fs, r3);
break;
default:
throw new IllegalStateException("Not yet implemented !!");
}
}
} | 3.68 |
framework_AbstractTextField_addFocusListener | /**
* Adds a {@link FocusListener} to this component, which gets fired when
* this component receives keyboard focus.
*
* @param listener
* the focus listener
* @return a registration for the listener
*
* @see Registration
*/
@Override
public Registration addFocusListener(FocusListener listener) {
return addListener(FocusEvent.EVENT_ID, FocusEvent.class, listener,
FocusListener.focusMethod);
} | 3.68 |
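A usage sketch of the registration pattern above; the field caption and notification text are illustrative:

    TextField nameField = new TextField("Name");
    Registration registration = nameField.addFocusListener(
            event -> Notification.show("Name field focused"));
    // Later, detach the listener again
    registration.remove();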
framework_Tree_fireCollapseEvent | /**
     * Emits the collapse event.
*
* @param itemId
* the item id.
*/
protected void fireCollapseEvent(Object itemId) {
fireEvent(new CollapseEvent(this, itemId));
} | 3.68 |
framework_Form_addField | /**
* Registers the field with the form and adds the field to the form layout.
*
* <p>
* The property id must not be already used in the form.
* </p>
*
* <p>
* This field is added to the layout using the
* {@link #attachField(Object, Field)} method.
* </p>
*
* @param propertyId
     *            the property id of the field.
* @param field
* the field which should be added to the form.
*/
public void addField(Object propertyId, Field<?> field) {
registerField(propertyId, field);
attachField(propertyId, field);
markAsDirty();
} | 3.68 |
framework_VScrollTable_getNavigationSelectKey | /**
* Get the key that selects an item in the table. By default it is the space
* bar key but by overriding this you can change the key to whatever you
* want.
*
     * @return The keycode of the key
*/
protected int getNavigationSelectKey() {
return CHARCODE_SPACE;
} | 3.68 |
framework_LegacyWindow_getBrowserWindowHeight | /**
* Gets the last known height of the browser window in which this UI
* resides.
*
* @return the browser window height in pixels
* @deprecated As of 7.0, use the similarly named api in Page instead
*/
@Deprecated
public int getBrowserWindowHeight() {
return getPage().getBrowserWindowHeight();
} | 3.68 |