name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hudi_SecondaryIndexManager_show | /**
* Show secondary indexes from hoodie table
*
* @param metaClient Hoodie table meta client
* @return Indexes in this table
*/
public Option<List<HoodieSecondaryIndex>> show(HoodieTableMetaClient metaClient) {
return SecondaryIndexUtils.getSecondaryIndexes(metaClient);
} | 3.68 |
framework_StaticSection_addColumn | /**
* Adds a cell corresponding to the given column id to this section.
*
* @param columnId
* the id of the column for which to add a cell
*/
public void addColumn(String columnId) {
for (ROW row : rows) {
row.internalAddCell(columnId);
}
} | 3.68 |
pulsar_Record_getEventTime | /**
* Retrieves the event time of the record from the source.
*
* @return millis since epoch
*/
default Optional<Long> getEventTime() {
return Optional.empty();
} | 3.68 |
flink_SharedBufferAccessor_close | /**
* Persists the cached entries to the underlying state.
*
* @throws Exception Thrown if the system cannot access the state.
*/
public void close() throws Exception {
sharedBuffer.flushCache();
} | 3.68 |
flink_KvStateSerializer_serializeValue | /**
* Serializes the value with the given serializer.
*
* @param value Value of type T to serialize
* @param serializer Serializer for T
* @param <T> Type of the value
* @return Serialized value or <code>null</code> if value <code>null</code>
* @throws IOException On failure during serialization
*/
public static <T> byte[] serializeValue(T value, TypeSerializer<T> serializer)
throws IOException {
if (value != null) {
// Serialize
DataOutputSerializer dos = new DataOutputSerializer(32);
serializer.serialize(value, dos);
return dos.getCopyOfBuffer();
} else {
return null;
}
} | 3.68 |
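
A minimal usage sketch for the null-aware serialization helper above. The package locations of KvStateSerializer and Flink's built-in LongSerializer are assumed from memory and may need adjusting; the class name SerializeValueSketch is purely illustrative.

```java
import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.queryablestate.client.state.serialization.KvStateSerializer;

public class SerializeValueSketch {
    public static void main(String[] args) throws Exception {
        // Non-null values are written through the TypeSerializer into a fresh buffer.
        byte[] bytes = KvStateSerializer.serializeValue(42L, LongSerializer.INSTANCE);
        System.out.println("serialized length = " + bytes.length); // 8 bytes for a long

        // A null value short-circuits to null instead of an empty array.
        byte[] nothing = KvStateSerializer.serializeValue(null, LongSerializer.INSTANCE);
        System.out.println("null input -> " + nothing); // prints "null input -> null"
    }
}
```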
framework_VTabsheet_scrollLeft | /**
* Finds a plausible scroll position to the closest tab on the left that
* hasn't been set hidden on the server. If a suitable tab is found,
* also sets that tab visible and removes the first visible style from
* the previous tab. Does not update the scroller index or set the new
* first visible style, in case there are multiple calls in a row. Does
* not update any visibilities or styles if a suitable tab is not found.
*
* @param currentFirstVisible
* the index of the current first visible tab
* @return the index of the closest visible tab to the left from the
* starting point, or {@code -1} if not found
*/
public int scrollLeft(int currentFirstVisible) {
int prevVisible = getPreviousVisibleTab(currentFirstVisible);
if (prevVisible < 0) {
return -1;
}
Tab newFirst = getTab(prevVisible);
newFirst.setVisible(true);
newFirst.recalculateCaptionWidth();
Tab oldFirst = getTab(currentFirstVisible);
if (oldFirst != null) {
oldFirst.setStyleNames(
currentFirstVisible == tabsheet.activeTabIndex, false);
}
return prevVisible;
} | 3.68 |
pulsar_LoadManagerShared_getBundleRangeFromBundleName | // From a full bundle name, extract the bundle range.
public static String getBundleRangeFromBundleName(String bundleName) {
// the bundle format is property/cluster/namespace/0x00000000_0xFFFFFFFF
int pos = bundleName.lastIndexOf("/");
checkArgument(pos != -1);
return bundleName.substring(pos + 1);
} | 3.68 |
morf_FieldReference_nullsLast | /**
* Sets the null value handling type to LAST.
* @return this
*/
public Builder nullsLast() {
this.nullValueHandling = Optional.of(NullValueHandling.LAST);
return this;
} | 3.68 |
hbase_TimeRange_withinOrAfterTimeRange | /**
* Check if the specified timestamp is within or after this TimeRange.
* <p>
* Returns true if the timestamp is greater than or equal to minStamp, false otherwise.
* @param timestamp timestamp to check
* @return true if within or after TimeRange, false if not
*/
public boolean withinOrAfterTimeRange(long timestamp) {
assert timestamp >= 0;
if (allTime) {
return true;
}
// check if >= minStamp
return timestamp >= minStamp;
} | 3.68 |
framework_TabsheetConnector_onStateChanged | /*
* (non-Javadoc)
*
* @see
* com.vaadin.client.ui.AbstractComponentConnector#onStateChanged(com.vaadin
* .client.communication.StateChangeEvent)
*/
@Override
public void onStateChanged(StateChangeEvent stateChangeEvent) {
super.onStateChanged(stateChangeEvent);
getWidget().handleStyleNames(getState());
if (getState().tabsVisible) {
getWidget().showTabs();
} else {
getWidget().hideTabs();
}
// tabs; push or not
if (!isUndefinedWidth()) {
getWidget().tabs.getStyle().setOverflow(Overflow.HIDDEN);
} else {
getWidget().showAllTabs();
getWidget().tabs.getStyle().clearWidth();
getWidget().tabs.getStyle().setOverflow(Overflow.VISIBLE);
getWidget().updateDynamicWidth();
}
if (!isUndefinedHeight()) {
// Must update height after the styles have been set
getWidget().updateContentNodeHeight();
getWidget().updateOpenTabSize();
}
getWidget().iLayout();
} | 3.68 |
framework_ClientRpcWriter_collectPendingRpcCalls | /**
* Collects all pending RPC calls from listed {@link ClientConnector}s and
* clears their RPC queues.
*
* @param rpcPendingQueue
* list of {@link ClientConnector} of interest
* @return ordered list of pending RPC calls
*/
private Collection<ClientMethodInvocation> collectPendingRpcCalls(
Collection<ClientConnector> rpcPendingQueue) {
List<ClientMethodInvocation> pendingInvocations = new ArrayList<>();
for (ClientConnector connector : rpcPendingQueue) {
List<ClientMethodInvocation> paintablePendingRpc = connector
.retrievePendingRpcCalls();
if (null != paintablePendingRpc && !paintablePendingRpc.isEmpty()) {
List<ClientMethodInvocation> oldPendingRpc = pendingInvocations;
int totalCalls = pendingInvocations.size()
+ paintablePendingRpc.size();
pendingInvocations = new ArrayList<>(totalCalls);
// merge two ordered comparable lists
for (int destIndex = 0, oldIndex = 0, paintableIndex = 0; destIndex < totalCalls; destIndex++) {
if (paintableIndex >= paintablePendingRpc.size()
|| (oldIndex < oldPendingRpc.size() && oldPendingRpc
.get(oldIndex).compareTo(paintablePendingRpc
.get(paintableIndex)) <= 0)) {
pendingInvocations.add(oldPendingRpc.get(oldIndex++));
} else {
pendingInvocations
.add(paintablePendingRpc.get(paintableIndex++));
}
}
}
}
return pendingInvocations;
} | 3.68 |
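
The inner loop above is a plain two-way merge of two already-sorted lists. As a reference, here is a generic standalone sketch of the same idea; it is not Vaadin code and the names are illustrative.

```java
import java.util.ArrayList;
import java.util.List;

public class MergeSketch {
    // Merges two lists that are each already sorted, preserving the overall order.
    static <E extends Comparable<E>> List<E> merge(List<E> a, List<E> b) {
        List<E> out = new ArrayList<>(a.size() + b.size());
        int i = 0, j = 0;
        while (i < a.size() || j < b.size()) {
            // Take from `a` while it still has elements and its head is not larger than b's head.
            if (j >= b.size() || (i < a.size() && a.get(i).compareTo(b.get(j)) <= 0)) {
                out.add(a.get(i++));
            } else {
                out.add(b.get(j++));
            }
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(merge(List.of(1, 3, 5), List.of(2, 3, 6))); // [1, 2, 3, 3, 5, 6]
    }
}
```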
hbase_RSGroupInfoManagerImpl_moveServerRegionsFromGroup | /**
* Move every region which is currently located on these servers, but should not be
* located there.
* @param movedServers the servers that are moved to new group
* @param srcGrpServers all servers in the source group, excluding the movedServers
* @param targetGroupName the target group
* @param sourceGroupName the source group
* @throws IOException if moving the server and tables fail
*/
private void moveServerRegionsFromGroup(Set<Address> movedServers, Set<Address> srcGrpServers,
String targetGroupName, String sourceGroupName) throws IOException {
moveRegionsBetweenGroups(movedServers, srcGrpServers, targetGroupName, sourceGroupName,
rs -> getRegions(rs), info -> {
try {
String groupName = RSGroupUtil.getRSGroupInfo(masterServices, this, info.getTable())
.map(RSGroupInfo::getName).orElse(RSGroupInfo.DEFAULT_GROUP);
return groupName.equals(targetGroupName);
} catch (IOException e) {
LOG.warn("Failed to test group for region {} and target group {}", info, targetGroupName);
return false;
}
});
} | 3.68 |
framework_Overlay_setWidth | /**
* Sets the pixel value for the width CSS property.
*
* @param width
* value to set
*/
public void setWidth(int width) {
if (width < 0) {
width = 0;
}
this.width = width;
} | 3.68 |
flink_Schema_columnByMetadata | /**
* Declares a metadata column that is appended to this schema.
*
* <p>See {@link #columnByMetadata(String, AbstractDataType, String, boolean)} for a
* detailed explanation.
*
* <p>This method uses a type string that can be easily persisted in a durable catalog.
*
* @param columnName column name
* @param serializableTypeString data type of the column
* @param metadataKey identifying metadata key, if null the column name will be used as
* metadata key
* @param isVirtual whether the column should be persisted or not
*/
public Builder columnByMetadata(
String columnName,
String serializableTypeString,
@Nullable String metadataKey,
boolean isVirtual) {
return columnByMetadata(
columnName, DataTypes.of(serializableTypeString), metadataKey, isVirtual);
} | 3.68 |
graphhopper_GTFSFeed_fastDistance | /**
* @return Equirectangular approximation to distance.
*/
public static double fastDistance (double lat0, double lon0, double lat1, double lon1) {
double midLat = (lat0 + lat1) / 2;
double xscale = Math.cos(Math.toRadians(midLat));
double dx = xscale * (lon1 - lon0);
double dy = (lat1 - lat0);
return Math.sqrt(dx * dx + dy * dy) * METERS_PER_DEGREE_LATITUDE;
} | 3.68 |
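
A self-contained sketch of the same equirectangular approximation. The meters-per-degree constant is an assumed stand-in (GTFSFeed defines its own METERS_PER_DEGREE_LATITUDE); 111111.0 is used here only to make the example runnable.

```java
public class EquirectangularSketch {
    // Assumed constant for illustration; GTFSFeed defines its own METERS_PER_DEGREE_LATITUDE.
    private static final double METERS_PER_DEGREE_LATITUDE = 111_111.0;

    // Same idea as GTFSFeed.fastDistance: scale longitude differences by cos(midLat)
    // so one "degree" of dx and dy covers roughly the same ground distance.
    static double fastDistance(double lat0, double lon0, double lat1, double lon1) {
        double midLat = (lat0 + lat1) / 2;
        double xscale = Math.cos(Math.toRadians(midLat));
        double dx = xscale * (lon1 - lon0);
        double dy = lat1 - lat0;
        return Math.sqrt(dx * dx + dy * dy) * METERS_PER_DEGREE_LATITUDE;
    }

    public static void main(String[] args) {
        // Roughly 1 km of latitude near Berlin (~52.5 N).
        System.out.printf("%.1f m%n", fastDistance(52.5, 13.4, 52.509, 13.4));
    }
}
```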
framework_Heartbeat_getInterval | /**
* @return the interval at which heartbeat requests are sent
*/
public int getInterval() {
return interval;
} | 3.68 |
hbase_FavoredNodeLoadBalancer_availableServersContains | // Check the hostname and port and return the server name from the servers list
// that matches (the favoredNode will have a startcode of -1, but we want the real
// server with the legitimate startcode).
private ServerName availableServersContains(List<ServerName> servers, ServerName favoredNode) {
for (ServerName server : servers) {
if (ServerName.isSameAddress(favoredNode, server)) {
return server;
}
}
return null;
} | 3.68 |
open-banking-gateway_BaseDatasafeDbStorageService_objectExists | /**
* Checks if object exists within Datasafe storage.
* @param absoluteLocation Absolute path including protocol to the object. I.e. {@code db://storage/deadbeef}
* @return If the object at the {@code absoluteLocation} exists
*/
@Override
@Transactional
public boolean objectExists(AbsoluteLocation absoluteLocation) {
return handlers.get(deduceTable(absoluteLocation))
.read(deduceId(absoluteLocation))
.isPresent();
} | 3.68 |
flink_RetryingRegistration_startRegistration | /**
* This method resolves the target address to a callable gateway and starts the registration
* after that.
*/
@SuppressWarnings("unchecked")
public void startRegistration() {
if (canceled) {
// we already got canceled
return;
}
try {
// trigger resolution of the target address to a callable gateway
final CompletableFuture<G> rpcGatewayFuture;
if (FencedRpcGateway.class.isAssignableFrom(targetType)) {
rpcGatewayFuture =
(CompletableFuture<G>)
rpcService.connect(
targetAddress,
fencingToken,
targetType.asSubclass(FencedRpcGateway.class));
} else {
rpcGatewayFuture = rpcService.connect(targetAddress, targetType);
}
// upon success, start the registration attempts
CompletableFuture<Void> rpcGatewayAcceptFuture =
rpcGatewayFuture.thenAcceptAsync(
(G rpcGateway) -> {
log.info("Resolved {} address, beginning registration", targetName);
register(
rpcGateway,
1,
retryingRegistrationConfiguration
.getInitialRegistrationTimeoutMillis());
},
rpcService.getScheduledExecutor());
// upon failure, retry, unless this is cancelled
rpcGatewayAcceptFuture.whenCompleteAsync(
(Void v, Throwable failure) -> {
if (failure != null && !canceled) {
final Throwable strippedFailure =
ExceptionUtils.stripCompletionException(failure);
if (log.isDebugEnabled()) {
log.debug(
"Could not resolve {} address {}, retrying in {} ms.",
targetName,
targetAddress,
retryingRegistrationConfiguration.getErrorDelayMillis(),
strippedFailure);
} else {
log.info(
"Could not resolve {} address {}, retrying in {} ms: {}",
targetName,
targetAddress,
retryingRegistrationConfiguration.getErrorDelayMillis(),
strippedFailure.getMessage());
}
startRegistrationLater(
retryingRegistrationConfiguration.getErrorDelayMillis());
}
},
rpcService.getScheduledExecutor());
} catch (Throwable t) {
completionFuture.completeExceptionally(t);
cancel();
}
} | 3.68 |
dubbo_Router_route | // Add since 2.7.0
@Override
default <T> List<Invoker<T>> route(List<Invoker<T>> invokers, URL url, Invocation invocation) throws RpcException {
List<com.alibaba.dubbo.rpc.Invoker<T>> invs = invokers.stream()
.map(invoker -> new com.alibaba.dubbo.rpc.Invoker.CompatibleInvoker<T>(invoker))
.collect(Collectors.toList());
List<com.alibaba.dubbo.rpc.Invoker<T>> res = this.route(
invs,
new com.alibaba.dubbo.common.DelegateURL(url),
new com.alibaba.dubbo.rpc.Invocation.CompatibleInvocation(invocation));
return res.stream()
.map(inv -> inv.getOriginal())
.filter(Objects::nonNull)
.collect(Collectors.toList());
} | 3.68 |
framework_VDragAndDropWrapper_setDragAndDropWidget | /**
* Set the widget that will be used as the drag image when using
* DragStartMode {@link COMPONENT_OTHER}.
*
* @param widget the widget to use as the drag image
*/
public void setDragAndDropWidget(Widget widget) {
dragImageWidget = widget;
} | 3.68 |
flink_JoinedStreams_evictor | /**
* Sets the {@code Evictor} that should be used to evict elements from a window before
* emission.
*
* <p>Note: When using an evictor window performance will degrade significantly, since
* pre-aggregation of window results cannot be used.
*/
@PublicEvolving
public WithWindow<T1, T2, KEY, W> evictor(
Evictor<? super TaggedUnion<T1, T2>, ? super W> newEvictor) {
return new WithWindow<>(
input1,
input2,
keySelector1,
keySelector2,
keyType,
windowAssigner,
trigger,
newEvictor,
allowedLateness);
} | 3.68 |
flink_WindowReader_process | /**
* Reads window state generated without any preaggregation such as {@code WindowedStream#apply}
* and {@code WindowedStream#process}.
*
* @param uid The uid of the operator.
* @param readerFunction The window reader function.
* @param keyType The key type of the window.
* @param stateType The type of records stored in state.
* @param outputType The output type of the reader function.
* @param <K> The type of the key.
* @param <T> The type of the records stored in state.
* @param <OUT> The output type of the reader function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException If the savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataSource<OUT> process(
String uid,
WindowReaderFunction<T, OUT, K, W> readerFunction,
TypeInformation<K> keyType,
TypeInformation<T> stateType,
TypeInformation<OUT> outputType)
throws IOException {
WindowReaderOperator<?, K, T, W, OUT> operator =
WindowReaderOperator.process(readerFunction, keyType, windowSerializer, stateType);
return readWindowOperator(uid, outputType, operator);
} | 3.68 |
framework_GridDropTargetConnector_getTargetElement | /**
* Gets the target element for a dragover or drop event.
*
* @param source
* the event target of the event
* @return the element that should be handled as the target of the event
*/
protected Element getTargetElement(Element source) {
final Element tableWrapper = getDropTargetElement();
final BodyRowContainer gridBody = getGridBody();
final Range visibleRowRange = getEscalator().getVisibleRowRange();
if (!isDroppingOnRowsPossible()) {
return tableWrapper;
}
while (!Objects.equals(source, tableWrapper)) {
// the drop might happen on top of header, body or footer rows
if (TableRowElement.is(source)) {
String parentTagName = source.getParentElement().getTagName();
if ("thead".equalsIgnoreCase(parentTagName)) {
// for empty grid or ON_TOP mode, drop as last row,
// otherwise as above first visible row
if (visibleRowRange.isEmpty()
|| getState().dropMode == DropMode.ON_TOP) {
return tableWrapper;
} else {
return gridBody
.getRowElement(visibleRowRange.getStart());
}
} else if ("tfoot".equalsIgnoreCase(parentTagName)) {
// for empty grid or ON_TOP mode, drop as last row,
// otherwise as below last visible row
if (visibleRowRange.isEmpty()
|| getState().dropMode == DropMode.ON_TOP) {
return tableWrapper;
} else {
return gridBody
.getRowElement(visibleRowRange.getEnd() - 1);
}
} else { // parent is tbody
return source;
}
}
source = source.getParentElement();
}
// the drag is on top of the tablewrapper, if the drop mode is ON_TOP,
// then there is no target row for the drop
if (getState().dropMode == DropMode.ON_TOP) {
return tableWrapper;
}
// if dragged under the last row to empty space, drop target
// needs to be below the last row
return gridBody.getRowElement(visibleRowRange.getEnd() - 1);
} | 3.68 |
hbase_TableDescriptorBuilder_parseFrom | /**
* @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix
* @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code>
* @see #toByteArray()
*/
private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
}
int pblen = ProtobufUtil.lengthOfPBMagic();
HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
return ProtobufUtil.toTableDescriptor(builder.build());
} catch (IOException e) {
throw new DeserializationException(e);
}
} | 3.68 |
pulsar_ModularLoadManagerImpl_updateAll | // Update both the broker data and the bundle data.
public void updateAll() {
if (log.isDebugEnabled()) {
log.debug("Updating broker and bundle data for loadreport");
}
cleanupDeadBrokersData();
updateAllBrokerData();
updateBundleData();
// broker has latest load-report: check if any bundle requires split
checkNamespaceBundleSplit();
} | 3.68 |
hbase_HttpServer_isMissing | /**
* Returns true if the argument is null or consists only of whitespace
*/
private boolean isMissing(String value) {
if (null == value) {
return true;
}
return value.trim().isEmpty();
} | 3.68 |
querydsl_LuceneSerializer_convert | /**
* template method
*
* @param leftHandSide left hand side
* @param rightHandSide right hand side
* @return results
*/
protected String[] convert(Path<?> leftHandSide, Object rightHandSide) {
String str = rightHandSide.toString();
if (lowerCase) {
str = str.toLowerCase();
}
if (splitTerms) {
if (str.equals("")) {
return new String[] {str};
} else {
return str.split("\\s+");
}
} else {
return new String[] {str};
}
} | 3.68 |
hmily_PropertyName_getElement | /**
* Gets element.
*
* @param index the index
* @return the element
*/
public String getElement(final int index) {
return getElements()[index];
} | 3.68 |
morf_DatabaseMetaDataProvider_getDbName | /**
* The name as retrieved by the JDBC driver.
*
* @return name as expected in the database
*/
public String getDbName() {
return getAName();
} | 3.68 |
morf_SqlScriptExecutor_afterExecute | /**
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.SqlScriptVisitor#afterExecute(java.lang.String,
* long)
*/
@Override
public void afterExecute(String sql, long numberOfRowsUpdated) {
// Defaults to no-op
} | 3.68 |
hudi_LSMTimeline_latestSnapshotManifest | /**
* Reads the file list from the manifest file for the latest snapshot.
*/
public static HoodieLSMTimelineManifest latestSnapshotManifest(HoodieTableMetaClient metaClient, int latestVersion) {
if (latestVersion < 0) {
// there is no valid snapshot of the timeline.
return HoodieLSMTimelineManifest.EMPTY;
}
// read and deserialize the valid files.
byte[] content = FileIOUtils.readDataFromPath(metaClient.getFs(), getManifestFilePath(metaClient, latestVersion)).get();
try {
return HoodieLSMTimelineManifest.fromJsonString(new String(content, StandardCharsets.UTF_8), HoodieLSMTimelineManifest.class);
} catch (Exception e) {
throw new HoodieException("Error deserializing manifest entries", e);
}
} | 3.68 |
framework_Escalator_getSpacerHeightsSumUntilIndex | /**
* Gets the amount of pixels occupied by spacers until a logical row
* index.
*
* @param logicalIndex
* a logical row index
* @return the pixels occupied by spacers up until {@code logicalIndex}
*/
@SuppressWarnings("boxing")
public double getSpacerHeightsSumUntilIndex(int logicalIndex) {
return getHeights(
rowIndexToSpacer.headMap(logicalIndex, false).values());
} | 3.68 |
hadoop_TimestampGenerator_getSupplementedTimestamp | /**
* Returns a timestamp multiplied with TS_MULTIPLIER and the last few digits of the
* application id.
*
* Unlikely scenario of generating a duplicate timestamp: if more than 1M
* concurrent apps are running in one flow run AND write to the same
* column at the same time, then say appId of 1M and 1 will overlap
* with appId of 001 and there may be collisions for that flow run's
* specific column.
*
* @param incomingTS Timestamp to be converted.
* @param appId Application Id.
* @return a timestamp multiplied with TS_MULTIPLIER and last few digits of
* application id
*/
public static long getSupplementedTimestamp(long incomingTS, String appId) {
long suffix = getAppIdSuffix(appId);
long outgoingTS = incomingTS * TS_MULTIPLIER + suffix;
return outgoingTS;
} | 3.68 |
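
A small arithmetic sketch of the supplement scheme described in the Javadoc. The multiplier value of 1,000,000 and the suffix derivation below are assumptions for illustration; the real values live in TimestampGenerator (TS_MULTIPLIER, getAppIdSuffix) and may differ.

```java
public class SupplementedTimestampSketch {
    // Assumed for illustration; the real constant is TimestampGenerator.TS_MULTIPLIER.
    private static final long TS_MULTIPLIER = 1_000_000L;

    // Toy stand-in for getAppIdSuffix: keep the last 6 digits of the app's sequence number.
    static long appIdSuffix(long appSequenceNumber) {
        return appSequenceNumber % TS_MULTIPLIER;
    }

    public static void main(String[] args) {
        long incomingTs = 1_700_000_000_000L; // write time in millis
        long suffix = appIdSuffix(42L);
        long outgoingTs = incomingTs * TS_MULTIPLIER + suffix;
        System.out.println(outgoingTs);                 // 1700000000000000042
        // The original write time can be recovered by integer division.
        System.out.println(outgoingTs / TS_MULTIPLIER); // 1700000000000
    }
}
```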
hadoop_DockerCommand_addCommandArguments | /**
* Adds a value under the given key to the command arguments; this method is
* only meant for use by subclasses.
*
* @param key name of the key to be added
* @param value value of the key
*/
protected final void addCommandArguments(String key, String value) {
List<String> list = commandArguments.get(key);
if (list != null) {
list.add(value);
return;
}
list = new ArrayList<>();
list.add(value);
this.commandArguments.put(key, list);
} | 3.68 |
hudi_HoodieFileGroup_getLatestFileSlicesIncludingInflight | /**
* Get the latest file slices including inflight ones.
*/
public Option<FileSlice> getLatestFileSlicesIncludingInflight() {
return Option.fromJavaOptional(getAllFileSlicesIncludingInflight().findFirst());
} | 3.68 |
framework_SQLContainer_getReferencedItem | /**
* Fetches the referenced item from the target SQLContainer.
*
* @param itemId
* Item Id of the reference source (from this container)
* @param refdCont
* Target SQLContainer of the reference
* @return The referenced item, or null if not found
*/
public Item getReferencedItem(Object itemId, SQLContainer refdCont) {
return refdCont.getItem(getReferencedItemId(itemId, refdCont));
} | 3.68 |
framework_VScrollTable_getPreviousRow | /**
* Returns the previous row from the given row.
*
* @param row
* The row to calculate from
* @param offset
* The offset to apply when calculating the previous row
* @return The previous row or null if no row exists
*/
private VScrollTableRow getPreviousRow(VScrollTableRow row, int offset) {
final Iterator<Widget> it = scrollBody.iterator();
final Iterator<Widget> offsetIt = scrollBody.iterator();
VScrollTableRow prev = null;
while (it.hasNext()) {
VScrollTableRow r = (VScrollTableRow) it.next();
if (offset < 0) {
prev = (VScrollTableRow) offsetIt.next();
}
if (r == row) {
return prev;
}
offset--;
}
return null;
} | 3.68 |
flink_HadoopReduceCombineFunction_writeObject | /**
* Custom serialization methods.
*
* @see <a
* href="http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html">http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html</a>
*/
private void writeObject(final ObjectOutputStream out) throws IOException {
out.writeObject(reducer.getClass());
out.writeObject(combiner.getClass());
jobConf.write(out);
} | 3.68 |
graphhopper_VectorTile_clearName | /**
* <code>required string name = 1;</code>
*/
public Builder clearName() {
bitField0_ = (bitField0_ & ~0x00000002);
name_ = getDefaultInstance().getName();
onChanged();
return this;
} | 3.68 |
zxing_OneDimensionalCodeWriter_renderResult | /**
* @return a byte array of horizontal pixels (0 = white, 1 = black)
*/
private static BitMatrix renderResult(boolean[] code, int width, int height, int sidesMargin) {
int inputWidth = code.length;
// Add quiet zone on both sides.
int fullWidth = inputWidth + sidesMargin;
int outputWidth = Math.max(width, fullWidth);
int outputHeight = Math.max(1, height);
int multiple = outputWidth / fullWidth;
int leftPadding = (outputWidth - (inputWidth * multiple)) / 2;
BitMatrix output = new BitMatrix(outputWidth, outputHeight);
for (int inputX = 0, outputX = leftPadding; inputX < inputWidth; inputX++, outputX += multiple) {
if (code[inputX]) {
output.setRegion(outputX, 0, multiple, outputHeight);
}
}
return output;
} | 3.68 |
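
A stripped-down sketch of the same quiet-zone and scaling arithmetic using a plain 2D boolean array instead of zxing's BitMatrix. The class and method names are illustrative and not part of zxing's API.

```java
public class RenderSketch {
    // Mirrors the arithmetic in renderResult: pad with a quiet zone, scale each
    // input module to `multiple` output columns, and centre the result.
    static boolean[][] render(boolean[] code, int width, int height, int sidesMargin) {
        int inputWidth = code.length;
        int fullWidth = inputWidth + sidesMargin;        // quiet zone on both sides
        int outputWidth = Math.max(width, fullWidth);
        int outputHeight = Math.max(1, height);
        int multiple = outputWidth / fullWidth;          // pixels per input module
        int leftPadding = (outputWidth - inputWidth * multiple) / 2;

        boolean[][] output = new boolean[outputHeight][outputWidth];
        for (int inputX = 0, outputX = leftPadding; inputX < inputWidth; inputX++, outputX += multiple) {
            if (code[inputX]) {
                for (int y = 0; y < outputHeight; y++) {
                    for (int x = outputX; x < outputX + multiple; x++) {
                        output[y][x] = true;             // black column of width `multiple`
                    }
                }
            }
        }
        return output;
    }

    public static void main(String[] args) {
        boolean[] code = {true, false, true, true, false};
        boolean[][] out = render(code, 20, 1, 10);
        StringBuilder sb = new StringBuilder();
        for (boolean b : out[0]) sb.append(b ? '#' : '.');
        System.out.println(sb); // quiet zone, then each module widened to `multiple` pixels
    }
}
```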
flink_ScopeFormats_fromConfig | /**
* Creates the scope formats as defined in the given configuration.
*
* @param config The configuration that defines the formats
* @return The ScopeFormats parsed from the configuration
*/
public static ScopeFormats fromConfig(Configuration config) {
String jmFormat = config.getString(MetricOptions.SCOPE_NAMING_JM);
String jmJobFormat = config.getString(MetricOptions.SCOPE_NAMING_JM_JOB);
String tmFormat = config.getString(MetricOptions.SCOPE_NAMING_TM);
String tmJobFormat = config.getString(MetricOptions.SCOPE_NAMING_TM_JOB);
String taskFormat = config.getString(MetricOptions.SCOPE_NAMING_TASK);
String operatorFormat = config.getString(MetricOptions.SCOPE_NAMING_OPERATOR);
String jmOperatorFormat = config.getString(MetricOptions.SCOPE_NAMING_JM_OPERATOR);
return new ScopeFormats(
jmFormat,
jmJobFormat,
tmFormat,
tmJobFormat,
taskFormat,
operatorFormat,
jmOperatorFormat);
} | 3.68 |
flink_HiveParserBaseSemanticAnalyzer_readProps | /**
* Converts parsed key/value properties pairs into a map.
*
* @param prop HiveParserASTNode parent of the key/value pairs
* @param mapProp property map which receives the mappings
*/
public static void readProps(HiveParserASTNode prop, Map<String, String> mapProp) {
for (int propChild = 0; propChild < prop.getChildCount(); propChild++) {
String key = unescapeSQLString(prop.getChild(propChild).getChild(0).getText());
String value = null;
if (prop.getChild(propChild).getChild(1) != null) {
value = unescapeSQLString(prop.getChild(propChild).getChild(1).getText());
}
mapProp.put(key, value);
}
} | 3.68 |
druid_MySqlStatementParser_parseSpStatement | /**
* zhujun [[email protected]]
* Parses a stored procedure (sp) statement.
*/
public SQLStatement parseSpStatement() {
// update
if (lexer.token() == (Token.UPDATE)) {
return parseUpdateStatement();
}
// create
if (lexer.token() == (Token.CREATE)) {
return parseCreate();
}
// insert
if (lexer.token() == Token.INSERT) {
return parseInsert();
}
// delete
if (lexer.token() == (Token.DELETE)) {
return parseDeleteStatement();
}
// begin
if (lexer.token() == Token.BEGIN) {
return this.parseBlock();
}
// select
if (lexer.token() == Token.LPAREN) {
Lexer.SavePoint savePoint = lexer.markOut();
lexer.nextToken();
if (lexer.token() == Token.SELECT) {
lexer.reset(savePoint);
return this.parseSelect();
} else {
throw new ParserException("TODO. " + lexer.info());
}
}
// assign statement
if (lexer.token() == Token.SET) {
return parseAssign();
}
throw new ParserException("error sp_statement. " + lexer.info());
} | 3.68 |
hadoop_CosNFileSystem_validatePath | /**
* Validate the path from the bottom up.
*
* @param path The path to be validated
* @throws FileAlreadyExistsException The specified path is an existing file
* @throws IOException if getting the file status of the
* specified path throws
* an IOException
*/
private void validatePath(Path path) throws IOException {
Path parent = path.getParent();
do {
try {
FileStatus fileStatus = getFileStatus(parent);
if (fileStatus.isDirectory()) {
break;
} else {
throw new FileAlreadyExistsException(String.format(
"Can't make directory for path '%s', it is a file.", parent));
}
} catch (FileNotFoundException e) {
LOG.debug("The Path: [{}] does not exist.", path);
}
parent = parent.getParent();
} while (parent != null);
} | 3.68 |
hbase_BucketCache_doDrain | /**
* Flush the entries in ramCache to the IOEngine and add bucket entries to backingMap. Process all
* entries that are passed in, even on failure, being sure to remove them from ramCache; otherwise
* we'll never undo the references and we'll OOME.
* @param entries Presumes list passed in here will be processed by this invocation only. No
* interference expected.
*/
void doDrain(final List<RAMQueueEntry> entries, ByteBuffer metaBuff) throws InterruptedException {
if (entries.isEmpty()) {
return;
}
// This method is a little hard to follow. We run through the passed in entries and for each
// successful add, we add a non-null BucketEntry to the below bucketEntries. Later we must
// do cleanup making sure we've cleared ramCache of all entries regardless of whether we
// successfully added the item to the bucketcache; if we don't do the cleanup, we'll OOME by
// filling ramCache. We do the clean up by again running through the passed in entries
// doing extra work when we find a non-null bucketEntries corresponding entry.
final int size = entries.size();
BucketEntry[] bucketEntries = new BucketEntry[size];
// Index updated inside loop if success or if we can't succeed. We retry if cache is full
// when we go to add an entry by going around the loop again without upping the index.
int index = 0;
while (cacheEnabled && index < size) {
RAMQueueEntry re = null;
try {
re = entries.get(index);
if (re == null) {
LOG.warn("Couldn't get entry or changed on us; who else is messing with it?");
index++;
continue;
}
BlockCacheKey cacheKey = re.getKey();
if (ramCache.containsKey(cacheKey)) {
blocksByHFile.add(cacheKey);
}
// Reset the position for reuse.
// It should be guaranteed that the data in the metaBuff has been transferred to the
// ioEngine safely. Otherwise, this reuse is problematic. Fortunately, the data is already
// transferred with our current IOEngines. Should take care, when we have new kinds of
// IOEngine in the future.
metaBuff.clear();
BucketEntry bucketEntry =
re.writeToCache(ioEngine, bucketAllocator, realCacheSize, this::createRecycler, metaBuff);
// Successfully added. Up index and add bucketEntry. Clear io exceptions.
bucketEntries[index] = bucketEntry;
if (ioErrorStartTime > 0) {
ioErrorStartTime = -1;
}
index++;
} catch (BucketAllocatorException fle) {
long currTs = EnvironmentEdgeManager.currentTime();
cacheStats.allocationFailed(); // Record the warning.
if (
allocFailLogPrevTs == 0 || (currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD
) {
LOG.warn(getAllocationFailWarningMessage(fle, re));
allocFailLogPrevTs = currTs;
}
// Presume can't add. Too big? Move index on. Entry will be cleared from ramCache below.
bucketEntries[index] = null;
index++;
} catch (CacheFullException cfe) {
// Cache full when we tried to add. Try freeing space and then retrying (don't up index)
if (!freeInProgress) {
freeSpace("Full!");
} else {
Thread.sleep(50);
}
} catch (IOException ioex) {
// Hopefully transient. Retry. checkIOErrorIsTolerated disables cache if problem.
LOG.error("Failed writing to bucket cache", ioex);
checkIOErrorIsTolerated();
}
}
// Make sure data pages are written on media before we update maps.
try {
ioEngine.sync();
} catch (IOException ioex) {
LOG.error("Failed syncing IO engine", ioex);
checkIOErrorIsTolerated();
// Since we failed sync, free the blocks in bucket allocator
for (int i = 0; i < entries.size(); ++i) {
BucketEntry bucketEntry = bucketEntries[i];
if (bucketEntry != null) {
bucketAllocator.freeBlock(bucketEntry.offset(), bucketEntry.getLength());
bucketEntries[i] = null;
}
}
}
// Now add to backingMap if successfully added to bucket cache. Remove from ramCache if
// success or error.
for (int i = 0; i < size; ++i) {
BlockCacheKey key = entries.get(i).getKey();
// Only add if non-null entry.
if (bucketEntries[i] != null) {
putIntoBackingMap(key, bucketEntries[i]);
if (ioEngine.isPersistent()) {
setCacheInconsistent(true);
}
}
// Always remove from ramCache even if we failed adding it to the block cache above.
boolean existed = ramCache.remove(key, re -> {
if (re != null) {
heapSize.add(-1 * re.getData().heapSize());
}
});
if (!existed && bucketEntries[i] != null) {
// Block should have already been evicted. Remove it and free space.
final BucketEntry bucketEntry = bucketEntries[i];
bucketEntry.withWriteLock(offsetLock, () -> {
if (backingMap.remove(key, bucketEntry)) {
blockEvicted(key, bucketEntry, false, false);
}
return null;
});
}
}
long used = bucketAllocator.getUsedSize();
if (used > acceptableSize()) {
freeSpace("Used=" + used + " > acceptable=" + acceptableSize());
}
return;
} | 3.68 |
hadoop_BalanceProcedure_isSchedulerShutdown | /**
* Returns whether the scheduler of the job has been shut down.
*/
protected boolean isSchedulerShutdown() {
return job.isSchedulerShutdown();
} | 3.68 |
hadoop_AMRMProxyService_getApplicationAttemptId | /**
* Gets the application attempt identifier.
*
* @return the application attempt identifier
*/
public synchronized ApplicationAttemptId getApplicationAttemptId() {
return applicationAttemptId;
} | 3.68 |
framework_VScrollTable_isFocusable | /**
* Can the Table be focused?
*
* @return True if the table can be focused, else false
*/
public boolean isFocusable() {
if (scrollBody != null && enabled) {
return !(!hasHorizontalScrollbar() && !hasVerticalScrollbar()
&& !isSelectable());
}
return false;
} | 3.68 |
framework_RowReference_getRow | /**
* Gets the row data object.
*
* @return the row object
*/
public T getRow() {
return row;
} | 3.68 |
hudi_ClusteringTask_newBuilder | /**
* Utility to create builder for {@link ClusteringTask}.
*
* @return Builder for {@link ClusteringTask}.
*/
public static Builder newBuilder() {
return new Builder();
} | 3.68 |
flink_ClusterEntrypointUtils_parseParametersOrExit | /**
* Parses passed String array using the parameter definitions of the passed {@code
* ParserResultFactory}. The method will call {@code System.exit} and print the usage
* information to stdout in case of a parsing error.
*
* @param args The String array that shall be parsed.
* @param parserResultFactory The {@code ParserResultFactory} that collects the parameter
* parsing instructions.
* @param mainClass The main class initiating the parameter parsing.
* @param <T> The parsing result type.
* @return The parsing result.
*/
public static <T> T parseParametersOrExit(
String[] args, ParserResultFactory<T> parserResultFactory, Class<?> mainClass) {
final CommandLineParser<T> commandLineParser = new CommandLineParser<>(parserResultFactory);
try {
return commandLineParser.parse(args);
} catch (Exception e) {
LOG.error("Could not parse command line arguments {}.", args, e);
commandLineParser.printHelp(mainClass.getSimpleName());
System.exit(ClusterEntrypoint.STARTUP_FAILURE_RETURN_CODE);
}
return null;
} | 3.68 |
shardingsphere-elasticjob_JobScheduler_shutdown | /**
* Shutdown job.
*/
public void shutdown() {
setUpFacade.tearDown();
schedulerFacade.shutdownInstance();
jobExecutor.shutdown();
} | 3.68 |
AreaShop_GithubUpdateCheck_withVersionComparator | /**
* Change the version comparator.
* @param versionComparator VersionComparator to use for checking if one version is newer than the other
* @return this
*/
public GithubUpdateCheck withVersionComparator(VersionComparator versionComparator) {
this.versionComparator = versionComparator;
return this;
} | 3.68 |
hbase_SnapshotManager_cloneSnapshot | /**
* Clone the specified snapshot into a new table. The operation will fail if the destination table
* has a snapshot or restore in progress.
* @param snapshot Snapshot Descriptor
* @param tableDescriptor Table Descriptor of the table to create
* @param nonceKey unique identifier to prevent duplicated RPC
* @return procId the ID of the clone snapshot procedure
*/
synchronized long cloneSnapshot(final SnapshotDescription snapshot,
final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl,
final String customSFT) throws HBaseSnapshotException {
TableName tableName = tableDescriptor.getTableName();
// make sure we aren't running a snapshot on the same table
if (isTableTakingAnySnapshot(tableName)) {
throw new RestoreSnapshotException("Snapshot in progress on the restore table=" + tableName);
}
// make sure we aren't running a restore on the same table
if (isRestoringTable(tableName)) {
throw new RestoreSnapshotException("Restore already in progress on the table=" + tableName);
}
try {
long procId = master.getMasterProcedureExecutor().submitProcedure(
new CloneSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
tableDescriptor, snapshot, restoreAcl, customSFT),
nonceKey);
this.restoreTableToProcIdMap.put(tableName, procId);
return procId;
} catch (Exception e) {
String msg = "Couldn't clone the snapshot="
+ ClientSnapshotDescriptionUtils.toString(snapshot) + " on table=" + tableName;
LOG.error(msg, e);
throw new RestoreSnapshotException(msg, e);
}
} | 3.68 |
hadoop_HdfsFileStatus_fileId | /**
* Set the fileId for this entity (default = -1).
* @param fileId FileId
* @return This Builder instance
*/
public Builder fileId(long fileId) {
this.fileId = fileId;
return this;
} | 3.68 |
hadoop_BoundedByteArrayOutputStream_reset | /** Reset the buffer */
public void reset() {
this.limit = buffer.length - startOffset;
this.currentPointer = startOffset;
} | 3.68 |
flink_DynamicProcessingTimeSessionWindows_mergeWindows | /** Merge overlapping {@link TimeWindow}s. */
@Override
public void mergeWindows(Collection<TimeWindow> windows, MergeCallback<TimeWindow> c) {
TimeWindow.mergeWindows(windows, c);
} | 3.68 |
hbase_MetricsSource_setAgeOfLastShippedOpByTable | /**
* Set the age of the last edit that was shipped, grouped by table.
* @param timestamp write time of the edit
* @param tableName String as group and tableName
*/
public void setAgeOfLastShippedOpByTable(long timestamp, String tableName) {
long age = EnvironmentEdgeManager.currentTime() - timestamp;
this.getSingleSourceSourceByTable()
.computeIfAbsent(tableName, t -> CompatibilitySingletonFactory
.getInstance(MetricsReplicationSourceFactory.class).getTableSource(t))
.setLastShippedAge(age);
} | 3.68 |
flink_StateMachineExample_main | /**
* Main entry point for the program.
*
* @param args The command line arguments.
*/
public static void main(String[] args) throws Exception {
// ---- print some usage help ----
System.out.println(
"Usage with built-in data generator: StateMachineExample [--error-rate <probability-of-invalid-transition>] [--sleep <sleep-per-record-in-ms> | --rps <records-per-second>]");
System.out.println(
"Usage with Kafka: StateMachineExample --kafka-topic <topic> [--brokers <brokers>]");
System.out.println("Options for both the above setups: ");
System.out.println("\t[--backend <hashmap|rocks>]");
System.out.println("\t[--checkpoint-dir <filepath>]");
System.out.println("\t[--incremental-checkpoints <true|false>]");
System.out.println("\t[--output <filepath> OR null for stdout]");
System.out.println();
// ---- determine whether to use the built-in source, or read from Kafka ----
final DataStream<Event> events;
final ParameterTool params = ParameterTool.fromArgs(args);
// create the environment to create streams and configure execution
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(2000L);
final String stateBackend = params.get("backend", "memory");
if ("hashmap".equals(stateBackend)) {
final String checkpointDir = params.get("checkpoint-dir");
env.setStateBackend(new HashMapStateBackend());
env.getCheckpointConfig().setCheckpointStorage(checkpointDir);
} else if ("rocks".equals(stateBackend)) {
final String checkpointDir = params.get("checkpoint-dir");
boolean incrementalCheckpoints = params.getBoolean("incremental-checkpoints", false);
env.setStateBackend(new EmbeddedRocksDBStateBackend(incrementalCheckpoints));
env.getCheckpointConfig().setCheckpointStorage(checkpointDir);
}
if (params.has("kafka-topic")) {
// set up the Kafka reader
String kafkaTopic = params.get("kafka-topic");
String brokers = params.get("brokers", "localhost:9092");
System.out.printf("Reading from kafka topic %s @ %s\n", kafkaTopic, brokers);
System.out.println();
KafkaSource<Event> source =
KafkaSource.<Event>builder()
.setBootstrapServers(brokers)
.setGroupId("stateMachineExample")
.setTopics(kafkaTopic)
.setDeserializer(
KafkaRecordDeserializationSchema.valueOnly(
new EventDeSerializationSchema()))
.setStartingOffsets(OffsetsInitializer.latest())
.build();
events =
env.fromSource(
source, WatermarkStrategy.noWatermarks(), "StateMachineExampleSource");
} else {
final double errorRate = params.getDouble("error-rate", 0.0);
final int sleep = params.getInt("sleep", 1);
final double recordsPerSecond =
params.getDouble("rps", rpsFromSleep(sleep, env.getParallelism()));
System.out.printf(
"Using standalone source with error rate %f and %.1f records per second\n",
errorRate, recordsPerSecond);
System.out.println();
GeneratorFunction<Long, Event> generatorFunction =
new EventsGeneratorFunction(errorRate);
DataGeneratorSource<Event> eventGeneratorSource =
new DataGeneratorSource<>(
generatorFunction,
Long.MAX_VALUE,
RateLimiterStrategy.perSecond(recordsPerSecond),
TypeInformation.of(Event.class));
events =
env.fromSource(
eventGeneratorSource,
WatermarkStrategy.noWatermarks(),
"Events Generator Source");
}
// ---- main program ----
final String outputFile = params.get("output");
// make parameters available in the web interface
env.getConfig().setGlobalJobParameters(params);
DataStream<Alert> alerts =
events
// partition on the address to make sure equal addresses
// end up in the same state machine flatMap function
.keyBy(Event::sourceAddress)
// the function that evaluates the state machine over the sequence of events
.flatMap(new StateMachineMapper());
// output the alerts to std-out
if (outputFile == null) {
alerts.print();
} else {
alerts.sinkTo(
FileSink.<Alert>forRowFormat(
new Path(outputFile), new SimpleStringEncoder<>())
.withRollingPolicy(
DefaultRollingPolicy.builder()
.withMaxPartSize(MemorySize.ofMebiBytes(1))
.withRolloverInterval(Duration.ofSeconds(10))
.build())
.build())
.setParallelism(1)
.name("output");
}
// trigger program execution
env.execute("State machine job");
} | 3.68 |
druid_Base64_byteArrayToAltBase64 | /**
* Translates the specified byte array into an "alternate representation" Base64 string. This non-standard variant
* uses an alphabet that does not contain the uppercase alphabetic characters, which makes it suitable for use in
* situations where case-folding occurs.
*/
public static String byteArrayToAltBase64(byte[] a) {
return byteArrayToBase64(a, true);
} | 3.68 |
flink_ConnectionUtils_findAddressUsingStrategy | /**
* Try to find a local address which allows as to connect to the targetAddress using the given
* strategy.
*
* @param strategy Depending on the strategy, the method will enumerate all interfaces, trying
* to connect to the target address
* @param targetAddress The address we try to connect to
* @param logging Boolean indicating the logging verbosity
* @return null if we could not find an address using this strategy, otherwise, the local
* address.
* @throws IOException
*/
private static InetAddress findAddressUsingStrategy(
AddressDetectionState strategy, InetSocketAddress targetAddress, boolean logging)
throws IOException {
// try LOCAL_HOST strategy independent of the network interfaces
if (strategy == AddressDetectionState.LOCAL_HOST) {
InetAddress localhostName;
try {
localhostName = InetAddress.getLocalHost();
} catch (UnknownHostException uhe) {
LOG.warn("Could not resolve local hostname to an IP address: {}", uhe.getMessage());
return null;
}
if (tryToConnect(localhostName, targetAddress, strategy.getTimeout(), logging)) {
LOG.debug(
"Using InetAddress.getLocalHost() immediately for the connecting address");
// Here, we are not calling tryLocalHostBeforeReturning() because it is the
// LOCAL_HOST strategy
return localhostName;
} else {
return null;
}
}
final InetAddress address = targetAddress.getAddress();
if (address == null) {
return null;
}
final byte[] targetAddressBytes = address.getAddress();
// for each network interface
Enumeration<NetworkInterface> e = NetworkInterface.getNetworkInterfaces();
while (e.hasMoreElements()) {
NetworkInterface netInterface = e.nextElement();
// for each address of the network interface
Enumeration<InetAddress> ee = netInterface.getInetAddresses();
while (ee.hasMoreElements()) {
InetAddress interfaceAddress = ee.nextElement();
switch (strategy) {
case ADDRESS:
if (hasCommonPrefix(targetAddressBytes, interfaceAddress.getAddress())) {
LOG.debug(
"Target address {} and local address {} share prefix - trying to connect.",
targetAddress,
interfaceAddress);
if (tryToConnect(
interfaceAddress,
targetAddress,
strategy.getTimeout(),
logging)) {
return tryLocalHostBeforeReturning(
interfaceAddress, targetAddress, logging);
}
}
break;
case FAST_CONNECT:
case SLOW_CONNECT:
LOG.debug(
"Trying to connect to {} from local address {} with timeout {}",
targetAddress,
interfaceAddress,
strategy.getTimeout());
if (tryToConnect(
interfaceAddress, targetAddress, strategy.getTimeout(), logging)) {
return tryLocalHostBeforeReturning(
interfaceAddress, targetAddress, logging);
}
break;
case HEURISTIC:
if (LOG.isDebugEnabled()) {
LOG.debug(
"Choosing InetAddress.getLocalHost() address as a heuristic.");
}
return InetAddress.getLocalHost();
default:
throw new RuntimeException("Unsupported strategy: " + strategy);
}
} // end for each address of the interface
} // end for each interface
return null;
} | 3.68 |
hadoop_PeriodicService_getIntervalMs | /**
* Get the interval for the periodic service.
*
* @return Interval in milliseconds.
*/
protected long getIntervalMs() {
return this.intervalMs;
} | 3.68 |
dubbo_StringUtils_isEmpty | /**
* is empty string.
*
* @param str source string.
* @return is empty.
*/
public static boolean isEmpty(String str) {
return str == null || str.isEmpty();
} | 3.68 |
hadoop_TypedBytesInput_readRawDouble | /**
* Reads the raw bytes following a <code>Type.DOUBLE</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawDouble() throws IOException {
byte[] bytes = new byte[9];
bytes[0] = (byte) Type.DOUBLE.code;
in.readFully(bytes, 1, 8);
return bytes;
} | 3.68 |
hbase_ReplicationSourceLogQueue_getNumQueues | /**
* Returns number of queues.
*/
public int getNumQueues() {
return queues.size();
} | 3.68 |
rocketmq-connect_ColumnDefinition_precision | /**
* Get the column's specified column size. For numeric data, this is the maximum precision. For
* character data, this is the length in characters. For datetime datatypes, this is the length in
* characters of the String representation (assuming the maximum allowed precision of the
* fractional seconds component). For binary data, this is the length in bytes. For the ROWID
* datatype, this is the length in bytes. 0 is returned for data types where the column size is
* not applicable.
*
* @return precision
*/
public int precision() {
return precision;
} | 3.68 |
pulsar_MultiMessageIdImpl_toByteArray | // TODO: Add support for Serialization and Deserialization
// https://github.com/apache/pulsar/issues/4940
@Override
public byte[] toByteArray() {
throw new UnsupportedOperationException();
} | 3.68 |
framework_VColorPickerGrid_getSelectedY | /**
* Returns currently selected y-coordinate of the grid.
*
* @return the selected y-coordinate
*/
public int getSelectedY() {
return selectedY;
} | 3.68 |
framework_AbstractTextFieldConnector_getValueChangeHandler | /**
* Returns the internal value change handler.
*
* @return the value change handler
*/
protected ValueChangeHandler getValueChangeHandler() {
return valueChangeHandler;
} | 3.68 |
framework_DefaultEditorEventHandler_handleCloseEvent | /**
* Closes the editor if the received event is a close event. The default
* implementation uses {@link #isCloseEvent(EditorDomEvent) isCloseEvent}.
*
* @param event
* the received event
* @return true if this method handled the event and nothing else should be
* done, false otherwise
*/
protected boolean handleCloseEvent(EditorDomEvent<T> event) {
if (isCloseEvent(event)) {
event.getEditor().cancel();
FocusUtil.setFocus(event.getGrid(), true);
return true;
}
return false;
} | 3.68 |
framework_AbstractTextField_setMaxLength | /**
* Sets the maximum number of characters in the field. Value -1 is
* considered unlimited. Terminal may however have some technical limits.
*
* @param maxLength
* the maxLength to set
*/
public void setMaxLength(int maxLength) {
getState().maxLength = maxLength;
} | 3.68 |
hbase_OrderedBytes_isFixedInt32 | /**
* Return true when the next encoded value in {@code src} uses fixed-width Int32 encoding, false
* otherwise.
*/
public static boolean isFixedInt32(PositionedByteRange src) {
return FIXED_INT32
== (-1 == Integer.signum(src.peek()) ? DESCENDING : ASCENDING).apply(src.peek());
} | 3.68 |
hbase_TableHFileArchiveTracker_checkEnabledAndUpdate | /**
* Sets the watch on the top-level archive znode, and then updates the monitor with the current
* tables that should be archived (and ensures that those nodes are watched as well).
*/
private void checkEnabledAndUpdate() {
try {
if (ZKUtil.watchAndCheckExists(watcher, archiveHFileZNode)) {
LOG.debug(archiveHFileZNode + " znode does exist, checking for tables to archive");
// update the tables we should backup, to get the most recent state.
// This is safer than also watching for children and then hoping we get
// all the updates as it makes sure we get and watch all the children
updateWatchedTables();
} else {
LOG.debug("Archiving not currently enabled, waiting");
}
} catch (KeeperException e) {
LOG.warn("Failed to watch for archiving znode", e);
}
} | 3.68 |
hadoop_BlockManagerParameters_getTrackerFactory | /**
* @return The duration tracker with statistics to update.
*/
public DurationTrackerFactory getTrackerFactory() {
return trackerFactory;
} | 3.68 |
hbase_SnapshotManager_getCompletedSnapshots | /**
* Gets the list of all completed snapshots.
* @param snapshotDir snapshot directory
* @param withCpCall Whether to call CP hooks
* @return list of SnapshotDescriptions
* @throws IOException File system exception
*/
private List<SnapshotDescription> getCompletedSnapshots(Path snapshotDir, boolean withCpCall)
throws IOException {
List<SnapshotDescription> snapshotDescs = new ArrayList<>();
// first create the snapshot root path and check to see if it exists
FileSystem fs = master.getMasterFileSystem().getFileSystem();
if (snapshotDir == null) snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
// if there are no snapshots, return an empty list
if (!fs.exists(snapshotDir)) {
return snapshotDescs;
}
// ignore all the snapshots in progress
FileStatus[] snapshots = fs.listStatus(snapshotDir,
new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
withCpCall = withCpCall && cpHost != null;
// loop through all the completed snapshots
for (FileStatus snapshot : snapshots) {
Path info = new Path(snapshot.getPath(), SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
// if the snapshot is bad
if (!fs.exists(info)) {
LOG.error("Snapshot information for " + snapshot.getPath() + " doesn't exist");
continue;
}
FSDataInputStream in = null;
try {
in = fs.open(info);
SnapshotDescription desc = SnapshotDescription.parseFrom(in);
org.apache.hadoop.hbase.client.SnapshotDescription descPOJO =
(withCpCall) ? ProtobufUtil.createSnapshotDesc(desc) : null;
if (withCpCall) {
try {
cpHost.preListSnapshot(descPOJO);
} catch (AccessDeniedException e) {
LOG.warn("Current user does not have access to " + desc.getName() + " snapshot. "
+ "Either you should be owner of this snapshot or admin user.");
// Skip this and try for next snapshot
continue;
}
}
snapshotDescs.add(desc);
// call coproc post hook
if (withCpCall) {
cpHost.postListSnapshot(descPOJO);
}
} catch (IOException e) {
LOG.warn("Found a corrupted snapshot " + snapshot.getPath(), e);
} finally {
if (in != null) {
in.close();
}
}
}
return snapshotDescs;
} | 3.68 |
querydsl_Projections_map | /**
* Create a Map typed projection for the given expressions
*
* <p>Example</p>
* <pre>{@code
* Map<Expression<?>, ?> map = query.select(
* Projections.map(user.firstName, user.lastName));
* }</pre>
*
* @param exprs arguments for the projection
* @return factory expression
*/
public static QMap map(Expression<?>... exprs) {
return new QMap(exprs);
} | 3.68 |
hadoop_QueueCapacityConfigParser_uniformParser | /**
* A parser method that is usable on uniform capacity values e.g. percentage or
* weight.
* @param matcher a regex matcher that contains parsed value and its possible
* suffix
* @return a parsed capacity vector
*/
private QueueCapacityVector uniformParser(Matcher matcher) {
ResourceUnitCapacityType capacityType = null;
String value = matcher.group(1);
if (matcher.groupCount() == 2) {
String matchedSuffix = matcher.group(2);
for (ResourceUnitCapacityType suffix : ResourceUnitCapacityType.values()) {
// Absolute uniform syntax is not supported
if (suffix.equals(ResourceUnitCapacityType.ABSOLUTE)) {
continue;
}
// when capacity is given in percentage, we do not need % symbol
String uniformSuffix = suffix.getPostfix().replaceAll("%", "");
if (uniformSuffix.equals(matchedSuffix)) {
capacityType = suffix;
}
}
}
if (capacityType == null) {
return new QueueCapacityVector();
}
return QueueCapacityVector.of(Float.parseFloat(value), capacityType);
} | 3.68 |
hudi_AbstractTableFileSystemView_isBaseFileDueToPendingClustering | /**
* With async clustering, it is possible to see partial/complete base files due to inflight clustering; ignore those
* base files.
*
* @param baseFile base File
*/
protected boolean isBaseFileDueToPendingClustering(HoodieBaseFile baseFile) {
List<String> pendingReplaceInstants =
metaClient.getActiveTimeline().filterPendingReplaceTimeline().getInstantsAsStream().map(HoodieInstant::getTimestamp).collect(Collectors.toList());
return !pendingReplaceInstants.isEmpty() && pendingReplaceInstants.contains(baseFile.getCommitTime());
} | 3.68 |
hbase_SnapshotInfo_getSharedStoreFilePercentage | /** Returns the percentage of the shared store files */
public float getSharedStoreFilePercentage() {
return ((float) hfilesSize.get() / (getStoreFilesSize())) * 100;
} | 3.68 |
hbase_CompareFilter_convert | /** Returns A pb instance to represent this instance. */
FilterProtos.CompareFilter convert() {
FilterProtos.CompareFilter.Builder builder = FilterProtos.CompareFilter.newBuilder();
HBaseProtos.CompareType compareOp = CompareType.valueOf(this.op.name());
builder.setCompareOp(compareOp);
if (this.comparator != null) builder.setComparator(ProtobufUtil.toComparator(this.comparator));
return builder.build();
} | 3.68 |
flink_InMemoryPartition_getPartitionNumber | /**
* Gets the partition number of this partition.
*
* @return This partition's number.
*/
public int getPartitionNumber() {
return this.partitionNumber;
} | 3.68 |
hadoop_BoundDTExtension_getCanonicalServiceName | /**
* Get the canonical service name, which will be
* returned by {@code FileSystem.getCanonicalServiceName()} and so used to
* map the issued DT in credentials, including credential files collected
* for job submission.
*
* If null is returned: fall back to the default filesystem logic.
*
* Only invoked on {@link CustomDelegationTokenManager} instances.
* @return the service name to be returned by the filesystem.
*/
default String getCanonicalServiceName() {
return null;
} | 3.68 |
hadoop_FSDataOutputStreamBuilder_progress | /**
* Set the facility of reporting progress.
*
* @param prog progress.
* @return B Generics Type.
*/
public B progress(@Nonnull final Progressable prog) {
checkNotNull(prog);
progress = prog;
return getThisBuilder();
} | 3.68 |
hadoop_BalanceJob_getError | /**
* Return the error exception during the job execution. This should be called
* after the job finishes.
*/
public Exception getError() {
return error;
} | 3.68 |
hadoop_ShortWritable_toString | /** Returns the short value in string format. */
@Override
public String toString() {
return Short.toString(value);
} | 3.68 |
hbase_LogLevel_sendLogLevelRequest | /**
* Send HTTP request to the daemon.
* @throws HadoopIllegalArgumentException if arguments are invalid.
* @throws Exception if unable to connect
*/
private void sendLogLevelRequest() throws HadoopIllegalArgumentException, Exception {
switch (operation) {
case GETLEVEL:
doGetLevel();
break;
case SETLEVEL:
doSetLevel();
break;
default:
throw new HadoopIllegalArgumentException("Expect either -getlevel or -setlevel");
}
} | 3.68 |
hbase_TableHFileArchiveTracker_stop | /**
* Stop this tracker and the passed zookeeper
*/
public void stop() {
if (this.stopped) {
return;
}
this.stopped = true;
this.watcher.close();
} | 3.68 |
flink_SupportsRowLevelDelete_requiredColumns | /**
* The required columns by the sink to perform row-level delete. The rows consumed by sink
* will contain the required columns in order. If return Optional.empty(), it will contain
* all columns.
*/
default Optional<List<Column>> requiredColumns() {
return Optional.empty();
} | 3.68 |
hbase_ZKConfig_transformClusterKey | /**
* Separate the given key into the three configurations it should contain: hbase.zookeeper.quorum,
* hbase.zookeeper.client.port and zookeeper.znode.parent
* @return the three configuration in the described order
*/
public static ZKClusterKey transformClusterKey(String key) throws IOException {
List<String> parts = Splitter.on(':').splitToList(key);
String[] partsArray = parts.toArray(new String[parts.size()]);
if (partsArray.length == 3) {
if (!partsArray[2].matches("/.*[^/]")) {
throw new IOException("Cluster key passed " + key + " is invalid, the format should be:"
+ HConstants.ZOOKEEPER_QUORUM + ":" + HConstants.ZOOKEEPER_CLIENT_PORT + ":"
+ HConstants.ZOOKEEPER_ZNODE_PARENT);
}
return new ZKClusterKey(partsArray[0], Integer.parseInt(partsArray[1]), partsArray[2]);
}
if (partsArray.length > 3) {
// The quorum could contain client port in server:clientport format, try to transform more.
String zNodeParent = partsArray[partsArray.length - 1];
if (!zNodeParent.matches("/.*[^/]")) {
throw new IOException("Cluster key passed " + key + " is invalid, the format should be:"
+ HConstants.ZOOKEEPER_QUORUM + ":" + HConstants.ZOOKEEPER_CLIENT_PORT + ":"
+ HConstants.ZOOKEEPER_ZNODE_PARENT);
}
String clientPort = partsArray[partsArray.length - 2];
// The first part length is the total length minus the lengths of other parts and minus 2 ":"
int endQuorumIndex = key.length() - zNodeParent.length() - clientPort.length() - 2;
String quorumStringInput = key.substring(0, endQuorumIndex);
String[] serverHosts = quorumStringInput.split(",");
// The common case is that every server has its own client port specified - this means
// that (total parts - the ZNodeParent part - the ClientPort part) is equal to
// (the number of "," + 1) - "+ 1" because the last server has no ",".
if ((partsArray.length - 2) == (serverHosts.length + 1)) {
return new ZKClusterKey(quorumStringInput, Integer.parseInt(clientPort), zNodeParent);
}
// For the uncommon case that some servers has no port specified, we need to build the
// server:clientport list using default client port for servers without specified port.
return new ZKClusterKey(buildZKQuorumServerString(serverHosts, clientPort),
Integer.parseInt(clientPort), zNodeParent);
}
throw new IOException("Cluster key passed " + key + " is invalid, the format should be:"
+ HConstants.ZOOKEEPER_QUORUM + ":" + HConstants.ZOOKEEPER_CLIENT_PORT + ":"
+ HConstants.ZOOKEEPER_ZNODE_PARENT);
} | 3.68 |
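
Two example cluster keys and the decomposition the method above produces. The sketch below only re-derives the simple three-part case and is not the HBase implementation; host names and ports are made up.

```java
public class ClusterKeySketch {
    public static void main(String[] args) {
        // Simple case: quorum without per-server ports.
        // "zk1,zk2,zk3:2181:/hbase" -> quorum=zk1,zk2,zk3, clientPort=2181, znodeParent=/hbase
        String key = "zk1,zk2,zk3:2181:/hbase";
        String[] parts = key.split(":");
        if (parts.length == 3 && parts[2].matches("/.*[^/]")) {
            System.out.printf("quorum=%s port=%s parent=%s%n", parts[0], parts[1], parts[2]);
        }
        // Mixed case that transformClusterKey (but not this sketch) handles:
        // "zk1:2181,zk2:2181:2181:/hbase" keeps the per-server ports inside the quorum string.
    }
}
```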
hadoop_MappingRuleActionBase_setFallbackReject | /**
* Sets the fallback method to reject; if the action cannot be executed, the
* application will get rejected.
* @return MappingRuleAction The same object for method chaining.
*/
public MappingRuleAction setFallbackReject() {
fallback = MappingRuleResult.createRejectResult();
return this;
} | 3.68 |
hbase_TableDescriptor_matchReplicationScope | /**
* Check if the table's cfs' replication scope matched with the replication state
* @param enabled replication state
* @return true if matched, otherwise false
*/
default boolean matchReplicationScope(boolean enabled) {
boolean hasEnabled = false;
boolean hasDisabled = false;
for (ColumnFamilyDescriptor cf : getColumnFamilies()) {
if (cf.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) {
hasDisabled = true;
} else {
hasEnabled = true;
}
}
if (hasEnabled && hasDisabled) {
return false;
}
if (hasEnabled) {
return enabled;
}
return !enabled;
} | 3.68 |
rocketmq-connect_AbstractConfigManagementService_pauseConnector | /**
* pause connector
*
* @param connectorName
*/
@Override
public void pauseConnector(String connectorName) {
if (!connectorKeyValueStore.containsKey(connectorName)) {
throw new ConnectException("Connector [" + connectorName + "] does not exist");
}
Struct connectTargetState = new Struct(TARGET_STATE_V0);
connectTargetState.put(FIELD_STATE, TargetState.PAUSED.name());
connectTargetState.put(FIELD_EPOCH, System.currentTimeMillis());
byte[] serializedTargetState = converter.fromConnectData(topic, TARGET_STATE_V0, connectTargetState);
log.debug("Writing target state {} for connector {}", TargetState.PAUSED.name(), connectorName);
notify(TARGET_STATE_KEY(connectorName), serializedTargetState);
} | 3.68 |
morf_DrawIOGraphPrinter_equals | /**
* Based on label only.
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
BasicNode other = (BasicNode) obj;
if (label == null) {
if (other.label != null) return false;
} else if (!label.equals(other.label)) return false;
return true;
} | 3.68 |
hmily_HmilyLockCacheManager_cacheHmilyLock | /**
* Cache hmily lock.
*
* @param lockId lock id
* @param hmilyLock the hmily lock
*/
public void cacheHmilyLock(final String lockId, final HmilyLock hmilyLock) {
loadingCache.put(lockId, Optional.of(hmilyLock));
} | 3.68 |
flink_FieldParser_setErrorState | /**
* Sets the error state of the parser. Called by subclasses of the parser to set the type of
* error when failing a parse.
*
* @param error The error state to set.
*/
protected void setErrorState(ParseErrorState error) {
this.errorState = error;
} | 3.68 |
morf_ColumnBean_getName | /**
* @return the name
*/
@Override
public String getName() {
return name;
} | 3.68 |
flink_SegmentsUtil_bitUnSet | /**
* unset bit from segments.
*
* @param segments target segments.
* @param baseOffset bits base offset.
* @param index bit index from base offset.
*/
public static void bitUnSet(MemorySegment[] segments, int baseOffset, int index) {
if (segments.length == 1) {
MemorySegment segment = segments[0];
int offset = baseOffset + byteIndex(index);
byte current = segment.get(offset);
current &= ~(1 << (index & BIT_BYTE_INDEX_MASK));
segment.put(offset, current);
} else {
bitUnSetMultiSegments(segments, baseOffset, index);
}
} | 3.68 |
hudi_OptionsResolver_isReadByTxnCompletionTime | /**
* Returns whether to read the instants using completion time.
*
* <p>A Hudi instant contains both the txn start time and completion time, for incremental subscription
* of the source reader, using completion time to filter the candidate instants can avoid data loss
* in scenarios like multiple writers.
*/
public static boolean isReadByTxnCompletionTime(Configuration conf) {
HollowCommitHandling handlingMode = HollowCommitHandling.valueOf(conf
.getString(INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT.key(), INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT.defaultValue()));
return handlingMode == HollowCommitHandling.USE_TRANSITION_TIME;
} | 3.68 |
framework_VFilterSelect_getPreferredHeight | /*
* Gets the preferred height of the menu including pageItemsCount items.
*/
String getPreferredHeight(int pageItemsCount) {
if (!currentSuggestions.isEmpty()) {
final int pixels = (getPreferredHeight()
/ currentSuggestions.size()) * pageItemsCount;
return pixels + "px";
} else {
return "";
}
} | 3.68 |