name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
flink_FlinkSecurityManager_fromConfiguration | /**
 * Instantiates a FlinkSecurityManager from the given configuration. Returns null if no security
 * manager check is needed, so that a caller can skip setting a security manager and avoid the
 * runtime check cost when no security check is set up. Use the {@link #setFromConfiguration}
 * helper, which handles the disabled case.
*
* @param configuration to instantiate the security manager from
 * @return FlinkSecurityManager instantiated based on the configuration; returns null if
 *     disabled.
*/
@VisibleForTesting
static FlinkSecurityManager fromConfiguration(Configuration configuration) {
final ClusterOptions.UserSystemExitMode userSystemExitMode =
configuration.get(ClusterOptions.INTERCEPT_USER_SYSTEM_EXIT);
boolean haltOnSystemExit = configuration.get(ClusterOptions.HALT_ON_FATAL_ERROR);
    // If no check is needed, return null so that the caller can avoid setting a security manager
    // and incurring any runtime cost.
if (userSystemExitMode == ClusterOptions.UserSystemExitMode.DISABLED && !haltOnSystemExit) {
return null;
}
LOG.info(
"FlinkSecurityManager is created with {} user system exit mode and {} exit",
userSystemExitMode,
haltOnSystemExit ? "forceful" : "graceful");
// Add more configuration parameters that need user security manager (currently only for
// system exit).
return new FlinkSecurityManager(userSystemExitMode, haltOnSystemExit);
} | 3.68 |
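A minimal usage sketch for the snippet above (an illustration, not part of the dataset). It assumes the Flink package names shown in the imports, that `UserSystemExitMode.THROW` is a valid mode, and that the `setFromConfiguration` helper referenced in the Javadoc is publicly accessible; treat those as assumptions rather than confirmed API.

```java
import org.apache.flink.configuration.ClusterOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.security.FlinkSecurityManager;

public class SecurityManagerSetupSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Intercepting user System.exit() calls is what makes fromConfiguration return a manager.
        conf.set(ClusterOptions.INTERCEPT_USER_SYSTEM_EXIT, ClusterOptions.UserSystemExitMode.THROW);
        conf.set(ClusterOptions.HALT_ON_FATAL_ERROR, false);
        // The Javadoc above recommends the setFromConfiguration helper, which internally handles
        // the disabled case (i.e. a null return from fromConfiguration).
        FlinkSecurityManager.setFromConfiguration(conf);
    }
}
```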
hadoop_SchedulerNodeReport_getNumContainers | /**
* @return the number of containers currently running on this node.
*/
public int getNumContainers() {
return num;
} | 3.68 |
hadoop_AbstractSchedulerPlanFollower_cleanupExpiredQueues | /**
 * First sets the entitlement of the queues to zero to prevent new app submissions.
 * Then moves all apps in the set of queues to the parent plan queue's default
 * reservation queue if move is enabled. Finally cleans up each queue by killing
 * any remaining apps (if move is disabled or the move failed) and removing the queue.
*
* @param planQueueName the name of {@code PlanQueue}
* @param shouldMove flag to indicate if any running apps should be moved or
* killed
* @param toRemove the remnant apps to clean up
* @param defReservationQueue the default {@code ReservationQueue} of the
* {@link Plan}
*/
protected void cleanupExpiredQueues(String planQueueName, boolean shouldMove,
Set<String> toRemove, String defReservationQueue) {
for (String expiredReservationId : toRemove) {
try {
// reduce entitlement to 0
String expiredReservation =
getReservationQueueName(planQueueName, expiredReservationId);
setQueueEntitlement(planQueueName, expiredReservation, 0.0f, 0.0f);
if (shouldMove) {
moveAppsInQueueSync(expiredReservation, defReservationQueue);
}
List<ApplicationAttemptId> appsInQueue = scheduler.
getAppsInQueue(expiredReservation);
int size = (appsInQueue == null ? 0 : appsInQueue.size());
if (size > 0) {
scheduler.killAllAppsInQueue(expiredReservation);
LOG.info("Killing applications in queue: {}", expiredReservation);
} else {
scheduler.removeQueue(expiredReservation);
LOG.info("Queue: " + expiredReservation + " removed");
}
} catch (YarnException e) {
LOG.warn("Exception while trying to expire reservation: {}",
expiredReservationId, e);
}
}
} | 3.68 |
hadoop_StartupProgress_isComplete | /**
* Returns true if the given startup phase has been completed.
*
* @param phase Which phase to check for completion
* @return boolean true if the given startup phase has completed.
*/
private boolean isComplete(Phase phase) {
return getStatus(phase) == Status.COMPLETE;
} | 3.68 |
morf_ResultSetIterator_advanceResultSet | /**
* Advances the underlying result set.
*/
private void advanceResultSet() {
try {
hasNext = this.resultSet.next();
if (hasNext) {
nextRecord = sqlDialect.resultSetToRecord(resultSet, sortedMetadata);
} else {
close();
}
} catch (SQLException e) {
throw new RuntimeSqlException("Error advancing result set", e);
}
} | 3.68 |
flink_ClassLoaderUtil_formatURL | /**
 * Returns a human-readable string representation of the URL.
 *
 * <p>If the URL is null, it returns '(null)'.
 *
 * <p>If the URL protocol is 'file', the formatted URL is prefixed with 'file:'. Otherwise,
 * 'url: ' is used as the prefix.
 *
 * <p>For 'file' URLs it also checks whether the target exists. If it exists, additional checks
 * determine whether it is a directory or a valid/invalid JAR file. If it does not exist, a
 * '(missing)' flag is appended.
*
* @param url URL that should be formatted
* @return The formatted URL
* @throws IOException When JarFile cannot be closed
*/
public static String formatURL(URL url) throws IOException {
StringBuilder bld = new StringBuilder();
bld.append("\n ");
if (url == null) {
bld.append("(null)");
} else if ("file".equals(url.getProtocol())) {
String filePath = url.getPath();
File fileFile = new File(filePath);
bld.append("file: '").append(filePath).append('\'');
if (fileFile.exists()) {
if (fileFile.isDirectory()) {
bld.append(" (directory)");
} else {
JarFile jar = null;
try {
jar = new JarFile(filePath);
bld.append(" (valid JAR)");
} catch (Exception e) {
bld.append(" (invalid JAR: ").append(e.getMessage()).append(')');
} finally {
if (jar != null) {
jar.close();
}
}
}
} else {
bld.append(" (missing)");
}
} else {
bld.append("url: ").append(url);
}
return bld.toString();
} | 3.68 |
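A small usage sketch for `formatURL` (illustrative, not from the dataset); the file path is hypothetical and the fully qualified class name is an assumption about where `ClassLoaderUtil` lives.

```java
import java.io.File;
import java.io.IOException;
import java.net.URL;
import org.apache.flink.runtime.util.ClassLoaderUtil;

public class FormatUrlSketch {
    public static void main(String[] args) throws IOException {
        URL jarUrl = new File("/tmp/example.jar").toURI().toURL(); // hypothetical path
        // Prints e.g. "file: '/tmp/example.jar' (missing)" if the file does not exist,
        // or "(valid JAR)" / "(invalid JAR: ...)" / "(directory)" otherwise.
        System.out.println(ClassLoaderUtil.formatURL(jarUrl));
        System.out.println(ClassLoaderUtil.formatURL(null)); // prints "(null)"
    }
}
```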
flink_ColumnSummary_getTotalCount | /** The number of all rows in this column including both nulls and non-nulls. */
public long getTotalCount() {
return getNullCount() + getNonNullCount();
} | 3.68 |
flink_Plan_getCachedFiles | /**
* Return the registered cached files.
*
* @return Set of (name, filePath) pairs
*/
public Set<Entry<String, DistributedCacheEntry>> getCachedFiles() {
return this.cacheFile.entrySet();
} | 3.68 |
dubbo_URLStrParser_parseEncodedStr | /**
 * @param encodedURLStr the string produced by {@link URL#encode(String)}; after decoding it has
 * the format: protocol://username:password@host:port/path?k1=v1&k2=v2, i.e.
 * [protocol://][username:password@][host:port]/[path][?k1=v1&k2=v2]
*/
public static URL parseEncodedStr(String encodedURLStr) {
Map<String, String> parameters = null;
int pathEndIdx = encodedURLStr.toUpperCase().indexOf("%3F"); // '?'
if (pathEndIdx >= 0) {
parameters = parseEncodedParams(encodedURLStr, pathEndIdx + 3);
} else {
pathEndIdx = encodedURLStr.length();
}
// decodedBody format: [protocol://][username:password@][host:port]/[path]
String decodedBody = decodeComponent(encodedURLStr, 0, pathEndIdx, false, DECODE_TEMP_BUF.get());
return parseURLBody(encodedURLStr, decodedBody, parameters);
} | 3.68 |
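A short usage sketch (an illustration, not part of the dataset). The sample address is made up; `URL.encode` is the Dubbo helper referenced in the Javadoc above, and the getter calls on the parsed `URL` are assumed from Dubbo's public API.

```java
import org.apache.dubbo.common.URL;
import org.apache.dubbo.common.URLStrParser;

public class ParseEncodedSketch {
    public static void main(String[] args) {
        // Hypothetical address, encoded the same way the Javadoc above describes.
        String plain = "dubbo://admin:secret@10.0.0.1:20880/com.example.DemoService?timeout=3000&retries=2";
        String encoded = URL.encode(plain);
        URL parsed = URLStrParser.parseEncodedStr(encoded);
        System.out.println(parsed.getProtocol() + " " + parsed.getHost() + ":" + parsed.getPort());
        System.out.println(parsed.getParameter("timeout")); // "3000"
    }
}
```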
flink_BinaryRowDataSerializer_copyFromPagesToView | /**
* Copy a binaryRow which stored in paged input view to output view.
*
* @param source source paged input view where the binary row stored
* @param target the target output view.
*/
public void copyFromPagesToView(AbstractPagedInputView source, DataOutputView target)
throws IOException {
checkSkipReadForFixLengthPart(source);
int length = source.readInt();
target.writeInt(length);
target.write(source, length);
} | 3.68 |
hudi_CompactionAdminClient_unscheduleCompactionPlan | /**
 * Un-schedules a compaction plan, removing all scheduled compaction operations.
*
* @param compactionInstant Compaction Instant
* @param skipValidation Skip validation step
* @param parallelism Parallelism
* @param dryRun Dry Run
*/
public List<RenameOpResult> unscheduleCompactionPlan(String compactionInstant, boolean skipValidation,
int parallelism, boolean dryRun) throws Exception {
HoodieTableMetaClient metaClient = createMetaClient(false);
// Only if all operations are successfully executed
if (!dryRun) {
// Overwrite compaction request with empty compaction operations
HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, COMPACTION_ACTION, compactionInstant);
Path inflightPath = new Path(metaClient.getMetaPath(), inflight.getFileName());
if (metaClient.getFs().exists(inflightPath)) {
// We need to rollback data-files because of this inflight compaction before unscheduling
throw new IllegalStateException("Please rollback the inflight compaction before unscheduling");
}
// Leave the trace in aux folder but delete from metapath.
// TODO: Add a rollback instant but for compaction
HoodieInstant instant = new HoodieInstant(State.REQUESTED, COMPACTION_ACTION, compactionInstant);
boolean deleted = metaClient.getFs().delete(new Path(metaClient.getMetaPath(), instant.getFileName()), false);
ValidationUtils.checkArgument(deleted, "Unable to delete compaction instant.");
}
return new ArrayList<>();
} | 3.68 |
hbase_ScanDeleteTracker_add | /**
* Add the specified Cell to the list of deletes to check against for this row operation.
* <p>
* This is called when a Delete is encountered.
* @param cell - the delete cell
*/
@Override
public void add(Cell cell) {
long timestamp = cell.getTimestamp();
byte type = cell.getTypeByte();
if (!hasFamilyStamp || timestamp > familyStamp) {
if (type == KeyValue.Type.DeleteFamily.getCode()) {
hasFamilyStamp = true;
familyStamp = timestamp;
return;
} else if (type == KeyValue.Type.DeleteFamilyVersion.getCode()) {
familyVersionStamps.add(timestamp);
return;
}
if (deleteCell != null && type < deleteType) {
// same column, so ignore less specific delete
if (CellUtil.matchingQualifier(cell, deleteCell)) {
return;
}
}
// new column, or more general delete type
deleteCell = cell;
deleteType = type;
deleteTimestamp = timestamp;
}
// missing else is never called.
} | 3.68 |
morf_MergeStatementBuilder_getSelectStatement | /**
* Gets the select statement that selects the values to merge
* into the table.
*
* @return the select statement.
*/
SelectStatement getSelectStatement() {
return selectStatement;
} | 3.68 |
flink_KeyGroupRangeAssignment_assignKeyToParallelOperator | /**
* Assigns the given key to a parallel operator index.
*
* @param key the key to assign
* @param maxParallelism the maximum supported parallelism, aka the number of key-groups.
* @param parallelism the current parallelism of the operator
* @return the index of the parallel operator to which the given key should be routed.
*/
public static int assignKeyToParallelOperator(Object key, int maxParallelism, int parallelism) {
Preconditions.checkNotNull(key, "Assigned key must not be null!");
return computeOperatorIndexForKeyGroup(
maxParallelism, parallelism, assignToKeyGroup(key, maxParallelism));
} | 3.68 |
hudi_SpillableMapUtils_readInternal | /**
 * Reads an entry with the layout |crc|timestamp|sizeOfKey|sizeOfValue|key|value| from the given
 * file and returns an instance of {@link FileEntry}.
*/
private static FileEntry readInternal(RandomAccessFile file, long valuePosition, int valueLength) throws IOException {
file.seek(valuePosition);
long crc = file.readLong();
long timestamp = file.readLong();
int keySize = file.readInt();
int valueSize = file.readInt();
byte[] key = new byte[keySize];
file.readFully(key, 0, keySize);
byte[] value = new byte[valueSize];
if (valueSize != valueLength) {
throw new HoodieCorruptedDataException("unequal size of payload written to external file, data may be corrupted");
}
file.readFully(value, 0, valueSize);
long crcOfReadValue = generateChecksum(value);
if (crc != crcOfReadValue) {
throw new HoodieCorruptedDataException(
"checksum of payload written to external disk does not match, data may be corrupted");
}
return new FileEntry(crc, keySize, valueSize, key, value, timestamp);
} | 3.68 |
framework_Flash_setCodebase | /**
* This attribute specifies the base path used to resolve relative URIs
* specified by the classid, data, and archive attributes. When absent, its
* default value is the base URI of the current document.
*
* @param codebase
* The base path
*/
public void setCodebase(String codebase) {
if (codebase != getState().codebase || (codebase != null
&& !codebase.equals(getState().codebase))) {
getState().codebase = codebase;
requestRepaint();
}
} | 3.68 |
hbase_ProcedureMember_getRpcs | /**
* Package exposed. Not for public use.
* @return reference to the Procedure member's rpcs object
*/
ProcedureMemberRpcs getRpcs() {
return rpcs;
} | 3.68 |
flink_StateUtil_discardStateFuture | /**
* Discards the given state future by first trying to cancel it. If this is not possible, then
* the state object contained in the future is calculated and afterwards discarded.
*
* @param stateFuture to be discarded
* @throws Exception if the discard operation failed
* @return the size of state before cancellation (if available)
*/
public static Tuple2<Long, Long> discardStateFuture(Future<? extends StateObject> stateFuture)
throws Exception {
long stateSize = 0, checkpointedSize = 0;
if (null != stateFuture) {
if (!stateFuture.cancel(true)) {
try {
// We attempt to get a result, in case the future completed before cancellation.
if (stateFuture instanceof RunnableFuture<?> && !stateFuture.isDone()) {
((RunnableFuture<?>) stateFuture).run();
}
StateObject stateObject = stateFuture.get();
if (stateObject != null) {
stateSize = stateObject.getStateSize();
checkpointedSize = getCheckpointedSize(stateObject, stateSize);
stateObject.discardState();
}
} catch (Exception ex) {
LOG.debug(
"Cancelled execution of snapshot future runnable. Cancellation produced the following "
+ "exception, which is expected an can be ignored.",
ex);
}
} else if (stateFuture.isDone()) {
try {
StateObject stateObject = stateFuture.get();
stateSize = stateObject.getStateSize();
checkpointedSize = getCheckpointedSize(stateObject, stateSize);
} catch (Exception e) {
// ignored
}
}
}
return Tuple2.of(stateSize, checkpointedSize);
} | 3.68 |
flink_HiveParserTypeCheckCtx_setOuterRR | /** @param outerRR the outerRR to set */
public void setOuterRR(HiveParserRowResolver outerRR) {
this.outerRR = outerRR;
} | 3.68 |
hbase_WALPrettyPrinter_endPersistentOutput | /**
 * Ends output of a single, persistent list. At present, this is only relevant for JSON output.
*/
public void endPersistentOutput() {
if (!persistentOutput) {
return;
}
persistentOutput = false;
if (outputJSON) {
out.print("]");
}
} | 3.68 |
hbase_StoreFileReader_incrementRefCount | /**
 * Indicates that a scanner has started reading with this reader. We need to increment the ref
 * count so the reader is not closed while some object is still holding it.
*/
void incrementRefCount() {
storeFileInfo.increaseRefCount();
} | 3.68 |
hadoop_HAServiceTarget_getHealthMonitorProxy | /**
* Returns a proxy to connect to the target HA service for health monitoring.
* If {@link #getHealthMonitorAddress()} is implemented to return a non-null
* address, then this proxy will connect to that address. Otherwise, the
* returned proxy defaults to using {@link #getAddress()}, which means this
* method's behavior is identical to {@link #getProxy(Configuration, int)}.
*
* @param conf configuration.
* @param timeoutMs timeout in milliseconds
* @return a proxy to connect to the target HA service for health monitoring
* @throws IOException if there is an error
*/
public HAServiceProtocol getHealthMonitorProxy(Configuration conf,
int timeoutMs) throws IOException {
return getHealthMonitorProxy(conf, timeoutMs, 1);
} | 3.68 |
hudi_HoodieReaderContext_updateSchemaAndResetOrderingValInMetadata | /**
 * Updates the schema and resets the ordering value in the existing metadata mapping of a record.
*
* @param meta Metadata in a mapping.
* @param schema New schema to set.
* @return The input metadata mapping.
*/
public Map<String, Object> updateSchemaAndResetOrderingValInMetadata(Map<String, Object> meta,
Schema schema) {
meta.remove(INTERNAL_META_ORDERING_FIELD);
meta.put(INTERNAL_META_SCHEMA, schema);
return meta;
} | 3.68 |
hadoop_ClusterMetrics_getOccupiedMapSlots | /**
* Get number of occupied map slots in the cluster.
*
* @return occupied map slot count
*/
public int getOccupiedMapSlots() {
return occupiedMapSlots;
} | 3.68 |
flink_ExtractionUtils_wrapperToPrimitive | /**
* Converts the specified wrapper class to its corresponding primitive class.
*
* <p>This method is the counter part of {@code primitiveToWrapper()}. If the passed in class is
* a wrapper class for a primitive type, this primitive type will be returned (e.g. {@code
* Integer.TYPE} for {@code Integer.class}). For other classes, or if the parameter is
* <b>null</b>, the return value is <b>null</b>.
*
* @param cls the class to convert, may be <b>null</b>
* @return the corresponding primitive type if {@code cls} is a wrapper class, <b>null</b>
* otherwise
* @see #primitiveToWrapper(Class)
* @since 2.4
*/
public static Class<?> wrapperToPrimitive(final Class<?> cls) {
return wrapperPrimitiveMap.get(cls);
} | 3.68 |
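A trivial usage sketch (illustration only; the fully qualified class name is an assumption about where `ExtractionUtils` lives).

```java
import org.apache.flink.table.types.extraction.ExtractionUtils;

public class WrapperToPrimitiveSketch {
    public static void main(String[] args) {
        System.out.println(ExtractionUtils.wrapperToPrimitive(Integer.class)); // int
        System.out.println(ExtractionUtils.wrapperToPrimitive(Double.class));  // double
        System.out.println(ExtractionUtils.wrapperToPrimitive(String.class));  // null (not a wrapper)
        System.out.println(ExtractionUtils.wrapperToPrimitive(null));          // null
    }
}
```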
hmily_AggregateBinder_get | /**
 * Gets the value, lazily creating it from the supplier on first access.
 *
 * @return the value
*/
public T get() {
if (this.supplied == null) {
this.supplied = this.supplier.get();
}
return this.supplied;
} | 3.68 |
flink_HiveParserQBParseInfo_getSortByForClause | /** Get the Sort By AST for the clause. */
public HiveParserASTNode getSortByForClause(String clause) {
return destToSortby.get(clause);
} | 3.68 |
hmily_HmilyParticipantCacheManager_cacheHmilyParticipant | /**
* Cache hmily participant.
*
* @param participantId the participant id
* @param hmilyParticipant the hmily participant
*/
public void cacheHmilyParticipant(final Long participantId, final HmilyParticipant hmilyParticipant) {
List<HmilyParticipant> existHmilyParticipantList = get(participantId);
if (CollectionUtils.isEmpty(existHmilyParticipantList)) {
LOADING_CACHE.put(participantId, Lists.newArrayList(hmilyParticipant));
} else {
existHmilyParticipantList.add(hmilyParticipant);
LOADING_CACHE.put(participantId, existHmilyParticipantList);
}
} | 3.68 |
hbase_CompactSplit_getShortCompactions | /** Returns the shortCompactions thread pool executor */
ThreadPoolExecutor getShortCompactions() {
return shortCompactions;
} | 3.68 |
framework_VTabsheet_tabSizeMightHaveChanged | /**
* This should be triggered from an onload event within the given tab's
* caption to signal that icon contents have finished loading. The contents
* may have changed the tab's width. This might in turn require changes in
* the scroller (hidden tabs might need to be scrolled back into view), or
* even the width of the entire tab sheet if it has been configured to be
* dynamic.
*
* @param tab
* the tab whose size may have changed
*/
public void tabSizeMightHaveChanged(Tab tab) {
// icon onloads may change total width of tabsheet
if (isDynamicWidth()) {
updateDynamicWidth();
}
updateTabScroller();
} | 3.68 |
hbase_KeyValueUtil_create | /**
* Create a KeyValue reading <code>length</code> from <code>in</code>
 * @return the created KeyValue; if we find a length of zero, we return null, which can be
 *         useful for marking a stream as done.
*/
public static KeyValue create(int length, final DataInput in) throws IOException {
if (length <= 0) {
if (length == 0) return null;
throw new IOException("Failed read " + length + " bytes, stream corrupt?");
}
// This is how the old Writables.readFrom used to deserialize. Didn't even
// vint.
byte[] bytes = new byte[length];
in.readFully(bytes);
return new KeyValue(bytes, 0, length);
} | 3.68 |
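A hedged sketch of a typical read loop built on the contract above. The file name is hypothetical, and it assumes a stream layout in which each KeyValue is preceded by an int length, with a zero length marking the end of the stream.

```java
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;

public class KeyValueReadSketch {
    public static void main(String[] args) throws IOException {
        try (DataInputStream in = new DataInputStream(new FileInputStream("kvs.bin"))) { // hypothetical file
            KeyValue kv;
            // create(...) returns null when it reads a zero length, which marks the stream as done.
            while ((kv = KeyValueUtil.create(in.readInt(), in)) != null) {
                System.out.println(kv);
            }
        }
    }
}
```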
framework_DefaultEditorEventHandler_handleMoveEvent | /**
* Moves the editor to another row or another column if the received event
* is a move event. The default implementation moves the editor to the
* clicked row if the event is a click; otherwise, if the event is a keydown
* and the keycode is {@link #KEYCODE_MOVE_VERTICAL}, moves the editor one
* row up or down if the shift key is pressed or not, respectively. Keydown
* event with keycode {@link #KEYCODE_MOVE_HORIZONTAL} moves the editor left
* or right if shift key is pressed or not, respectively.
*
* @param event
* the received event
* @return true if this method handled the event and nothing else should be
* done, false otherwise
*/
protected boolean handleMoveEvent(EditorDomEvent<T> event) {
Event e = event.getDomEvent();
final EventCellReference<T> cell = event.getCell();
// TODO: Move on touch events
if (e.getTypeInt() == Event.ONCLICK) {
editRow(event, cell.getRowIndex(), cell.getColumnIndexDOM());
return true;
} else if (e.getTypeInt() == Event.ONKEYDOWN) {
CursorMoveDelta delta = getDeltaFromKeyDownEvent(event);
final boolean changed = delta != null;
if (changed) {
int columnCount = event.getGrid().getVisibleColumns().size();
int colIndex = delta.colDelta > 0
? findNextEditableColumnIndex(event.getGrid(),
event.getFocusedColumnIndex() + delta.colDelta)
: findPrevEditableColumnIndex(event.getGrid(),
event.getFocusedColumnIndex() + delta.colDelta);
int rowIndex = event.getRowIndex();
// Handle row change with horizontal move when column goes out
// of range.
if (delta.rowDelta == 0 && colIndex < 0) {
if (delta.colDelta > 0
&& rowIndex < event.getGrid().getDataSource().size()
- 1) {
delta = CursorMoveDelta.DOWN;
colIndex = findNextEditableColumnIndex(event.getGrid(),
0);
} else if (delta.colDelta < 0 && rowIndex > 0) {
delta = CursorMoveDelta.UP;
colIndex = findPrevEditableColumnIndex(event.getGrid(),
columnCount - 1);
}
}
int newRowIndex = rowIndex + delta.rowDelta;
if (newRowIndex != event.getRowIndex()) {
triggerValueChangeEvent(event);
// disable until validity check is done
setWidgetEnabled(event.getEditorWidget(), false);
event.getEditor().getHandler().checkValidity();
pendingEdit = new PendingEdit(event, newRowIndex, colIndex);
} else {
editRow(event, newRowIndex, colIndex);
}
}
return changed;
}
return false;
} | 3.68 |
hbase_Client_shutdown | /**
* Shut down the client. Close any open persistent connections.
*/
public void shutdown() {
} | 3.68 |
hbase_AbstractMultiFileWriter_init | /**
* Initializes multi-writer before usage.
* @param sourceScanner Optional store scanner to obtain the information about read progress.
* @param factory Factory used to produce individual file writers.
*/
public void init(StoreScanner sourceScanner, WriterFactory factory) {
this.writerFactory = factory;
this.sourceScanner = sourceScanner;
} | 3.68 |
framework_CalendarTest_deleteCalendarEvent | /* Removes the event from the data source and fires change event. */
private void deleteCalendarEvent() {
BasicEvent event = getFormCalendarEvent();
if (dataSource.containsEvent(event)) {
dataSource.removeEvent(event);
}
getUI().removeWindow(scheduleEventPopup);
} | 3.68 |
pulsar_ResourceUnitRanking_compareMessageRateTo | /**
* Compare two loads based on message rate only.
*/
public int compareMessageRateTo(ResourceUnitRanking other) {
return Double.compare(this.estimatedMessageRate, other.estimatedMessageRate);
} | 3.68 |
hadoop_NamenodeStatusReport_getNamenodeId | /**
* Get the namenode identifier.
*
* @return The namenode identifier.
*/
public String getNamenodeId() {
return this.namenodeId;
} | 3.68 |
hadoop_FindOptions_isDepthFirst | /**
* Should directory tree be traversed depth first?
*
 * @return true to indicate depth-first traversal
*/
public boolean isDepthFirst() {
return this.depthFirst;
} | 3.68 |
zxing_ExpandedRow_equals | /**
* Two rows are equal if they contain the same pairs in the same order.
*/
@Override
public boolean equals(Object o) {
if (!(o instanceof ExpandedRow)) {
return false;
}
ExpandedRow that = (ExpandedRow) o;
return this.pairs.equals(that.pairs);
} | 3.68 |
hadoop_ReplayJobFactory_update | /**
 * @param item the updated cluster statistics; ignored by this implementation
*/
public void update(Statistics.ClusterStats item) {
} | 3.68 |
hbase_MultiVersionConcurrencyControl_begin | /**
* Start a write transaction. Create a new {@link WriteEntry} with a new write number and add it
* to our queue of ongoing writes. Return this WriteEntry instance. To complete the write
* transaction and wait for it to be visible, call {@link #completeAndWait(WriteEntry)}. If the
* write failed, call {@link #complete(WriteEntry)} so we can clean up AFTER removing ALL trace of
* the failed write transaction.
* <p>
* The {@code action} will be executed under the lock which means it can keep the same order with
* mvcc.
* @see #complete(WriteEntry)
* @see #completeAndWait(WriteEntry)
*/
public WriteEntry begin(Runnable action) {
synchronized (writeQueue) {
long nextWriteNumber = writePoint.incrementAndGet();
WriteEntry e = new WriteEntry(nextWriteNumber);
writeQueue.add(e);
action.run();
return e;
}
} | 3.68 |
flink_BigDecParser_parseField | /**
* Static utility to parse a field of type BigDecimal from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
 * @throws IllegalArgumentException Thrown when the value cannot be parsed because the text
 *     does not represent a valid number.
*/
public static final BigDecimal parseField(
byte[] bytes, int startPos, int length, char delimiter) {
if (length <= 0) {
throw new NumberFormatException("Invalid input: Empty string");
}
int i = 0;
final byte delByte = (byte) delimiter;
while (i < length && bytes[startPos + i] != delByte) {
i++;
}
if (i > 0
&& (Character.isWhitespace(bytes[startPos])
|| Character.isWhitespace(bytes[startPos + i - 1]))) {
throw new NumberFormatException(
"There is leading or trailing whitespace in the numeric field.");
}
final char[] chars = new char[i];
for (int j = 0; j < i; j++) {
final byte b = bytes[startPos + j];
if ((b < '0' || b > '9') && b != '-' && b != '+' && b != '.' && b != 'E' && b != 'e') {
throw new NumberFormatException();
}
chars[j] = (char) bytes[startPos + j];
}
return new BigDecimal(chars);
} | 3.68 |
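A short usage example (illustrative; the package name in the import is an assumption) parsing one delimited field from a byte buffer.

```java
import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import org.apache.flink.types.parser.BigDecParser;

public class BigDecParseSketch {
    public static void main(String[] args) {
        byte[] bytes = "3.14159|rest-of-line".getBytes(StandardCharsets.US_ASCII);
        // Parsing stops at the '|' delimiter, so only "3.14159" is consumed.
        BigDecimal value = BigDecParser.parseField(bytes, 0, bytes.length, '|');
        System.out.println(value); // 3.14159
    }
}
```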
pulsar_LoadSimulationController_handleCopy | // Handle the command line arguments associated with the copy command.
private void handleCopy(final ShellArguments arguments) throws Exception {
final List<String> commandArguments = arguments.commandArguments;
// Copy accepts 3 application arguments: Tenant name, source ZooKeeper and target ZooKeeper connect strings.
if (checkAppArgs(commandArguments.size() - 1, 3)) {
final String tenantName = commandArguments.get(1);
final String sourceZKConnectString = commandArguments.get(2);
final String targetZKConnectString = commandArguments.get(3);
final ZooKeeper sourceZKClient = new ZooKeeper(sourceZKConnectString, 5000, null);
final ZooKeeper targetZKClient = new ZooKeeper(targetZKConnectString, 5000, null);
// Make a map for each thread to speed up the ZooKeeper writing process.
final Map<String, ResourceQuota>[] threadLocalMaps = new Map[clients.length];
for (int i = 0; i < clients.length; ++i) {
threadLocalMaps[i] = new HashMap<>();
}
getResourceQuotas(QUOTA_ROOT, sourceZKClient, threadLocalMaps);
final List<Future> futures = new ArrayList<>(clients.length);
int i = 0;
log.info("Copying...");
for (final Map<String, ResourceQuota> bundleToQuota : threadLocalMaps) {
final int j = i;
futures.add(threadPool.submit(() -> {
for (final Map.Entry<String, ResourceQuota> entry : bundleToQuota.entrySet()) {
final String bundle = entry.getKey();
final ResourceQuota quota = entry.getValue();
// Simulation will send messages in and out at about the same rate, so just make the rate the
// average of in and out.
final int tenantStart = QUOTA_ROOT.length() + 1;
final int clusterStart = bundle.indexOf('/', tenantStart) + 1;
final String sourceTenant = bundle.substring(tenantStart, clusterStart - 1);
final int namespaceStart = bundle.indexOf('/', clusterStart) + 1;
final String sourceCluster = bundle.substring(clusterStart, namespaceStart - 1);
final String namespace = bundle.substring(namespaceStart, bundle.lastIndexOf('/'));
final String keyRangeString = bundle.substring(bundle.lastIndexOf('/') + 1);
// To prevent duplicate node issues for same namespace names in different clusters/tenants.
final String manglePrefix = String.format("%s-%s-%s", sourceCluster, sourceTenant,
keyRangeString);
final String mangledNamespace = String.format("%s-%s", manglePrefix, namespace);
final BundleData bundleData = initializeBundleData(quota, arguments);
final String oldAPITargetPath = String.format(
"/loadbalance/resource-quota/namespace/%s/%s/%s/0x00000000_0xffffffff", tenantName,
cluster, mangledNamespace);
final String newAPITargetPath = String.format(
"%s/%s/%s/%s/0x00000000_0xffffffff", BUNDLE_DATA_BASE_PATH, tenantName, cluster,
mangledNamespace);
try {
ZkUtils.createFullPathOptimistic(targetZKClient, oldAPITargetPath,
ObjectMapperFactory.getMapper().writer().writeValueAsBytes(quota),
ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
} catch (KeeperException.NodeExistsException e) {
// Ignore already created nodes.
} catch (Exception e) {
throw new RuntimeException(e);
}
// Put the bundle data in the new ZooKeeper.
try {
ZkUtils.createFullPathOptimistic(targetZKClient, newAPITargetPath,
ObjectMapperFactory.getMapper().writer().writeValueAsBytes(bundleData),
ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
} catch (KeeperException.NodeExistsException e) {
// Ignore already created nodes.
} catch (Exception e) {
throw new RuntimeException(e);
}
try {
trade(arguments, makeTopic(tenantName, mangledNamespace, "t"), j);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}));
++i;
}
for (final Future future : futures) {
future.get();
}
sourceZKClient.close();
targetZKClient.close();
}
} | 3.68 |
framework_VAbstractSplitPanel_setSplitPosition | /**
* Sets the position of the splitter element.
*
* @param pos
* the required position as either percentage or pixels
*/
public void setSplitPosition(String pos) {
setSplitPosition(pos, true);
} | 3.68 |
framework_VaadinService_getUidlRequestTimeout | /**
* Returns the number of seconds that must pass without a valid UIDL request
* being received for the given session before the session is closed, even
* though heartbeat requests are received. This is a lower bound; it might
* take longer to close an inactive session.
* <p>
* Returns a negative number if there is no timeout. In this case heartbeat
* requests suffice to keep the session alive, but it will still eventually
* expire in the regular manner if there are no requests at all (see
* {@link WrappedSession#getMaxInactiveInterval()}).
*
* @see DeploymentConfiguration#isCloseIdleSessions()
* @see #getHeartbeatTimeout()
*
* @since 7.0.0
*
* @return The UIDL request timeout in seconds, or a negative number if
* timeout never occurs.
*/
private int getUidlRequestTimeout(VaadinSession session) {
return getDeploymentConfiguration().isCloseIdleSessions()
? session.getSession().getMaxInactiveInterval()
: -1;
} | 3.68 |
hadoop_FederationStateStoreFacade_incrementDelegationTokenSeqNum | /**
 * Increments the delegation token sequence number in the state store.
 *
 * @return the incremented delegation token sequence number.
*/
public int incrementDelegationTokenSeqNum() {
return stateStore.incrementDelegationTokenSeqNum();
} | 3.68 |
graphhopper_LMPreparationHandler_prepare | /**
* Prepares the landmark data for all given configs
*/
public List<PrepareLandmarks> prepare(List<LMConfig> lmConfigs, BaseGraph baseGraph, EncodingManager encodingManager, StorableProperties properties, LocationIndex locationIndex, final boolean closeEarly) {
List<PrepareLandmarks> preparations = createPreparations(lmConfigs, baseGraph, encodingManager, locationIndex);
List<Runnable> prepareRunnables = new ArrayList<>();
for (int i = 0; i < preparations.size(); i++) {
PrepareLandmarks prepare = preparations.get(i);
final int count = i + 1;
final String name = prepare.getLMConfig().getName();
prepareRunnables.add(() -> {
LOGGER.info(count + "/" + lmConfigs.size() + " calling LM prepare.doWork for " + prepare.getLMConfig().getName() + " ... (" + getMemInfo() + ")");
Thread.currentThread().setName(name);
prepare.doWork();
if (closeEarly)
prepare.close();
LOGGER.info("LM {} finished {}", name, getMemInfo());
properties.put(Landmark.PREPARE + "date." + name, createFormatter().format(new Date()));
});
}
GHUtility.runConcurrently(prepareRunnables.stream(), preparationThreads);
LOGGER.info("Finished LM preparation, {}", getMemInfo());
return preparations;
} | 3.68 |
hadoop_SelectEventStreamPublisher_toRecordsInputStream | /**
* Retrieve an input stream to the subset of the S3 object that matched the select query.
* This is equivalent to loading the content of all RecordsEvents into an InputStream.
* This will lazily-load the content from S3, minimizing the amount of memory used.
* @param onEndEvent callback on the end event
* @return the input stream
*/
public AbortableInputStream toRecordsInputStream(Consumer<EndEvent> onEndEvent) {
SdkPublisher<InputStream> recordInputStreams = this.publisher
.filter(e -> {
if (e instanceof RecordsEvent) {
return true;
} else if (e instanceof EndEvent) {
onEndEvent.accept((EndEvent) e);
}
return false;
})
.map(e -> ((RecordsEvent) e).payload().asInputStream());
// Subscribe to the async publisher using an enumeration that will
// buffer a single chunk (RecordsEvent's payload) at a time and
// block until it is consumed.
// Also inject an empty stream as the first element that
// SequenceInputStream will request on construction.
BlockingEnumeration enumeration =
new BlockingEnumeration(recordInputStreams, 1, EMPTY_STREAM);
return AbortableInputStream.create(
new SequenceInputStream(enumeration),
this::cancel);
} | 3.68 |
hibernate-validator_ConstraintHelper_getConstraintsFromMultiValueConstraint | /**
* Returns the constraints which are part of the given multi-value constraint.
* <p>
* Invoke {@link #isMultiValueConstraint(Class)} prior to calling this method to check whether a given constraint
* actually is a multi-value constraint.
*
* @param multiValueConstraint the multi-value constraint annotation from which to retrieve the contained constraints
* @param <A> the type of the annotation
*
* @return A list of constraint annotations, may be empty but never {@code null}.
*/
public <A extends Annotation> List<Annotation> getConstraintsFromMultiValueConstraint(A multiValueConstraint) {
Annotation[] annotations = run(
GetAnnotationAttribute.action(
multiValueConstraint,
"value",
Annotation[].class
)
);
return Arrays.asList( annotations );
} | 3.68 |
flink_EnvironmentInformation_getVersion | /**
* Returns the version of the code as String.
*
* @return The project version string.
*/
public static String getVersion() {
return getVersionsInstance().projectVersion;
} | 3.68 |
flink_SolutionSetBroker_instance | /** Retrieve the singleton instance. */
public static Broker<Object> instance() {
return INSTANCE;
} | 3.68 |
framework_CellReference_getColumnIndex | /**
* Gets the index of the column.
* <p>
* <em>NOTE:</em> The index includes hidden columns in the count, unlike
* {@link #getColumnIndexDOM()}.
*
* @return the index of the column
*/
public int getColumnIndex() {
return columnIndex;
} | 3.68 |
hudi_MarkerDirState_addMarkerToMap | /**
* Adds a new marker to the in-memory map.
*
* @param fileIndex Marker file index number.
* @param markerName Marker name.
*/
private void addMarkerToMap(int fileIndex, String markerName) {
allMarkers.add(markerName);
StringBuilder stringBuilder = fileMarkersMap.computeIfAbsent(fileIndex, k -> new StringBuilder(16384));
stringBuilder.append(markerName);
stringBuilder.append('\n');
} | 3.68 |
flink_StateAssignmentOperation_createKeyGroupPartitions | /**
* Groups the available set of key groups into key group partitions. A key group partition is
* the set of key groups which is assigned to the same task. Each set of the returned list
* constitutes a key group partition.
*
* <p><b>IMPORTANT</b>: The assignment of key groups to partitions has to be in sync with the
* KeyGroupStreamPartitioner.
*
* @param numberKeyGroups Number of available key groups (indexed from 0 to numberKeyGroups - 1)
* @param parallelism Parallelism to generate the key group partitioning for
* @return List of key group partitions
*/
public static List<KeyGroupRange> createKeyGroupPartitions(
int numberKeyGroups, int parallelism) {
Preconditions.checkArgument(numberKeyGroups >= parallelism);
List<KeyGroupRange> result = new ArrayList<>(parallelism);
for (int i = 0; i < parallelism; ++i) {
result.add(
KeyGroupRangeAssignment.computeKeyGroupRangeForOperatorIndex(
numberKeyGroups, parallelism, i));
}
return result;
} | 3.68 |
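An illustrative call (the package names are assumptions) showing how 128 key groups are partitioned across a parallelism of 4, in line with the contract described in the Javadoc above.

```java
import java.util.List;
import org.apache.flink.runtime.checkpoint.StateAssignmentOperation;
import org.apache.flink.runtime.state.KeyGroupRange;

public class KeyGroupPartitionSketch {
    public static void main(String[] args) {
        List<KeyGroupRange> partitions = StateAssignmentOperation.createKeyGroupPartitions(128, 4);
        // With 128 key groups and parallelism 4, each subtask gets a contiguous range of 32 key groups:
        // [0, 31], [32, 63], [64, 95], [96, 127]
        partitions.forEach(System.out::println);
    }
}
```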
querydsl_AbstractGeometryCollectionExpression_numGeometries | /**
* Returns the number of geometries in this GeometryCollection.
*
* @return number of geometries
*/
public NumberExpression<Integer> numGeometries() {
if (numGeometries == null) {
numGeometries = Expressions.numberOperation(Integer.class, SpatialOps.NUM_GEOMETRIES, mixin);
}
return numGeometries;
} | 3.68 |
hmily_MongodbTemplateService_delete | /**
 * Removes records matching the given criteria.
 * @param c data type.
 * @param conditions where conditions.
 * @return the number of deleted records.
*/
public int delete(final Class c, final Criteria conditions) {
return (int) remove(new Query().addCriteria(conditions), c).getDeletedCount();
} | 3.68 |
framework_VCalendarPanel_isShowISOWeekNumbers | /**
* Returns whether ISO 8601 week numbers should be shown in the value
* selector or not. ISO 8601 defines that a week always starts with a Monday
* so the week numbers are only shown if this is the case.
*
* @return true if week number should be shown, false otherwise
*/
public boolean isShowISOWeekNumbers() {
return showISOWeekNumbers;
} | 3.68 |
framework_WindowMoveEvent_getNewY | /**
* Gets the new y position of the window.
*
* @return the new Y position of the VWindow
*/
public int getNewY() {
return newY;
} | 3.68 |
flink_CrossOperator_projectTuple22 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>
ProjectCross<
I1,
I2,
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>
projectTuple22() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>
tType =
new TupleTypeInfo<
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>(fTypes);
return new ProjectCross<
I1,
I2,
Tuple22<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
hadoop_CommitContext_destroyThreadPools | /**
* Destroy any thread pools; wait for that to finish,
* but don't overreact if it doesn't finish in time.
*/
private synchronized void destroyThreadPools() {
try {
IOUtils.cleanupWithLogger(LOG, outerSubmitter, innerSubmitter);
} finally {
outerSubmitter = null;
innerSubmitter = null;
}
} | 3.68 |
hbase_GetUserPermissionsRequest_newBuilder | /**
* Build a get table permission request
* @param tableName the specific table name
* @return a get table permission request builder
*/
public static Builder newBuilder(TableName tableName) {
return new Builder(tableName);
} | 3.68 |
framework_ContainerHierarchicalWrapper_containsId | /*
* Does the container contain the specified Item? Don't add a JavaDoc
* comment here, we use the default documentation from implemented
* interface.
*/
@Override
public boolean containsId(Object itemId) {
return container.containsId(itemId);
} | 3.68 |
framework_Navigator_getViewName | /**
* Get the view name for this provider.
*
* @return view name for this provider
*/
public String getViewName() {
return viewName;
} | 3.68 |
pulsar_Schema_PROTOBUF | /**
* Create a Protobuf schema type with schema definition.
*
 * @param schemaDefinition the definition of the schema
* @return a Schema instance
*/
static <T extends com.google.protobuf.GeneratedMessageV3> Schema<T> PROTOBUF(SchemaDefinition<T> schemaDefinition) {
return DefaultImplementation.getDefaultImplementation().newProtobufSchema(schemaDefinition);
} | 3.68 |
hmily_AbstractHmilyTransactionAspect_interceptTccMethod | /**
 * Around advice for methods annotated with {@linkplain HmilyTCC}.
*
* @param proceedingJoinPoint proceedingJoinPoint
* @return Object object
* @throws Throwable Throwable
*/
@Around("hmilyInterceptor()")
public Object interceptTccMethod(final ProceedingJoinPoint proceedingJoinPoint) throws Throwable {
return interceptor.invoke(proceedingJoinPoint);
} | 3.68 |
morf_RecordHelper_joinRecordValues | /**
* Joins the values of a record with a specified delimiter and direct string conversion.
* Where a null occurs, the <code>valueForNull</code> string will be used in the
* output.
*
* @param columns The columns from the record which should be joined.
* @param record the record to join
* @param delimiter The delimiter to use.
* @param valueForNull The value to use in the output string instead of {@code null}.
* @return a string representation of the record's values
*/
public static String joinRecordValues(Iterable<Column> columns, Record record, String delimiter, String valueForNull) {
return FluentIterable.from(columns)
.transform(Column::getName)
.transform(record::getString)
.transform(v -> StringUtils.isEmpty(v) ? valueForNull : v)
.join(Joiner.on(delimiter));
} | 3.68 |
framework_ColumnConnector_setTooltipContentMode | /**
* Sets the content mode for tooltips in this column.
*
* @param tooltipContentMode
* the content mode for tooltips
*
* @since 8.2
*/
public void setTooltipContentMode(ContentMode tooltipContentMode) {
this.tooltipContentMode = tooltipContentMode;
} | 3.68 |
flink_FutureCompletingBlockingQueue_getAvailabilityFuture | /**
* Returns the availability future. If the queue is non-empty, then this future will already be
* complete. Otherwise the obtained future is guaranteed to get completed the next time the
* queue becomes non-empty, or a notification happens via {@link #notifyAvailable()}.
*
* <p>It is important that a completed future is no guarantee that the next call to {@link
 * #poll()} will return a non-null element. If there are concurrent consumers, another consumer
* may have taken the available element. Or there was no element in the first place, because the
* future was completed through a call to {@link #notifyAvailable()}.
*
* <p>For that reason, it is important to call this method (to obtain a new future) every time
* again after {@link #poll()} returned null and you want to wait for data.
*/
public CompletableFuture<Void> getAvailabilityFuture() {
return currentFuture;
} | 3.68 |
flink_Either_of | /** Creates a right value of {@link Either} */
public static <L, R> Right<L, R> of(R right) {
return new Right<L, R>(right);
} | 3.68 |
hudi_TableSchemaResolver_getTableInternalSchemaFromCommitMetadata | /**
* Gets the InternalSchema for a hoodie table from the HoodieCommitMetadata of the instant.
*
* @return InternalSchema for this table
*/
private Option<InternalSchema> getTableInternalSchemaFromCommitMetadata(HoodieInstant instant) {
try {
HoodieCommitMetadata metadata = getCachedCommitMetadata(instant);
String latestInternalSchemaStr = metadata.getMetadata(SerDeHelper.LATEST_SCHEMA);
if (latestInternalSchemaStr != null) {
return SerDeHelper.fromJson(latestInternalSchemaStr);
} else {
return Option.empty();
}
} catch (Exception e) {
throw new HoodieException("Failed to read schema from commit metadata", e);
}
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setCacheBloomsOnWrite | /**
* Set the setCacheBloomsOnWrite flag.
* @param value true if we should cache bloomfilter blocks on write
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCacheBloomsOnWrite(boolean value) {
return setValue(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean.toString(value));
} | 3.68 |
dubbo_AbstractMetadataReport_calculateStartTime | /**
 * Calculates a start delay such that the resulting start time falls at a random point
 * between 2:00 am and 6:00 am of the next day.
 *
 * @return the delay in milliseconds from now.
*/
long calculateStartTime() {
Calendar calendar = Calendar.getInstance();
long nowMill = calendar.getTimeInMillis();
calendar.set(Calendar.HOUR_OF_DAY, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MILLISECOND, 0);
long subtract = calendar.getTimeInMillis() + ONE_DAY_IN_MILLISECONDS - nowMill;
return subtract
+ (FOUR_HOURS_IN_MILLISECONDS / 2)
+ ThreadLocalRandom.current().nextInt(FOUR_HOURS_IN_MILLISECONDS);
} | 3.68 |
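A worked example of the arithmetic above (a sketch with an assumed wall-clock value, not part of the dataset): the delay is the time until the next midnight, plus 2 hours, plus a random offset of up to 4 hours, so the resulting start time falls between 2:00 am and 6:00 am of the next day.

```java
public class StartTimeWindowSketch {
    public static void main(String[] args) {
        long fourHours = 4L * 60 * 60 * 1000;
        long millisUntilMidnight = 2L * 60 * 60 * 1000;           // assume "now" is 22:00
        long earliestDelay = millisUntilMidnight + fourHours / 2; // 4h from now -> 02:00 next day
        long latestDelay = earliestDelay + fourHours;             // 8h from now -> 06:00 next day
        // calculateStartTime() returns a delay somewhere in [earliestDelay, latestDelay).
        System.out.println("start window: [" + earliestDelay + ", " + latestDelay + ") ms from now");
    }
}
```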
framework_LoginForm_removeLoginListener | /**
* Removes a {@link LoginListener}.
*
* @param listener
* the listener to remove
* @deprecated As of 8.0, replaced by {@link Registration#remove()} in the
* registration object returned from
* {@link #addLoginListener(LoginListener)}.
*/
@Deprecated
public void removeLoginListener(LoginListener listener) {
removeListener(LoginEvent.class, listener, ON_LOGIN_METHOD);
} | 3.68 |
hbase_Scan_includeStopRow | /** Returns whether the stop row should be included in the scan. */
public boolean includeStopRow() {
return includeStopRow;
} | 3.68 |
hadoop_AssumedRoleCredentialProvider_resolveCredentials | /**
* Get credentials.
* @return the credentials
* @throws StsException if none could be obtained.
*/
@Override
@Retries.RetryRaw
public AwsCredentials resolveCredentials() {
try {
return invoker.retryUntranslated("resolveCredentials",
true,
stsProvider::resolveCredentials);
} catch (IOException e) {
// this is in the signature of retryUntranslated;
      // it's hard to see how this could be raised, but for
// completeness, it is wrapped as an Amazon Client Exception
// and rethrown.
throw new CredentialInitializationException(
"getCredentials failed: " + e,
e);
} catch (SdkClientException e) {
LOG.error("Failed to resolve credentials for role {}",
arn, e);
throw e;
}
} | 3.68 |
flink_BaseHybridHashTable_getNextBuffer | /**
* Gets the next buffer to be used with the hash-table, either for an in-memory partition, or
* for the table buckets. This method returns <tt>null</tt>, if no more buffer is available.
* Spilling a partition may free new buffers then.
*
* @return The next buffer to be used by the hash-table, or null, if no buffer remains.
*/
public MemorySegment getNextBuffer() {
// check if the pool directly offers memory
MemorySegment segment = this.internalPool.nextSegment();
if (segment != null) {
return segment;
}
// check if there are write behind buffers that actually are to be used for the hash table
if (this.buildSpillRetBufferNumbers > 0) {
// grab at least one, no matter what
MemorySegment toReturn;
try {
toReturn = this.buildSpillReturnBuffers.take();
} catch (InterruptedException iex) {
throw new RuntimeException(
"Hybrid Hash Join was interrupted while taking a buffer.");
}
this.buildSpillRetBufferNumbers--;
// grab as many more buffers as are available directly
returnSpillBuffers();
return toReturn;
} else {
return null;
}
} | 3.68 |
hadoop_NamenodeStatusReport_haStateValid | /**
* If the HA state is valid.
*
* @return If the HA state is valid.
*/
public boolean haStateValid() {
return this.haStateValid;
} | 3.68 |
hbase_FilterBase_toByteArray | /**
* Return length 0 byte array for Filters that don't require special serialization
*/
@Override
public byte[] toByteArray() throws IOException {
return new byte[0];
} | 3.68 |
open-banking-gateway_PathHeadersMapperTemplate_forValidation | /**
* Converts context object into object that can be used for validation.
* @param context Context to convert
* @return Validatable object that can be used with {@link de.adorsys.opba.protocol.xs2a.service.xs2a.validation.Xs2aValidator}
* to check if all necessary parameters are present
*/
public PathHeadersToValidate<P, H> forValidation(C context) {
return new PathHeadersToValidate<>(
toPath.map(context),
toHeaders.map(context)
);
} | 3.68 |
hbase_HbckTableInfo_checkRegionChain | /**
* Check the region chain (from META) of this table. We are looking for holes, overlaps, and
* cycles.
* @return false if there are errors
*/
public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOException {
    // When the table is disabled there is no need to check the region chain. If some of the
    // regions are accidentally deployed, the code below might report issues such as a missing
    // start or end region or a region hole in the chain, and may try to fix them, which is unwanted.
if (hbck.isTableDisabled(this.tableName)) {
return true;
}
int originalErrorsCount = hbck.getErrors().getErrorList().size();
Multimap<byte[], HbckRegionInfo> regions = sc.calcCoverage();
SortedSet<byte[]> splits = sc.getSplits();
byte[] prevKey = null;
byte[] problemKey = null;
if (splits.isEmpty()) {
// no region for this table
handler.handleHoleInRegionChain(HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
}
for (byte[] key : splits) {
Collection<HbckRegionInfo> ranges = regions.get(key);
if (prevKey == null && !Bytes.equals(key, HConstants.EMPTY_BYTE_ARRAY)) {
for (HbckRegionInfo rng : ranges) {
handler.handleRegionStartKeyNotEmpty(rng);
}
}
// check for degenerate ranges
for (HbckRegionInfo rng : ranges) {
// special endkey case converts '' to null
byte[] endKey = rng.getEndKey();
endKey = (endKey.length == 0) ? null : endKey;
if (Bytes.equals(rng.getStartKey(), endKey)) {
handler.handleDegenerateRegion(rng);
}
}
if (ranges.size() == 1) {
// this split key is ok -- no overlap, not a hole.
if (problemKey != null) {
LOG.warn("reached end of problem group: " + Bytes.toStringBinary(key));
}
problemKey = null; // fell through, no more problem.
} else if (ranges.size() > 1) {
// set the new problem key group name, if already have problem key, just
// keep using it.
if (problemKey == null) {
// only for overlap regions.
LOG.warn("Naming new problem group: " + Bytes.toStringBinary(key));
problemKey = key;
}
overlapGroups.putAll(problemKey, ranges);
// record errors
ArrayList<HbckRegionInfo> subRange = new ArrayList<>(ranges);
        // this is dumb and n^2, but this shouldn't happen often
for (HbckRegionInfo r1 : ranges) {
if (r1.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
continue;
}
subRange.remove(r1);
for (HbckRegionInfo r2 : subRange) {
if (r2.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
continue;
}
// general case of same start key
if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey()) == 0) {
handler.handleDuplicateStartKeys(r1, r2);
} else if (
Bytes.compareTo(r1.getEndKey(), r2.getStartKey()) == 0
&& r1.getHdfsHRI().getRegionId() == r2.getHdfsHRI().getRegionId()
) {
LOG.info("this is a split, log to splits");
handler.handleSplit(r1, r2);
} else {
// overlap
handler.handleOverlapInRegionChain(r1, r2);
}
}
}
} else if (ranges.isEmpty()) {
if (problemKey != null) {
LOG.warn("reached end of problem group: " + Bytes.toStringBinary(key));
}
problemKey = null;
byte[] holeStopKey = sc.getSplits().higher(key);
// if higher key is null we reached the top.
if (holeStopKey != null) {
// hole
handler.handleHoleInRegionChain(key, holeStopKey);
}
}
prevKey = key;
}
// When the last region of a table is proper and having an empty end key, 'prevKey'
// will be null.
if (prevKey != null) {
handler.handleRegionEndKeyNotEmpty(prevKey);
}
// TODO fold this into the TableIntegrityHandler
if (hbck.getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) {
boolean ok = handleOverlapsParallel(handler, prevKey);
if (!ok) {
return false;
}
} else {
for (Collection<HbckRegionInfo> overlap : overlapGroups.asMap().values()) {
handler.handleOverlapGroup(overlap);
}
}
if (HBaseFsck.shouldDisplayFullReport()) {
// do full region split map dump
hbck.getErrors().print("---- Table '" + this.tableName + "': region split map");
dump(splits, regions);
hbck.getErrors().print("---- Table '" + this.tableName + "': overlap groups");
dumpOverlapProblems(overlapGroups);
hbck.getErrors().print("There are " + overlapGroups.keySet().size() + " overlap groups with "
+ overlapGroups.size() + " overlapping regions");
}
if (!sidelinedRegions.isEmpty()) {
LOG.warn("Sidelined big overlapped regions, please bulk load them!");
hbck.getErrors()
.print("---- Table '" + this.tableName + "': sidelined big overlapped regions");
dumpSidelinedRegions(sidelinedRegions);
}
return hbck.getErrors().getErrorList().size() == originalErrorsCount;
} | 3.68 |
rocketmq-connect_WorkerSourceTask_initializeAndStart | /**
 * Initialize and start.
*/
@Override
protected void initializeAndStart() {
try {
producer.start();
} catch (MQClientException e) {
log.error("{} Source task producer start failed!!", this);
throw new ConnectException(e);
}
sourceTask.init(sourceTaskContext);
sourceTask.start(taskConfig);
log.info("{} Source task finished initialization and start", this);
} | 3.68 |
flink_ThreadBase_run | /** Implements exception handling and delegates to go(). */
public void run() {
try {
go();
} catch (Throwable t) {
internalHandleException(
new IOException(
"Thread '"
+ getName()
+ "' terminated due to an exception: "
+ t.getMessage(),
t));
}
} | 3.68 |
framework_Upload_fireUpdateProgress | /**
* Emits the progress event.
*
* @param totalBytes
* bytes received so far
* @param contentLength
* actual size of the file being uploaded, if known
*
*/
protected void fireUpdateProgress(long totalBytes, long contentLength) {
// this is implemented differently than other listeners to maintain
// backwards compatibility
if (progressListeners != null) {
for (ProgressListener l : progressListeners) {
l.updateProgress(totalBytes, contentLength);
}
}
} | 3.68 |
graphhopper_CHStorage_getShortcuts | /**
* The number of shortcuts that were added to this storage
*/
public int getShortcuts() {
return shortcutCount;
} | 3.68 |
hbase_VisibilityController_requireScannerOwner | /**
* Verify, when servicing an RPC, that the caller is the scanner owner. If so, we assume that
* access control is correctly enforced based on the checks performed in preScannerOpen()
*/
private void requireScannerOwner(InternalScanner s) throws AccessDeniedException {
if (!RpcServer.isInRpcCallContext()) return;
String requestUName = RpcServer.getRequestUserName().orElse(null);
String owner = scannerOwners.get(s);
if (authorizationEnabled && owner != null && !owner.equals(requestUName)) {
throw new AccessDeniedException("User '" + requestUName + "' is not the scanner owner!");
}
} | 3.68 |
pulsar_PulsarAdminException_wrap | /**
* Clone the exception and grab the current stacktrace.
* @param e a PulsarAdminException
* @return a new PulsarAdminException, of the same class.
*/
public static PulsarAdminException wrap(PulsarAdminException e) {
PulsarAdminException cloned = e.clone();
if (e.getClass() != cloned.getClass()) {
throw new IllegalStateException("Cloning a " + e.getClass() + " generated a "
+ cloned.getClass() + ", this is a bug, original error is " + e, e);
}
// adding a reference to the original exception.
cloned.addSuppressed(e);
return (PulsarAdminException) cloned.fillInStackTrace();
} | 3.68 |
hudi_HoodieLogBlock_getContentBytes | // Return the bytes representation of the data belonging to a LogBlock
public byte[] getContentBytes() throws IOException {
throw new HoodieException("No implementation was provided");
} | 3.68 |
zxing_CameraManager_requestPreviewFrame | /**
* A single preview frame will be returned to the handler supplied. The data will arrive as byte[]
* in the message.obj field, with width and height encoded as message.arg1 and message.arg2,
* respectively.
*
* @param handler The handler to send the message to.
* @param message The what field of the message to be sent.
*/
public synchronized void requestPreviewFrame(Handler handler, int message) {
OpenCamera theCamera = camera;
if (theCamera != null && previewing) {
previewCallback.setHandler(handler, message);
theCamera.getCamera().setOneShotPreviewCallback(previewCallback);
}
} | 3.68 |
shardingsphere-elasticjob_TriggerNode_getTriggerRoot | /**
* Get trigger root.
*
* @return trigger root
*/
public String getTriggerRoot() {
return ROOT;
} | 3.68 |
framework_AbstractListing_getDataCommunicator | /**
* Returns the data communicator of this listing.
*
* @return the data communicator, not null
*/
public DataCommunicator<T> getDataCommunicator() {
return dataCommunicator;
} | 3.68 |
flink_TableConfig_getMaxGeneratedCodeLength | /**
* Returns the current threshold where generated code will be split into sub-function calls.
* Java has a maximum method length of 64 KB. This setting allows for finer granularity if
* necessary.
*
 * <p>The default value is 4000 instead of 64 KB because, by default, the JIT refuses to compile
 * methods with more than 8K bytes of bytecode.
*/
public Integer getMaxGeneratedCodeLength() {
return this.configuration.getInteger(TableConfigOptions.MAX_LENGTH_GENERATED_CODE);
} | 3.68 |
hadoop_AbstractS3ACommitter_getOutputPath | /**
* Final path of output, in the destination FS.
* @return the path
*/
@Override
public final Path getOutputPath() {
return outputPath;
} | 3.68 |
hadoop_TypedBytesWritable_getValue | /** Get the typed bytes as a Java object. */
public Object getValue() {
try {
ByteArrayInputStream bais = new ByteArrayInputStream(getBytes());
TypedBytesInput tbi = TypedBytesInput.get(new DataInputStream(bais));
Object obj = tbi.read();
return obj;
} catch (IOException e) {
throw new RuntimeException(e);
}
} | 3.68 |
flink_TableSink_getFieldTypes | /** @deprecated Use the field types of {@link #getTableSchema()} instead. */
@Deprecated
default TypeInformation<?>[] getFieldTypes() {
return null;
} | 3.68 |
flink_ExecutionPlanUtil_getExecutionPlanAsJSON | /** Extracts the execution plan (as JSON) from the given {@link Plan}. */
public static String getExecutionPlanAsJSON(Plan plan) {
checkNotNull(plan);
ExecutionPlanJSONGenerator jsonGenerator = getJSONGenerator();
return jsonGenerator.getExecutionPlan(plan);
} | 3.68 |
framework_TableQuery_fetchMetaData | /**
* Fetches name(s) of primary key column(s) from DB metadata.
*
* Also tries to get the escape string to be used in search strings.
*/
private void fetchMetaData() {
Connection connection = null;
ResultSet rs = null;
ResultSet tables = null;
try {
connection = getConnection();
DatabaseMetaData dbmd = connection.getMetaData();
if (dbmd != null) {
tables = dbmd.getTables(catalogName, schemaName, tableName,
null);
if (!tables.next()) {
String catalog = (catalogName != null)
? catalogName.toUpperCase(Locale.ROOT)
: null;
String schema = (schemaName != null)
? schemaName.toUpperCase(Locale.ROOT)
: null;
tables = dbmd.getTables(catalog, schema,
tableName.toUpperCase(Locale.ROOT), null);
if (!tables.next()) {
throw new IllegalArgumentException(
"Table with the name \"" + getFullTableName()
+ "\" was not found. Check your database contents.");
} else {
catalogName = catalog;
schemaName = schema;
tableName = tableName.toUpperCase(Locale.ROOT);
}
}
tables.close();
rs = dbmd.getPrimaryKeys(catalogName, schemaName, tableName);
List<String> names = new ArrayList<String>();
while (rs.next()) {
names.add(rs.getString("COLUMN_NAME"));
}
rs.close();
if (!names.isEmpty()) {
primaryKeyColumns = names;
}
if (primaryKeyColumns == null || primaryKeyColumns.isEmpty()) {
throw new IllegalArgumentException(
"Primary key constraints have not been defined for the table \""
+ getFullTableName()
+ "\". Use FreeFormQuery to access this table.");
}
for (String colName : primaryKeyColumns) {
if (colName.equalsIgnoreCase("rownum")) {
                    if (getSqlGenerator() instanceof OracleGenerator
                            || getSqlGenerator() instanceof MSSQLGenerator) {
throw new IllegalArgumentException(
"When using Oracle or MSSQL, a primary key column"
+ " named \'rownum\' is not allowed!");
}
}
}
}
} catch (SQLException e) {
throw new RuntimeException(e);
} finally {
try {
releaseConnection(connection, null, rs);
} catch (SQLException ignore) {
} finally {
try {
if (tables != null) {
tables.close();
}
} catch (SQLException ignore) {
}
}
}
} | 3.68 |
starts_AnnotationVisitor_visit | /**
* Visits a primitive value of the annotation.
*
* @param name
* the value name.
* @param value
* the actual value, whose type must be {@link Byte},
* {@link Boolean}, {@link Character}, {@link Short},
* {@link Integer} , {@link Long}, {@link Float}, {@link Double},
* {@link String} or {@link Type} or OBJECT or ARRAY sort. This
* value can also be an array of byte, boolean, short, char, int,
* long, float or double values (this is equivalent to using
* {@link #visitArray visitArray} and visiting each array element
* in turn, but is more convenient).
*/
public void visit(String name, Object value) {
if (av != null) {
av.visit(name, value);
}
} | 3.68 |
framework_BasicEvent_getEnd | /*
* (non-Javadoc)
*
* @see com.vaadin.addon.calendar.event.CalendarEvent#getEnd()
*/
@Override
public Date getEnd() {
return end;
} | 3.68 |
hbase_MasterCoprocessorHost_postTruncateRegionAction | /**
* Invoked after calling the truncate region procedure
* @param region Region which was truncated
* @param user The user
*/
public void postTruncateRegionAction(final RegionInfo region, User user) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@Override
public void call(MasterObserver observer) throws IOException {
observer.postTruncateRegionAction(this, region);
}
});
} | 3.68 |
cron-utils_FieldConstraintsBuilder_withShiftedStringMapping | /**
* Shifts integer representation of weekday/month names.
*
* @param shiftSize - size of the shift
* @return same FieldConstraintsBuilder instance
*/
public FieldConstraintsBuilder withShiftedStringMapping(final int shiftSize) {
if (shiftSize > 0 || endRange < stringMapping.size()) {
for (final Entry<String, Integer> entry : stringMapping.entrySet()) {
int value = entry.getValue();
value += shiftSize;
if (value > endRange) {
value -= stringMapping.size();
} else if (value < startRange) {
value += (startRange - endRange);
}
stringMapping.put(entry.getKey(), value);
}
}
return this;
} | 3.68 |