name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hadoop_ApplicationMaster_getContainerStartCommand | /**
* Return the command used to start this container.
*/
private List<String> getContainerStartCommand() throws IOException {
// Set the necessary command to execute on the allocated container
List<String> vargs = new ArrayList<>();
// Set executable command
vargs.add("./" + DynoConstants.START_SCRIPT.getResourcePath());
String component = isNameNodeLauncher ? "namenode" : "datanode";
vargs.add(component);
if (isNameNodeLauncher) {
vargs.add(remoteStoragePath.getFileSystem(conf)
.makeQualified(remoteStoragePath).toString());
} else {
vargs.add(namenodeServiceRpcAddress);
vargs.add(String.valueOf(amOptions.getDataNodeLaunchDelaySec() < 1 ? 0
: RAND.nextInt(
Ints.checkedCast(amOptions.getDataNodeLaunchDelaySec()))));
}
// Add log redirect params
vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");
vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");
LOG.info("Completed setting up command for " + component + ": " + vargs);
return Lists.newArrayList(Joiner.on(" ").join(vargs));
} | 3.68 |
morf_ConcatenatedField_deepCopyInternal | /**
* @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation)
*/
@Override
protected AliasedField deepCopyInternal(DeepCopyTransformation transformer) {
return new ConcatenatedField(getAlias(), FluentIterable.from(fields).transform(transformer::deepCopy).toList());
} | 3.68 |
hadoop_RpcProgramPortmap_dump | /**
* This procedure enumerates all entries in the port mapper's database. The
* procedure takes no parameters and returns a list of program, version,
* protocol, and port values.
*/
private XDR dump(int xid, XDR in, XDR out) {
PortmapMapping[] pmapList = map.values().toArray(new PortmapMapping[0]);
return PortmapResponse.pmapList(out, xid, pmapList);
} | 3.68 |
hbase_HFileOutputFormat2_createFamilyConfValueMap | /**
* Run inside the task to deserialize column family to given conf value map.
* @param conf to read the serialized values from
* @param confName conf key to read from the configuration
* @return a map of column family to the given configuration value
*/
private static Map<byte[], String> createFamilyConfValueMap(Configuration conf, String confName) {
Map<byte[], String> confValMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
String confVal = conf.get(confName, "");
for (String familyConf : confVal.split("&")) {
String[] familySplit = familyConf.split("=");
if (familySplit.length != 2) {
continue;
}
try {
confValMap.put(Bytes.toBytes(URLDecoder.decode(familySplit[0], "UTF-8")),
URLDecoder.decode(familySplit[1], "UTF-8"));
} catch (UnsupportedEncodingException e) {
// will not happen with UTF-8 encoding
throw new AssertionError(e);
}
}
return confValMap;
} | 3.68 |
flink_NettyMessageClientDecoderDelegate_channelInactive | /**
* Releases resources when the channel is closed. When exceptions are thrown during processing
* received netty buffers, {@link CreditBasedPartitionRequestClientHandler} is expected to catch
* the exception and close the channel and trigger this notification.
*
* @param ctx The context of the channel close notification.
*/
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
IOUtils.cleanup(LOG, bufferResponseDecoder, nonBufferResponseDecoder);
frameHeaderBuffer.release();
super.channelInactive(ctx);
} | 3.68 |
flink_BinarySegmentUtils_setDouble | /**
* Sets a double value into the segments at the given offset.
*
* @param segments target segments.
* @param offset value offset.
* @param value double value to set.
*/
public static void setDouble(MemorySegment[] segments, int offset, double value) {
if (inFirstSegment(segments, offset, 8)) {
segments[0].putDouble(offset, value);
} else {
setDoubleMultiSegments(segments, offset, value);
}
} | 3.68 |
shardingsphere-elasticjob_RDBJobEventRepository_getInstance | /**
* The same data source always returns the same RDB job event repository instance.
*
* @param dataSource dataSource
* @return RDBJobEventStorage instance
* @throws SQLException SQLException
*/
public static RDBJobEventRepository getInstance(final DataSource dataSource) throws SQLException {
return getInstance(() -> STORAGE_MAP.computeIfAbsent(dataSource, ds -> {
try {
return new RDBJobEventRepository(ds);
} catch (final SQLException ex) {
throw new TracingStorageUnavailableException(ex);
}
}));
} | 3.68 |
hadoop_HeaderProcessing_decodeBytes | /**
* Get the string value from the bytes:
* returns null if the input is null, otherwise the UTF-8 decoded
* string.
* @param bytes source bytes
* @return decoded value
*/
public static String decodeBytes(byte[] bytes) {
return bytes == null
? null
: new String(bytes, StandardCharsets.UTF_8);
} | 3.68 |
framework_HierarchyMapper_setInMemorySorting | /**
* Sets the current in-memory sorting. This will cause the hierarchy to be
* constructed again.
*
* @param inMemorySorting
* the in-memory sorting
*/
public void setInMemorySorting(Comparator<T> inMemorySorting) {
this.inMemorySorting = inMemorySorting;
} | 3.68 |
hbase_Table_getRequestAttributes | /**
* Get the attributes to be submitted with requests
* @return map of request attributes
*/
default Map<String, byte[]> getRequestAttributes() {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
framework_FilesystemContainer_addItemProperty | /**
* Filesystem container does not support adding new properties.
*
* @see Item#addItemProperty(Object, Property)
*/
@Override
public boolean addItemProperty(Object id, Property property)
throws UnsupportedOperationException {
throw new UnsupportedOperationException("Filesystem container "
+ "does not support adding new properties");
} | 3.68 |
hmily_Timeout_isDefault | /**
* Whether this is a custom timeout.
*
* @return true or false.
*/
default boolean isDefault() {
return true;
} | 3.68 |
hadoop_NvidiaGPUPluginForRuntimeV2_generateAllDeviceCombination | /**
* For every possible combination of i elements, we generate a map whose
* key is the combination and whose value is its cost.
*/
private void generateAllDeviceCombination(
Map<Integer, List<Map.Entry<Set<Device>, Integer>>> cTable,
Device[] allDevices, int n) {
// allocated devices count ranges from 2 to n-1
for (int i = 2; i < n; i++) {
Map<Set<Device>, Integer> combinationToCost =
new HashMap<>();
buildCombination(combinationToCost, allDevices, n, i);
// sort the map entry by cost ascending order
List<Map.Entry<Set<Device>, Integer>> listSortedByCost =
new LinkedList<>(combinationToCost.entrySet());
Collections.sort(listSortedByCost,
(o1, o2) -> (o1.getValue()).compareTo(o2.getValue()));
cTable.put(i, listSortedByCost);
}
} | 3.68 |
hudi_SerializableSchema_writeObjectTo | // create a public write method for unit test
public void writeObjectTo(ObjectOutputStream out) throws IOException {
// Note: writeUTF cannot support string length > 64K. So use writeObject which has small overhead (relatively).
out.writeObject(schema.toString());
} | 3.68 |
hadoop_SharedKeyCredentials_addCanonicalizedHeaders | /**
* Add x-ms- prefixed headers in a fixed order.
*
* @param conn the HttpURLConnection for the operation
* @param canonicalizedString the canonicalized string to add the canonicalized headers to.
*/
private static void addCanonicalizedHeaders(final HttpURLConnection conn, final StringBuilder canonicalizedString) {
// Look for header names that start with
// HeaderNames.PrefixForStorageHeader
// Then sort them in case-insensitive manner.
final Map<String, List<String>> headers = conn.getRequestProperties();
final ArrayList<String> httpStorageHeaderNameArray = new ArrayList<String>();
for (final String key : headers.keySet()) {
if (key.toLowerCase(Locale.ROOT).startsWith(AbfsHttpConstants.HTTP_HEADER_PREFIX)) {
httpStorageHeaderNameArray.add(key.toLowerCase(Locale.ROOT));
}
}
Collections.sort(httpStorageHeaderNameArray);
// Now go through each header's values in the sorted order and append
// them to the canonicalized string.
for (final String key : httpStorageHeaderNameArray) {
final StringBuilder canonicalizedElement = new StringBuilder(key);
String delimiter = ":";
final ArrayList<String> values = getHeaderValues(headers, key);
boolean appendCanonicalizedElement = false;
// Go through values, unfold them, and then append them to the
// canonicalized element string.
for (final String value : values) {
if (value != null) {
appendCanonicalizedElement = true;
}
// Unfolding is simply removal of CRLF.
final String unfoldedValue = CRLF.matcher(value)
.replaceAll(Matcher.quoteReplacement(""));
// Append it to the canonicalized element string.
canonicalizedElement.append(delimiter);
canonicalizedElement.append(unfoldedValue);
delimiter = ",";
}
// Now, add this canonicalized element to the canonicalized header
// string.
if (appendCanonicalizedElement) {
appendCanonicalizedElement(canonicalizedString, canonicalizedElement.toString());
}
}
} | 3.68 |
hbase_UnsafeAccess_toByte | /**
* Returns the byte at the given offset of the object
* @return the byte at the given offset
*/
public static byte toByte(Object ref, long offset) {
return HBasePlatformDependent.getByte(ref, offset);
} | 3.68 |
framework_AbsoluteLayoutRelativeSizeContent_createHalfTableOnFixed | /**
* Creates an {@link AbsoluteLayout} of fixed size that contains a
* half-sized {@link Table}.
*
* @return the created layout
*/
private Component createHalfTableOnFixed() {
AbsoluteLayout absoluteLayout = new AbsoluteLayout();
absoluteLayout.setWidth(200, Unit.PIXELS);
absoluteLayout.setHeight(200, Unit.PIXELS);
absoluteLayout.setCaption("half-sized table expected");
Table table = new Table();
table.setWidth(50, Unit.PERCENTAGE);
table.setHeight(50, Unit.PERCENTAGE);
table.setId("half-table");
absoluteLayout.addComponent(table);
return absoluteLayout;
} | 3.68 |
hbase_ZKWatcher_syncOrTimeout | /**
* Forces a synchronization of this ZooKeeper client connection within a timeout. Enforcing a
* timeout lets the callers fail-fast rather than wait forever for the sync to finish.
* <p>
* Executing this method before running other methods will ensure that the subsequent operations
* are up-to-date and consistent as of the time that the sync is complete.
* <p>
* This is used for compareAndSwap type operations where we need to read the data of an existing
* node and delete or transition that node, utilizing the previously read version and data. We
* want to ensure that the version read is up-to-date from when we begin the operation.
*/
public void syncOrTimeout(String path) throws KeeperException {
final CountDownLatch latch = new CountDownLatch(1);
long startTime = EnvironmentEdgeManager.currentTime();
this.recoverableZooKeeper.sync(path, (i, s, o) -> latch.countDown(), null);
try {
if (!latch.await(zkSyncTimeout, TimeUnit.MILLISECONDS)) {
LOG.warn("sync() operation to ZK timed out. Configured timeout: {}ms. This usually points "
+ "to a ZK side issue. Check ZK server logs and metrics.", zkSyncTimeout);
throw new KeeperException.RequestTimeoutException();
}
} catch (InterruptedException e) {
LOG.warn("Interrupted waiting for ZK sync() to finish.", e);
Thread.currentThread().interrupt();
return;
}
if (LOG.isDebugEnabled()) {
// TODO: Switch to a metric once server side ZK watcher metrics are implemented. This is a
// useful metric to have since the latency of sync() impacts the callers.
LOG.debug("ZK sync() operation took {}ms", EnvironmentEdgeManager.currentTime() - startTime);
}
} | 3.68 |
dubbo_Page_hasData | /**
* Returns whether the page has data at all.
*
* @return {@code true} if the page contains data, {@code false} otherwise
*/
default boolean hasData() {
return getDataSize() > 0;
} | 3.68 |
morf_AliasedField_isNotNull | /**
* @return criteria for this field being not null
*/
public Criterion isNotNull() {
return Criterion.isNotNull(this);
} | 3.68 |
flink_SchedulerNG_requestJobResourceRequirements | /**
* Read current {@link JobResourceRequirements job resource requirements}.
*
* @return Current resource requirements.
*/
default JobResourceRequirements requestJobResourceRequirements() {
throw new UnsupportedOperationException(
String.format(
"The %s does not support changing the parallelism without a job restart. This feature is currently only expected to work with the %s.",
getClass().getSimpleName(), AdaptiveScheduler.class.getSimpleName()));
} | 3.68 |
hbase_VersionInfo_getDate | /**
* The date that hbase was compiled.
* @return the compilation date in unix date format
*/
public static String getDate() {
return Version.date;
} | 3.68 |
morf_SqlDialect_getSqlForEvery | /**
* Converts the every function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForEvery(Function function) {
return getSqlForMin(function);
} | 3.68 |
framework_InfoSection_getThemeVersion | /**
* Finds out the version of the current theme (i.e. the version of Vaadin
* used to compile it)
*
* @since 7.1
* @return The full version as a string
*/
private String getThemeVersion() {
Element div = DOM.createDiv();
div.setClassName(THEME_VERSION_CLASSNAME);
RootPanel.get().getElement().appendChild(div);
String version = getComputedStyle(div, ":after", "content");
div.removeFromParent();
if (version != null) {
// String version = new ComputedStyle(div).getProperty("content");
version = version.replace("'", "");
version = version.replace("\"", "");
}
return version;
} | 3.68 |
flink_GenericRowData_ofKind | /**
* Creates an instance of {@link GenericRowData} with given kind and field values.
*
* <p>Note: All fields of the row must be internal data structures.
*/
public static GenericRowData ofKind(RowKind kind, Object... values) {
GenericRowData row = new GenericRowData(kind, values.length);
for (int i = 0; i < values.length; ++i) {
row.setField(i, values[i]);
}
return row;
} | 3.68 |
hbase_HMaster_getActiveMasterInfoPort | /** Returns info port of active master or 0 if any exception occurs. */
public int getActiveMasterInfoPort() {
return activeMasterManager.getActiveMasterInfoPort();
} | 3.68 |
framework_Slot_hasCaption | /**
* Does the slot have a caption.
*
* @return {@code true} if the slot has a caption, {@code false} otherwise
*/
public boolean hasCaption() {
return caption != null;
} | 3.68 |
hadoop_IOStatisticsBinding_pairedTrackerFactory | /**
* Create a DurationTrackerFactory which aggregates the tracking
* of two other factories.
* @param first first tracker factory
* @param second second tracker factory
* @return a factory
*/
public static DurationTrackerFactory pairedTrackerFactory(
final DurationTrackerFactory first,
final DurationTrackerFactory second) {
return new PairedDurationTrackerFactory(first, second);
} | 3.68 |
framework_AbstractDateField_getResolution | /**
* Gets the resolution.
*
* @return the date/time field resolution
*/
public R getResolution() {
return resolution;
} | 3.68 |
querydsl_SQLTemplates_serializeMerge | /**
* template method for MERGE serialization
*
* @param metadata
* @param entity
* @param keys
* @param columns
* @param values
* @param subQuery
* @param context
*/
public void serializeMerge(QueryMetadata metadata, RelationalPath<?> entity,
List<Path<?>> keys, List<Path<?>> columns, List<Expression<?>> values,
SubQueryExpression<?> subQuery, SQLSerializer context) {
context.serializeForMerge(metadata, entity, keys, columns, values, subQuery);
if (!metadata.getFlags().isEmpty()) {
context.serialize(Position.END, metadata.getFlags());
}
} | 3.68 |
framework_BrowserWindowOpener_getWindowName | /**
* Gets the target window name.
*
* @see #setWindowName(String)
*
* @return the window target string
*/
public String getWindowName() {
return getState(false).target;
} | 3.68 |
morf_SqlDialect_createAllIndexStatements | /**
* Helper method to create all index statements defined for a table
*
* @param table the table to create indexes for
* @return a list of index statements
*/
protected List<String> createAllIndexStatements(Table table) {
List<String> indexStatements = new ArrayList<>();
for (Index index : table.indexes()) {
indexStatements.addAll(addIndexStatements(table, index));
}
return indexStatements;
} | 3.68 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_buildSplitClusteringGroups | /**
* Generate clustering groups according to split rules.
* Currently, we always split a bucket into two sub-buckets.
*
* @param identifier bucket identifier
* @param fileSlices file slice candidate to be built as split clustering groups
* @param splitSlot maximum number of new buckets allowed to be produced, constraining the upper bound of the total number of buckets
* @return list of clustering groups, the number of new buckets generated, and the remaining file slices (that were not split)
*/
protected Triple<List<HoodieClusteringGroup>, Integer, List<FileSlice>> buildSplitClusteringGroups(
ConsistentBucketIdentifier identifier, List<FileSlice> fileSlices, int splitSlot) {
List<HoodieClusteringGroup> retGroup = new ArrayList<>();
List<FileSlice> fsUntouched = new ArrayList<>();
long splitSize = getSplitSize();
int remainingSplitSlot = splitSlot;
for (FileSlice fs : fileSlices) {
boolean needSplit = fs.getTotalFileSize() > splitSize;
if (!needSplit || remainingSplitSlot == 0) {
fsUntouched.add(fs);
continue;
}
Option<List<ConsistentHashingNode>> nodes = identifier.splitBucket(fs.getFileId());
// Bucket cannot be split
if (!nodes.isPresent()) {
fsUntouched.add(fs);
continue;
}
remainingSplitSlot--;
List<FileSlice> fsList = Collections.singletonList(fs);
retGroup.add(HoodieClusteringGroup.newBuilder()
.setSlices(getFileSliceInfo(fsList))
.setNumOutputFileGroups(2)
.setMetrics(buildMetrics(fsList))
.setExtraMetadata(constructExtraMetadata(fs.getPartitionPath(), nodes.get(), identifier.getMetadata().getSeqNo()))
.build());
}
return Triple.of(retGroup, splitSlot - remainingSplitSlot, fsUntouched);
} | 3.68 |
flink_BaseMappingExtractor_verifyMappingForMethod | /** Checks if the given method can be called and returns what the hints declare. */
private void verifyMappingForMethod(
Method method,
Map<FunctionSignatureTemplate, FunctionResultTemplate> collectedMappingsPerMethod,
MethodVerification verification) {
collectedMappingsPerMethod.forEach(
(signature, result) ->
verification.verify(method, signature.toClass(), result.toClass()));
} | 3.68 |
flink_SessionWindowAssigner_withGap | /**
* Creates a new {@code SessionWindowAssigner} {@link WindowAssigner} that assigns elements to
* sessions based on the timestamp.
*
* @param size The session timeout, i.e. the time gap between sessions
* @return The policy.
*/
public static SessionWindowAssigner withGap(Duration size) {
return new SessionWindowAssigner(size.toMillis(), true);
} | 3.68 |
hbase_ConnectionFactory_createConnection | /**
* Create a new Connection instance using the passed <code>conf</code> instance. Connection
* encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
* created from returned connection share zookeeper connection, meta cache, and connections to
* region servers and masters. <br>
* The caller is responsible for calling {@link Connection#close()} on the returned connection
* instance. Typical usage:
*
* <pre>
* Connection connection = ConnectionFactory.createConnection(conf);
* Table table = connection.getTable(TableName.valueOf("table1"));
* try {
* table.get(...);
* ...
* } finally {
* table.close();
* connection.close();
* }
* </pre>
*
* @param conf configuration
* @param user the user the connection is for
* @param pool the thread pool to use for batch operations
* @param connectionAttributes attributes to be sent along to server during connection establish
* @return Connection object for <code>conf</code>
*/
public static Connection createConnection(Configuration conf, ExecutorService pool,
final User user, Map<String, byte[]> connectionAttributes) throws IOException {
Class<?> clazz = conf.getClass(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
ConnectionOverAsyncConnection.class, Connection.class);
if (clazz != ConnectionOverAsyncConnection.class) {
try {
// Default HCM#HCI is not accessible; make it so before invoking.
Constructor<?> constructor = clazz.getDeclaredConstructor(Configuration.class,
ExecutorService.class, User.class, Map.class);
constructor.setAccessible(true);
return user.runAs((PrivilegedExceptionAction<Connection>) () -> (Connection) constructor
.newInstance(conf, pool, user, connectionAttributes));
} catch (Exception e) {
throw new IOException(e);
}
} else {
return FutureUtils.get(createAsyncConnection(conf, user, connectionAttributes))
.toConnection();
}
} | 3.68 |
hudi_HoodieAsyncService_fetchNextAsyncServiceInstant | /**
* Fetch next pending compaction/clustering instant if available.
*
* @return {@link HoodieInstant} corresponding to the next pending compaction/clustering.
* @throws InterruptedException
*/
HoodieInstant fetchNextAsyncServiceInstant() throws InterruptedException {
LOG.info(String.format("Waiting for next instant up to %d seconds", POLLING_SECONDS));
HoodieInstant instant = pendingInstants.poll(POLLING_SECONDS, TimeUnit.SECONDS);
if (instant != null) {
try {
queueLock.lock();
// Signal waiting thread
consumed.signal();
} finally {
queueLock.unlock();
}
}
return instant;
} | 3.68 |
hbase_RowResource_increment | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes Increment
* on HTable.
* @param model instance of CellSetModel
* @return Response 200 OK, 304 Not modified, 400 Bad request
*/
Response increment(final CellSetModel model) {
Table table = null;
Increment increment = null;
try {
table = servlet.getTable(tableResource.getName());
if (model.getRows().size() != 1) {
servlet.getMetrics().incrementFailedIncrementRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Number of rows specified is not 1." + CRLF).build();
}
RowModel rowModel = model.getRows().get(0);
byte[] key = rowModel.getKey();
if (key == null) {
key = rowspec.getRow();
}
if (key == null) {
servlet.getMetrics().incrementFailedIncrementRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Row key found to be null." + CRLF).build();
}
increment = new Increment(key);
increment.setReturnResults(returnResult);
int i = 0;
for (CellModel cell : rowModel.getCells()) {
byte[] col = cell.getColumn();
if (col == null) {
try {
col = rowspec.getColumns()[i++];
} catch (ArrayIndexOutOfBoundsException e) {
col = null;
}
}
if (col == null) {
servlet.getMetrics().incrementFailedIncrementRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Column found to be null." + CRLF).build();
}
byte[][] parts = CellUtil.parseColumn(col);
if (parts.length != 2) {
servlet.getMetrics().incrementFailedIncrementRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Column incorrectly specified." + CRLF).build();
}
increment.addColumn(parts[0], parts[1],
Long.parseLong(Bytes.toStringBinary(cell.getValue())));
}
if (LOG.isDebugEnabled()) {
LOG.debug("INCREMENT " + increment.toString());
}
Result result = table.increment(increment);
if (returnResult) {
if (result.isEmpty()) {
servlet.getMetrics().incrementFailedIncrementRequests(1);
return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT)
.entity("Increment return empty." + CRLF).build();
}
CellSetModel rModel = new CellSetModel();
RowModel rRowModel = new RowModel(result.getRow());
for (Cell cell : result.listCells()) {
rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
cell.getTimestamp(), CellUtil.cloneValue(cell)));
}
rModel.addRow(rRowModel);
servlet.getMetrics().incrementSucessfulIncrementRequests(1);
return Response.ok(rModel).build();
}
ResponseBuilder response = Response.ok();
servlet.getMetrics().incrementSucessfulIncrementRequests(1);
return response.build();
} catch (Exception e) {
servlet.getMetrics().incrementFailedIncrementRequests(1);
return processException(e);
} finally {
if (table != null) {
try {
table.close();
} catch (IOException ioe) {
LOG.debug("Exception received while closing the table " + table.getName(), ioe);
}
}
}
} | 3.68 |
morf_SqlDialect_resultSetToRecord | /**
* Given an ordered list of columns and a {@link ResultSet}, creates a
* {@link Record} from the current row.
*
* @param resultSet The {@link ResultSet}. Must have been advanced (using
* {@link ResultSet#next()}) to the appropriate row.
* @param columns The columns, ordered according to their appearance in the
* {@link ResultSet}. Use {@link ResultSetMetadataSorter} to pre-sort
* your columns according to the {@link ResultSetMetaData} if you
* can't be sure that the SQL will return the columns in the precise
* order that you are expecting.
* @return A {@link Record} representation of the current {@link ResultSet}
* row.
*/
public Record resultSetToRecord(ResultSet resultSet, Iterable<Column> columns) {
// Provide initial sizing hint to the array. This potentially means double-traversal
// of the columns if the column list is not a simple list, but it's almost certainly
// worth it to minimise the array size and prevent resizing.
RecordBuilder recordBuilder = DataSetUtils.record()
.withInitialColumnCount(Iterables.size(columns));
int idx = 1;
for (Column column : columns) {
try {
switch (column.getType()) {
case BIG_INTEGER:
long longVal = resultSet.getLong(idx);
if (resultSet.wasNull()) {
recordBuilder.setObject(column.getName(), null);
} else {
recordBuilder.setLong(column.getName(), longVal);
}
break;
case BOOLEAN:
boolean boolVal = resultSet.getBoolean(idx);
if (resultSet.wasNull()) {
recordBuilder.setObject(column.getName(), null);
} else {
recordBuilder.setBoolean(column.getName(), boolVal);
}
break;
case INTEGER:
int intVal = resultSet.getInt(idx);
if (resultSet.wasNull()) {
recordBuilder.setObject(column.getName(), null);
} else {
recordBuilder.setInteger(column.getName(), intVal);
}
break;
case DATE:
Date date = resultSet.getDate(idx);
if (date == null) {
recordBuilder.setObject(column.getName(), null);
} else {
recordBuilder.setDate(column.getName(), date);
}
break;
case DECIMAL:
recordBuilder.setBigDecimal(column.getName(), resultSet.getBigDecimal(idx));
break;
case BLOB:
recordBuilder.setByteArray(column.getName(), resultSet.getBytes(idx));
break;
case CLOB:
case STRING:
recordBuilder.setString(column.getName(), resultSet.getString(idx));
break;
default:
recordBuilder.setObject(column.getName(), resultSet.getObject(idx));
break;
}
idx++;
} catch (SQLException e) {
throw new RuntimeSqlException("Error retrieving value from result set with name [" + column.getName() + "]", e);
}
}
return recordBuilder;
} | 3.68 |
hudi_AbstractTableFileSystemView_fetchLatestFileSlice | /**
* Default implementation for fetching file-slice.
*
* @param partitionPath Partition path
* @param fileId File Id
* @return File Slice if present
*/
public Option<FileSlice> fetchLatestFileSlice(String partitionPath, String fileId) {
return Option
.fromJavaOptional(fetchLatestFileSlices(partitionPath).filter(fs -> fs.getFileId().equals(fileId)).findFirst());
} | 3.68 |
hbase_EncryptionUtil_createCryptoAES | /**
* Helper to create an instance of CryptoAES.
* @param conf The current configuration.
* @param cryptoCipherMeta The metadata for create CryptoAES.
* @return The instance of CryptoAES.
* @throws IOException if create CryptoAES failed
*/
public static CryptoAES createCryptoAES(RPCProtos.CryptoCipherMeta cryptoCipherMeta,
Configuration conf) throws IOException {
Properties properties = new Properties();
// the property for cipher class
properties.setProperty(CryptoCipherFactory.CLASSES_KEY,
conf.get("hbase.rpc.crypto.encryption.aes.cipher.class",
"org.apache.commons.crypto.cipher.JceCipher"));
// create SaslAES for client
return new CryptoAES(cryptoCipherMeta.getTransformation(), properties,
cryptoCipherMeta.getInKey().toByteArray(), cryptoCipherMeta.getOutKey().toByteArray(),
cryptoCipherMeta.getInIv().toByteArray(), cryptoCipherMeta.getOutIv().toByteArray());
} | 3.68 |
morf_XmlDataSetProducer_next | /**
* @see java.util.Iterator#next()
*/
@Override
public Record next() {
if (hasNext()) {
// Buffer this record
RecordBuilder result = DataSetUtils.record();
for (Entry<String, String> columnNameAndUpperCase : columnNamesAndUpperCase.entrySet()) {
result.setString(columnNameAndUpperCase.getValue(),
Escaping.unescapeCharacters(xmlStreamReader.getAttributeValue(XmlDataSetNode.URI, columnNameAndUpperCase.getKey()))
);
}
// Is there another
currentTagName = readNextTagInsideParent(XmlDataSetNode.DATA_NODE);
return result;
} else {
throw new NoSuchElementException("No more records");
}
} | 3.68 |
framework_ComponentSizeValidator_validateLayouts | /**
* Validates the layout and returns a collection of errors.
*
* @since 7.1
* @param ui
* The UI to validate
* @return A collection of errors. An empty collection if there are no
* errors.
*/
public static List<InvalidLayout> validateLayouts(UI ui) {
List<InvalidLayout> invalidRelativeSizes = ComponentSizeValidator
.validateComponentRelativeSizes(ui.getContent(),
new ArrayList<>(), null);
// Also check any existing subwindows
if (ui.getWindows() != null) {
for (Window subWindow : ui.getWindows()) {
invalidRelativeSizes = ComponentSizeValidator
.validateComponentRelativeSizes(subWindow.getContent(),
invalidRelativeSizes, null);
}
}
return invalidRelativeSizes;
} | 3.68 |
hadoop_FileIoProvider_sync | /**
* Sync the given {@link FileOutputStream}.
*
* @param volume target volume. null if unavailable.
* @throws IOException
*/
public void sync(
@Nullable FsVolumeSpi volume, FileOutputStream fos) throws IOException {
final long begin = profilingEventHook.beforeFileIo(volume, SYNC, 0);
try {
faultInjectorEventHook.beforeFileIo(volume, SYNC, 0);
IOUtils.fsync(fos.getChannel(), false);
profilingEventHook.afterFileIo(volume, SYNC, begin, 0);
} catch (Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
framework_FilesystemContainer_areChildrenAllowed | /**
* Tests if the specified Item in the container may have children. Since a
* <code>FileSystemContainer</code> contains files and directories, this
* method returns <code>true</code> for directory Items only.
*
* @param itemId
* the id of the item.
* @return <code>true</code> if the specified Item is a directory,
* <code>false</code> otherwise.
*/
@Override
public boolean areChildrenAllowed(Object itemId) {
return itemId instanceof File && ((File) itemId).canRead()
&& ((File) itemId).isDirectory();
} | 3.68 |
hudi_HoodieOperation_isUpdateBefore | /**
* Returns whether the operation is UPDATE_BEFORE.
*/
public static boolean isUpdateBefore(HoodieOperation operation) {
return operation == UPDATE_BEFORE;
} | 3.68 |
hbase_CompositeImmutableSegment_isEmpty | /** Returns whether the segment has any cells */
@Override
public boolean isEmpty() {
for (ImmutableSegment s : segments) {
if (!s.isEmpty()) return false;
}
return true;
} | 3.68 |
hbase_VisibilityUtils_readUserAuthsFromZKData | /**
* Reads back User auth data written to zookeeper.
* @return User auth details
*/
public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data)
throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(data)) {
int pblen = ProtobufUtil.lengthOfPBMagic();
try {
MultiUserAuthorizations.Builder builder = MultiUserAuthorizations.newBuilder();
ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
return builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
}
return null;
} | 3.68 |
hadoop_FSBuilder_must | /**
* Set mandatory long option, despite passing in a floating
* point value.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #must(String, String)
*/
@Deprecated
default B must(@Nonnull String key, double value) {
return mustLong(key, (long) value);
} | 3.68 |
hadoop_OBSCommonUtils_longBytesOption | /**
* Get a long option not smaller than the minimum allowed value, supporting
* memory prefixes K,M,G,T,P.
*
* @param conf configuration
* @param key key to look up
* @param defVal default value
* @param min minimum value
* @return the value
* @throws IllegalArgumentException if the value is below the minimum
*/
static long longBytesOption(final Configuration conf, final String key,
final long defVal,
final long min) {
long v = conf.getLongBytes(key, defVal);
Preconditions.checkArgument(
v >= min,
String.format("Value of %s: %d is below the minimum value %d", key,
v, min));
LOG.debug("Value of {} is {}", key, v);
return v;
} | 3.68 |
flink_AbstractOrcFileInputFormat_seek | /**
* The argument of {@link RecordReader#seekToRow(long)} must come from {@link
* RecordReader#getRowNumber()}. The internal implementation of ORC is very confusing. It
* has special behavior when dealing with Predicate.
*/
public void seek(CheckpointedPosition position) throws IOException {
orcReader.seekToRow(position.getOffset());
recordsToSkip = position.getRecordsAfterOffset();
} | 3.68 |
graphhopper_VectorTile_hasBoolValue | /**
* <code>optional bool bool_value = 7;</code>
*/
public boolean hasBoolValue() {
return ((bitField0_ & 0x00000040) == 0x00000040);
} | 3.68 |
hudi_HoodieTableMetadataUtil_readRecordKeysFromFileSlices | /**
* Reads the record keys from the given file slices and returns a {@link HoodieData} of {@link HoodieRecord} to be updated in the metadata table.
* If file slice does not have any base file, then iterates over the log files to get the record keys.
*/
public static HoodieData<HoodieRecord> readRecordKeysFromFileSlices(HoodieEngineContext engineContext,
List<Pair<String, FileSlice>> partitionFileSlicePairs,
boolean forDelete,
int recordIndexMaxParallelism,
String activeModule, HoodieTableMetaClient metaClient, EngineType engineType) {
if (partitionFileSlicePairs.isEmpty()) {
return engineContext.emptyHoodieData();
}
engineContext.setJobStatus(activeModule, "Record Index: reading record keys from " + partitionFileSlicePairs.size() + " file slices");
final int parallelism = Math.min(partitionFileSlicePairs.size(), recordIndexMaxParallelism);
final String basePath = metaClient.getBasePathV2().toString();
final SerializableConfiguration configuration = new SerializableConfiguration(metaClient.getHadoopConf());
return engineContext.parallelize(partitionFileSlicePairs, parallelism).flatMap(partitionAndBaseFile -> {
final String partition = partitionAndBaseFile.getKey();
final FileSlice fileSlice = partitionAndBaseFile.getValue();
if (!fileSlice.getBaseFile().isPresent()) {
List<String> logFilePaths = fileSlice.getLogFiles().sorted(HoodieLogFile.getLogFileComparator())
.map(l -> l.getPath().toString()).collect(toList());
HoodieMergedLogRecordScanner mergedLogRecordScanner = HoodieMergedLogRecordScanner.newBuilder()
.withFileSystem(metaClient.getFs())
.withBasePath(basePath)
.withLogFilePaths(logFilePaths)
.withReaderSchema(HoodieAvroUtils.getRecordKeySchema())
.withLatestInstantTime(metaClient.getActiveTimeline().filterCompletedInstants().lastInstant().map(HoodieInstant::getTimestamp).orElse(""))
.withReadBlocksLazily(configuration.get().getBoolean("", true))
.withReverseReader(false)
.withMaxMemorySizeInBytes(configuration.get().getLongBytes(MAX_MEMORY_FOR_COMPACTION.key(), DEFAULT_MAX_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES))
.withSpillableMapBasePath(FileIOUtils.getDefaultSpillableMapBasePath())
.withPartition(fileSlice.getPartitionPath())
.withOptimizedLogBlocksScan(configuration.get().getBoolean("hoodie" + HoodieMetadataConfig.OPTIMIZED_LOG_BLOCKS_SCAN, false))
.withDiskMapType(configuration.get().getEnum(SPILLABLE_DISK_MAP_TYPE.key(), SPILLABLE_DISK_MAP_TYPE.defaultValue()))
.withBitCaskDiskMapCompressionEnabled(configuration.get().getBoolean(DISK_MAP_BITCASK_COMPRESSION_ENABLED.key(), DISK_MAP_BITCASK_COMPRESSION_ENABLED.defaultValue()))
.withRecordMerger(HoodieRecordUtils.createRecordMerger(
metaClient.getBasePathV2().toString(),
engineType,
Collections.emptyList(), // TODO: support different merger classes, which is currently only known to write config
metaClient.getTableConfig().getRecordMergerStrategy()))
.build();
ClosableIterator<String> recordKeyIterator = ClosableIterator.wrap(mergedLogRecordScanner.getRecords().keySet().iterator());
return new ClosableIterator<HoodieRecord>() {
@Override
public void close() {
recordKeyIterator.close();
}
@Override
public boolean hasNext() {
return recordKeyIterator.hasNext();
}
@Override
public HoodieRecord next() {
return forDelete
? HoodieMetadataPayload.createRecordIndexDelete(recordKeyIterator.next())
: HoodieMetadataPayload.createRecordIndexUpdate(recordKeyIterator.next(), partition, fileSlice.getFileId(), fileSlice.getBaseInstantTime(), 0);
}
};
}
final HoodieBaseFile baseFile = fileSlice.getBaseFile().get();
final String filename = baseFile.getFileName();
Path dataFilePath = new Path(basePath, partition + Path.SEPARATOR + filename);
final String fileId = baseFile.getFileId();
final String instantTime = baseFile.getCommitTime();
HoodieFileReader reader = HoodieFileReaderFactory.getReaderFactory(HoodieRecord.HoodieRecordType.AVRO).getFileReader(configuration.get(), dataFilePath);
ClosableIterator<String> recordKeyIterator = reader.getRecordKeyIterator();
return new ClosableIterator<HoodieRecord>() {
@Override
public void close() {
recordKeyIterator.close();
}
@Override
public boolean hasNext() {
return recordKeyIterator.hasNext();
}
@Override
public HoodieRecord next() {
return forDelete
? HoodieMetadataPayload.createRecordIndexDelete(recordKeyIterator.next())
: HoodieMetadataPayload.createRecordIndexUpdate(recordKeyIterator.next(), partition, fileId, instantTime, 0);
}
};
});
} | 3.68 |
pulsar_LeastLongTermMessageRate_selectBroker | /**
* Find a suitable broker to assign the given bundle to.
*
* @param candidates
* The candidates for which the bundle may be assigned.
* @param bundleToAssign
* The data for the bundle to assign.
* @param loadData
* The load data from the leader broker.
* @param conf
* The service configuration.
* @return The name of the selected broker as it appears on ZooKeeper.
*/
@Override
public Optional<String> selectBroker(final Set<String> candidates, final BundleData bundleToAssign,
final LoadData loadData,
final ServiceConfiguration conf) {
bestBrokers.clear();
double minScore = Double.POSITIVE_INFINITY;
// Maintain of list of all the best scoring brokers and then randomly
// select one of them at the end.
for (String broker : candidates) {
final BrokerData brokerData = loadData.getBrokerData().get(broker);
final double score = getScore(brokerData, conf);
if (score == Double.POSITIVE_INFINITY) {
final LocalBrokerData localData = brokerData.getLocalData();
log.warn(
"Broker {} is overloaded: CPU: {}%, MEMORY: {}%, DIRECT MEMORY: {}%, BANDWIDTH IN: {}%, "
+ "BANDWIDTH OUT: {}%",
broker, localData.getCpu().percentUsage(), localData.getMemory().percentUsage(),
localData.getDirectMemory().percentUsage(), localData.getBandwidthIn().percentUsage(),
localData.getBandwidthOut().percentUsage());
}
if (score < minScore) {
// Clear best brokers since this score beats the other brokers.
bestBrokers.clear();
bestBrokers.add(broker);
minScore = score;
} else if (score == minScore) {
// Add this broker to best brokers since it ties with the best score.
bestBrokers.add(broker);
}
}
if (bestBrokers.isEmpty()) {
// All brokers are overloaded.
// Assign randomly in this case.
bestBrokers.addAll(candidates);
}
if (bestBrokers.isEmpty()) {
// If still, it means there are no available brokers at this point
return Optional.empty();
}
return Optional.of(bestBrokers.get(ThreadLocalRandom.current().nextInt(bestBrokers.size())));
} | 3.68 |
framework_Calendar_isMonthlyMode | /**
* Is the calendar in a mode where all days of the month are shown.
*
* @return true if the calendar is in monthly mode, false if it is in
* weekly mode
*/
public boolean isMonthlyMode() {
CalendarState state = getState(false);
if (state.days != null) {
return state.days.size() > 7;
} else {
// Default mode
return true;
}
} | 3.68 |
framework_VCalendarPanel_focusNextDay | /**
* Moves the focus forward the given number of days.
*/
private void focusNextDay(int days) {
if (focusedDate == null) {
return;
}
Date focusCopy = ((Date) focusedDate.clone());
focusCopy.setDate(focusedDate.getDate() + days);
if (!isDateInsideRange(focusCopy, resolution)) {
// If not inside allowed range, then do not move anything
return;
}
int oldMonth = focusedDate.getMonth();
int oldYear = focusedDate.getYear();
focusedDate.setDate(focusedDate.getDate() + days);
if (focusedDate.getMonth() == oldMonth
&& focusedDate.getYear() == oldYear) {
// Month did not change, only move the selection
focusDay(focusedDate);
} else {
// If the month changed we need to re-render the calendar
displayedMonth.setMonth(focusedDate.getMonth());
displayedMonth.setYear(focusedDate.getYear());
renderCalendar();
}
} | 3.68 |
hbase_SimpleRequestController_canTakeOperation | /**
* 1) Check whether the region is allowed. 2) Check the concurrent tasks for the region. 3) Check
* the total concurrent tasks. 4) Check the concurrent tasks for the server.
* @param loc the destination of data
* @param heapSizeOfRow the data size
* @return either Include {@link RequestController.ReturnCode} or skip
* {@link RequestController.ReturnCode}
*/
@Override
public ReturnCode canTakeOperation(HRegionLocation loc, long heapSizeOfRow) {
RegionInfo regionInfo = loc.getRegion();
if (regionsIncluded.contains(regionInfo)) {
// We already know what to do with this region.
return ReturnCode.INCLUDE;
}
AtomicInteger regionCnt = taskCounterPerRegion.get(loc.getRegion().getRegionName());
if (regionCnt != null && regionCnt.get() >= maxConcurrentTasksPerRegion) {
// Too many tasks on this region already.
return ReturnCode.SKIP;
}
int newServers =
serversIncluded.size() + (serversIncluded.contains(loc.getServerName()) ? 0 : 1);
if ((newServers + tasksInProgress.get()) > maxTotalConcurrentTasks) {
// Too many tasks.
return ReturnCode.SKIP;
}
AtomicInteger serverCnt = taskCounterPerServer.get(loc.getServerName());
if (serverCnt != null && serverCnt.get() >= maxConcurrentTasksPerServer) {
// Too many tasks for this individual server
return ReturnCode.SKIP;
}
return ReturnCode.INCLUDE;
} | 3.68 |
morf_SchemaBean_viewExists | /**
* @see org.alfasoftware.morf.metadata.Schema#viewExists(java.lang.String)
*/
@Override
public boolean viewExists(String name) {
return views.containsKey(name.toUpperCase());
} | 3.68 |
flink_ApiExpressionUtils_isFunctionOfKind | /**
* Checks if the expression is a function call of given type.
*
* @param expression expression to check
* @param kind expected type of function
* @return true if the expression is function call of given type, false otherwise
*/
public static boolean isFunctionOfKind(Expression expression, FunctionKind kind) {
if (expression instanceof UnresolvedCallExpression) {
return ((UnresolvedCallExpression) expression).getFunctionDefinition().getKind()
== kind;
}
if (expression instanceof CallExpression) {
return ((CallExpression) expression).getFunctionDefinition().getKind() == kind;
}
return false;
} | 3.68 |
framework_VScrollTable_ensureCacheFilled | /**
* Ensure we have the correct set of rows on client side, e.g. if the
* content on the server side has changed, or the client scroll position
* has changed since the last request.
*/
protected void ensureCacheFilled() {
/**
* Fixes cache issue #13576 where unnecessary rows are fetched
*/
if (isLazyScrollerActive()) {
return;
}
int reactFirstRow = (int) (firstRowInViewPort
- pageLength * cacheReactRate);
int reactLastRow = (int) (firstRowInViewPort + pageLength
+ pageLength * cacheReactRate);
if (reactFirstRow < 0) {
reactFirstRow = 0;
}
if (reactLastRow >= totalRows) {
reactLastRow = totalRows - 1;
}
if (lastRendered < reactFirstRow || firstRendered > reactLastRow) {
/*
* #8040 - scroll position is completely changed since the
* latest request, so request a new set of rows.
*
* TODO: We should probably check whether the fetched rows match
* the current scroll position right when they arrive, so as to
* not waste time rendering a set of rows that will never be
* visible...
*/
rowRequestHandler.triggerRowFetch(reactFirstRow,
reactLastRow - reactFirstRow + 1, 1);
} else if (lastRendered < reactLastRow) {
// get some cache rows below visible area
rowRequestHandler.triggerRowFetch(lastRendered + 1,
reactLastRow - lastRendered, 1);
} else if (firstRendered > reactFirstRow) {
/*
* Branch for fetching cache above visible area.
*
* If cache needed for both before and after visible area, this
* will be rendered after-cache is received and rendered. So in
* some rare situations the table may make two cache visits to
* server.
*/
rowRequestHandler.triggerRowFetch(reactFirstRow,
firstRendered - reactFirstRow, 1);
}
} | 3.68 |
hbase_ColumnSchemaModel_getAny | /** Returns the map for holding unspecified (user) attributes */
@XmlAnyAttribute
@JsonAnyGetter
public Map<QName, Object> getAny() {
return attrs;
} | 3.68 |
hadoop_CacheStats_release | /**
* Release some bytes that we're using.
*
* @param count
* The number of bytes to release. We will round this up to the
* page size.
*
* @return The new number of usedBytes.
*/
long release(long count) {
return usedBytesCount.release(count);
} | 3.68 |
morf_AbstractSqlDialectTest_nullOrderForDirection | /**
* A database platform may need to specify the null order by direction.
*
* <p>If a null order is not required for a SQL dialect descendant classes need to implement this method.</p>
*
* @param descending the order direction
* @return the null order for an SQL dialect
*/
protected String nullOrderForDirection(@SuppressWarnings("unused") Direction descending) {
return nullOrder();
} | 3.68 |
hadoop_DatanodeVolumeInfo_getNumBlocks | /**
* get number of blocks.
*/
public long getNumBlocks() {
return numBlocks;
} | 3.68 |
flink_Pool_addBack | /** Internal callback to put an entry back to the pool. */
void addBack(T object) {
pool.add(object);
} | 3.68 |
framework_ComboBox_setMultiSelect | /**
* ComboBox does not support multi select mode.
*
* @deprecated As of 7.0, use {@link ListSelect}, {@link OptionGroup} or
* {@link TwinColSelect} instead
* @see com.vaadin.ui.AbstractSelect#setMultiSelect(boolean)
* @throws UnsupportedOperationException
* if trying to activate multiselect mode
*/
@Deprecated
@Override
public void setMultiSelect(boolean multiSelect) {
if (multiSelect) {
throw new UnsupportedOperationException(
"Multiselect not supported");
}
} | 3.68 |
hadoop_DiskBalancerWorkItem_setBytesCopied | /**
* Sets bytes copied so far.
*
* @param bytesCopied - long
*/
public void setBytesCopied(long bytesCopied) {
this.bytesCopied = bytesCopied;
} | 3.68 |
zxing_BitMatrix_getRowSize | /**
* @return The row size of the matrix
*/
public int getRowSize() {
return rowSize;
} | 3.68 |
pulsar_MessageDeduplication_checkStatus | /**
* Check the status of deduplication. If the configuration has changed, it will enable/disable deduplication,
* returning a future to track the completion of the task
*/
public CompletableFuture<Void> checkStatus() {
boolean shouldBeEnabled = isDeduplicationEnabled();
synchronized (this) {
if (status == Status.Recovering || status == Status.Removing) {
// If there's already a transition happening, check later for status
pulsar.getExecutor().schedule(this::checkStatus, 1, TimeUnit.MINUTES);
return CompletableFuture.completedFuture(null);
}
if (status == Status.Initialized && !shouldBeEnabled) {
status = Status.Removing;
managedLedger.asyncDeleteCursor(PersistentTopic.DEDUPLICATION_CURSOR_NAME,
new DeleteCursorCallback() {
@Override
public void deleteCursorComplete(Object ctx) {
status = Status.Disabled;
log.info("[{}] Deleted deduplication cursor", topic.getName());
}
@Override
public void deleteCursorFailed(ManagedLedgerException exception, Object ctx) {
if (exception instanceof ManagedLedgerException.CursorNotFoundException) {
status = Status.Disabled;
} else {
log.error("[{}] Deleted deduplication cursor error", topic.getName(), exception);
}
}
}, null);
}
if (status == Status.Enabled && !shouldBeEnabled) {
// Disabled deduping
CompletableFuture<Void> future = new CompletableFuture<>();
status = Status.Removing;
managedLedger.asyncDeleteCursor(PersistentTopic.DEDUPLICATION_CURSOR_NAME,
new DeleteCursorCallback() {
@Override
public void deleteCursorComplete(Object ctx) {
status = Status.Disabled;
managedCursor = null;
highestSequencedPushed.clear();
highestSequencedPersisted.clear();
future.complete(null);
log.info("[{}] Disabled deduplication", topic.getName());
}
@Override
public void deleteCursorFailed(ManagedLedgerException exception, Object ctx) {
// It's ok for disable message deduplication.
if (exception instanceof ManagedLedgerException.CursorNotFoundException) {
status = Status.Disabled;
managedCursor = null;
highestSequencedPushed.clear();
highestSequencedPersisted.clear();
future.complete(null);
} else {
log.warn("[{}] Failed to disable deduplication: {}", topic.getName(),
exception.getMessage());
status = Status.Failed;
future.completeExceptionally(exception);
}
}
}, null);
return future;
} else if ((status == Status.Disabled || status == Status.Initialized) && shouldBeEnabled) {
// Enable deduping
CompletableFuture<Void> future = new CompletableFuture<>();
managedLedger.asyncOpenCursor(PersistentTopic.DEDUPLICATION_CURSOR_NAME, new OpenCursorCallback() {
@Override
public void openCursorComplete(ManagedCursor cursor, Object ctx) {
// We don't want to retain cache for this cursor
cursor.setAlwaysInactive();
managedCursor = cursor;
recoverSequenceIdsMap().thenRun(() -> {
status = Status.Enabled;
future.complete(null);
log.info("[{}] Enabled deduplication", topic.getName());
}).exceptionally(ex -> {
status = Status.Failed;
log.warn("[{}] Failed to enable deduplication: {}", topic.getName(), ex.getMessage());
future.completeExceptionally(ex);
return null;
});
}
@Override
public void openCursorFailed(ManagedLedgerException exception, Object ctx) {
log.warn("[{}] Failed to enable deduplication: {}", topic.getName(),
exception.getMessage());
future.completeExceptionally(exception);
}
}, null);
return future;
} else {
// Nothing to do, we are in the correct state
return CompletableFuture.completedFuture(null);
}
}
} | 3.68 |
flink_OptimizerNode_initId | /**
* Sets the ID of this node.
*
* @param id The id for this node.
*/
public void initId(int id) {
if (id <= 0) {
throw new IllegalArgumentException();
}
if (this.id == -1) {
this.id = id;
} else {
throw new IllegalStateException("Id has already been initialized.");
}
} | 3.68 |
hbase_HBackupFileSystem_getBackupTmpDirPath | /**
* Get backup temporary directory
* @param backupRootDir backup root
* @return backup tmp directory path
*/
public static Path getBackupTmpDirPath(String backupRootDir) {
return new Path(backupRootDir, ".tmp");
} | 3.68 |
flink_MurmurHashUtils_hashBytesByWords | /**
* Hash bytes in MemorySegment, length must be aligned to 4 bytes.
*
* @param segment segment.
* @param offset offset for MemorySegment
* @param lengthInBytes length in MemorySegment
* @return hash code
*/
public static int hashBytesByWords(MemorySegment segment, int offset, int lengthInBytes) {
return hashBytesByWords(segment, offset, lengthInBytes, DEFAULT_SEED);
} | 3.68 |
flink_SchedulerFactory_create | /**
* Create a {@link ScheduledThreadPoolExecutor} using the provided corePoolSize. The following
* behaviour is configured:
*
* <ul>
* <li>rejected executions are logged if the executor is {@link
* java.util.concurrent.ThreadPoolExecutor#isShutdown shutdown}
* <li>otherwise, {@link RejectedExecutionException} is thrown
* <li>any uncaught exception fails the JVM (using {@link
* org.apache.flink.runtime.util.FatalExitExceptionHandler FatalExitExceptionHandler})
* </ul>
*/
public static ScheduledThreadPoolExecutor create(int corePoolSize, String name, Logger log) {
AtomicInteger cnt = new AtomicInteger(0);
return new ScheduledThreadPoolExecutor(
corePoolSize,
runnable -> {
Thread thread = new Thread(runnable);
thread.setName(name + "-" + cnt.incrementAndGet());
thread.setUncaughtExceptionHandler(INSTANCE);
return thread;
},
new IgnoreShutdownRejectedExecutionHandler(log));
} | 3.68 |
framework_DateField_notifyFormOfValidityChange | /**
* Detects if this field is used in a Form (logically) and if so, notifies
* it (by repainting it) that the validity of this field might have changed.
*/
private void notifyFormOfValidityChange() {
Component parenOfDateField = getParent();
boolean formFound = false;
while (parenOfDateField != null || formFound) {
if (parenOfDateField instanceof Form) {
Form f = (Form) parenOfDateField;
Collection<?> visibleItemProperties = f.getItemPropertyIds();
for (Object fieldId : visibleItemProperties) {
Field<?> field = f.getField(fieldId);
if (equals(field)) {
/*
* this datefield is logically in a form. Do the same
* thing as form does in its value change listener that
* it registers to all fields.
*/
f.markAsDirty();
formFound = true;
break;
}
}
}
if (formFound) {
break;
}
parenOfDateField = parenOfDateField.getParent();
}
} | 3.68 |
flink_JoinOperator_projectTuple19 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18>
ProjectJoin<
I1,
I2,
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>
projectTuple19() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>
tType =
new TupleTypeInfo<
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>(fTypes);
return new ProjectJoin<
I1,
I2,
Tuple19<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
framework_SerializablePredicate_isEqual | /**
* Returns a predicate that tests if two arguments are equal according to
* {@link Objects#equals(Object, Object)}.
*
* @param <T>
* the type of arguments to the predicate
* @param targetRef
* the object reference with which to compare for equality, which
* may be {@code null}
* @return a predicate that tests if two arguments are equal according to
* {@link Objects#equals(Object, Object)}
* @since 8.5
*/
static <T> SerializablePredicate<T> isEqual(Serializable targetRef) {
return (null == targetRef) ? Objects::isNull
: object -> targetRef.equals(object);
} | 3.68 |
hadoop_NMClient_getNodeIdOfStartedContainer | /**
* Get the NodeId of the node on which the container is running. It returns
* null if the container is not found or if it is not running.
*
* @param containerId Container Id of the container.
* @return NodeId of the container on which it is running.
*/
public NodeId getNodeIdOfStartedContainer(ContainerId containerId) {
return null;
} | 3.68 |
hbase_SnapshotManager_removeSentinelIfFinished | /**
* Return the handler if it is currently live and has the same snapshot target name. The handler
* is removed from the sentinels map if completed.
* @param sentinels live handlers
* @param snapshot snapshot description
* @return null if doesn't match, else a live handler.
*/
private synchronized SnapshotSentinel removeSentinelIfFinished(
final Map<TableName, SnapshotSentinel> sentinels, final SnapshotDescription snapshot) {
if (!snapshot.hasTable()) {
return null;
}
TableName snapshotTable = TableName.valueOf(snapshot.getTable());
SnapshotSentinel h = sentinels.get(snapshotTable);
if (h == null) {
return null;
}
if (!h.getSnapshot().getName().equals(snapshot.getName())) {
// specified snapshot does not match the one currently running
return null;
}
// Remove from the "in-progress" list once completed
if (h.isFinished()) {
sentinels.remove(snapshotTable);
}
return h;
} | 3.68 |
framework_MonthEventLabel_setEventIndex | /**
* Set the (server side) index of the event.
*
* @param index
* The integer index
*/
public void setEventIndex(int index) {
eventIndex = index;
} | 3.68 |
pulsar_DefaultMetadataResolver_fromIssuerUrl | /**
* Gets a well-known metadata URL for the given OAuth issuer URL.
* @param issuerUrl The authorization server's issuer identifier
* @return a resolver
*/
public static DefaultMetadataResolver fromIssuerUrl(URL issuerUrl) {
return new DefaultMetadataResolver(getWellKnownMetadataUrl(issuerUrl));
} | 3.68 |
hadoop_LoggedLocation_setUnknownAttribute | // for input parameter ignored.
@JsonAnySetter
public void setUnknownAttribute(String attributeName, Object ignored) {
if (!alreadySeenAnySetterAttributes.contains(attributeName)) {
alreadySeenAnySetterAttributes.add(attributeName);
System.err.println("In LoggedJob, we saw the unknown attribute "
+ attributeName + ".");
}
} | 3.68 |
hudi_HoodieTableConfig_getTableName | /**
* Read the table name.
*/
public String getTableName() {
return getString(NAME);
} | 3.68 |
hadoop_RouterDelegationTokenSecretManager_removeStoredToken | /**
 * Removes the given RM delegation token from the Federation state store.
*
* @param identifier Delegation Token
* @throws IOException IO exception occurred.
*/
@Override
public void removeStoredToken(RMDelegationTokenIdentifier identifier) throws IOException {
try {
federationFacade.removeStoredToken(identifier);
} catch (Exception e) {
if (!shouldIgnoreException(e)) {
LOG.error("Error in removing RMDelegationToken with sequence number: {}",
identifier.getSequenceNumber());
ExitUtil.terminate(1, e);
}
}
} | 3.68 |
flink_Schema_fromSchema | /** Adopts all members from the given unresolved schema. */
public Builder fromSchema(Schema unresolvedSchema) {
columns.addAll(unresolvedSchema.columns);
watermarkSpecs.addAll(unresolvedSchema.watermarkSpecs);
if (unresolvedSchema.primaryKey != null) {
primaryKeyNamed(
unresolvedSchema.primaryKey.getConstraintName(),
unresolvedSchema.primaryKey.getColumnNames());
}
return this;
} | 3.68 |
hbase_HFileBlockIndex_shouldWriteBlock | /**
   * Whether there is an inline block ready to be written. In general, we write a leaf-level
* index block as an inline block as soon as its size as serialized in the non-root format
* reaches a certain threshold.
*/
@Override
public boolean shouldWriteBlock(boolean closing) {
if (singleLevelOnly) {
throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
}
if (curInlineChunk == null) {
throw new IllegalStateException("curInlineChunk is null; has shouldWriteBlock been "
+ "called with closing=true and then called again?");
}
if (curInlineChunk.getNumEntries() == 0) {
return false;
}
// We do have some entries in the current inline chunk.
if (closing) {
if (rootChunk.getNumEntries() == 0) {
// We did not add any leaf-level blocks yet. Instead of creating a
// leaf level with one block, move these entries to the root level.
expectNumLevels(1);
rootChunk = curInlineChunk;
curInlineChunk = null; // Disallow adding any more index entries.
return false;
}
return true;
} else {
return curInlineChunk.getNonRootSize() >= maxChunkSize;
}
} | 3.68 |
hbase_CompositeImmutableSegment_tailSet | /**
* Returns a subset of the segment cell set, which starts with the given cell
* @param firstCell a cell in the segment
* @return a subset of the segment cell set, which starts with the given cell
*/
@Override
protected SortedSet<Cell> tailSet(Cell firstCell) {
throw new IllegalStateException("Not supported by CompositeImmutableScanner");
} | 3.68 |
hbase_ClientMetaTableAccessor_getTableStartRowForMeta | /** Returns start row for scanning META according to query type */
public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) {
if (tableName == null) {
return null;
}
switch (type) {
case REGION:
case REPLICATION: {
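        // Meta row keys have the form "<tableName>,<startKey>,<regionId>"; appending two delimiters
        // yields "<tableName>,," as the first possible row for this table.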
byte[] startRow = new byte[tableName.getName().length + 2];
System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
startRow[startRow.length - 2] = HConstants.DELIMITER;
startRow[startRow.length - 1] = HConstants.DELIMITER;
return startRow;
}
case ALL:
case TABLE:
default: {
return tableName.getName();
}
}
} | 3.68 |
hbase_ReplicationSink_getSinkMetrics | /**
* Get replication Sink Metrics
*/
public MetricsSink getSinkMetrics() {
return this.metrics;
} | 3.68 |
hbase_MultiByteBuff_mark | /**
* Marks the current position of the MBB
* @return this object
*/
@Override
public MultiByteBuff mark() {
checkRefCount();
this.markedItemIndex = this.curItemIndex;
this.curItem.mark();
return this;
} | 3.68 |
hadoop_Server_getStatus | /**
* Returns the current server status.
*
* @return the current server status.
*/
public Status getStatus() {
return status;
} | 3.68 |
AreaShop_RegionGroup_getWorlds | /**
* Get all worlds from which regions are added automatically.
* @return A list with the names of all worlds (immutable)
*/
public Set<String> getWorlds() {
return new HashSet<>(worlds);
} | 3.68 |
framework_TabSheet_updateSelection | /**
* Checks if the current selection is valid, and updates the selection if
* the previously selected component is not visible and enabled. The first
* visible and enabled tab is selected if the current selection is empty or
* invalid.
*
* This method does not fire tab change events, but the caller should do so
* if appropriate.
*
* @return true if selection was changed, false otherwise
*/
private boolean updateSelection() {
Component originalSelection = selected;
for (final Iterator<Component> i = iterator(); i.hasNext();) {
final Component component = i.next();
Tab tab = tabs.get(component);
/*
* If we have no selection, if the current selection is invisible or
* if the current selection is disabled (but the whole component is
* not) we select this tab instead
*/
Tab selectedTabInfo = null;
if (selected != null) {
selectedTabInfo = tabs.get(selected);
}
if (selected == null || selectedTabInfo == null
|| !selectedTabInfo.isVisible()
|| !selectedTabInfo.isEnabled()) {
// The current selection is not valid so we need to change
// it
if (tab.isEnabled() && tab.isVisible()) {
setSelected(component);
break;
} else {
/*
* The current selection is not valid but this tab cannot be
* selected either.
*/
setSelected(null);
}
}
}
return originalSelection != selected;
} | 3.68 |
dubbo_NettyChannel_buildErrorResponse | /**
* build a bad request's response
*
* @param request the request
* @param t the throwable. In most cases, serialization fails.
* @return the response
*/
private static Response buildErrorResponse(Request request, Throwable t) {
Response response = new Response(request.getId(), request.getVersion());
if (t instanceof EncoderException) {
response.setStatus(Response.SERIALIZATION_ERROR);
} else {
response.setStatus(Response.BAD_REQUEST);
}
response.setErrorMessage(StringUtils.toString(t));
return response;
} | 3.68 |
pulsar_BrokerLoadData_updateSystemResourceUsage | // Update resource usage given each individual usage.
private void updateSystemResourceUsage(final ResourceUsage cpu, final ResourceUsage memory,
final ResourceUsage directMemory, final ResourceUsage bandwidthIn,
final ResourceUsage bandwidthOut) {
this.cpu = cpu;
this.memory = memory;
this.directMemory = directMemory;
this.bandwidthIn = bandwidthIn;
this.bandwidthOut = bandwidthOut;
} | 3.68 |
framework_PushConfiguration_setTransport | /*
* (non-Javadoc)
*
* @see
* com.vaadin.ui.PushConfiguration#setTransport(com.vaadin.shared.ui.ui.
* Transport)
*/
@Override
public void setTransport(Transport transport) {
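        // WEBSOCKET_XHR uses the websocket transport for pushes but forces XHR for server requests.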
if (transport == Transport.WEBSOCKET_XHR) {
getState().alwaysUseXhrForServerRequests = true;
// Atmosphere knows only about "websocket"
setParameter(PushConfigurationState.TRANSPORT_PARAM,
Transport.WEBSOCKET.getIdentifier());
} else {
getState().alwaysUseXhrForServerRequests = false;
setParameter(PushConfigurationState.TRANSPORT_PARAM,
transport.getIdentifier());
}
} | 3.68 |
zilla_WsServerFactory_assembleHeader | // @return number of bytes consumed to assemble the websocket header
private int assembleHeader(
DirectBuffer buffer,
int offset,
int length)
{
int remaining = Math.min(length, MAXIMUM_HEADER_SIZE - headerLength);
// may copy more than actual header length (up to max header length), but will adjust at the end
header.putBytes(headerLength, buffer, offset, remaining);
int consumed = remaining;
if (headerLength + remaining >= 2)
{
int wsHeaderLength = wsHeaderLength(header);
            // the final headerLength must not exceed wsHeaderLength
if (headerLength + remaining > wsHeaderLength)
{
consumed = wsHeaderLength - headerLength;
}
}
headerLength += consumed;
return consumed;
} | 3.68 |
hbase_MasterObserver_preTruncateRegionAction | /**
* Called before the region is truncated.
* @param c The environment to interact with the framework and master
* @param regionInfo The Region being truncated
*/
@SuppressWarnings("unused")
default void preTruncateRegionAction(final ObserverContext<MasterCoprocessorEnvironment> c,
final RegionInfo regionInfo) {
} | 3.68 |
morf_Oracle_getXADataSource | /**
* Returns an Oracle XA data source. Note that this method may fail at
* run-time if {@code OracleXADataSource} is not available on the classpath.
*
* @throws IllegalStateException If the data source cannot be created.
*
* @see org.alfasoftware.morf.jdbc.DatabaseType#getXADataSource(java.lang.String,
* java.lang.String, java.lang.String)
*/
@Override
public XADataSource getXADataSource(String jdbcUrl, String username, String password) {
try {
log.info("Initialising Oracle XA data source...");
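      // Load and wire up the driver reflectively so there is no compile-time dependency on the Oracle JDBC classes.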
XADataSource dataSource = (XADataSource) Class.forName("oracle.jdbc.xa.client.OracleXADataSource").newInstance();
dataSource.getClass().getMethod("setURL", String.class).invoke(dataSource, jdbcUrl);
dataSource.getClass().getMethod("setUser", String.class).invoke(dataSource, username);
dataSource.getClass().getMethod("setPassword", String.class).invoke(dataSource, password);
return dataSource;
} catch (Exception e) {
throw new IllegalStateException("Failed to create Oracle XA data source", e);
}
} | 3.68 |
hbase_HMaster_finishActiveMasterInitialization | /**
* Finish initialization of HMaster after becoming the primary master.
* <p/>
* The startup order is a bit complicated but very important, do not change it unless you know
* what you are doing.
* <ol>
* <li>Initialize file system based components - file system manager, wal manager, table
* descriptors, etc</li>
* <li>Publish cluster id</li>
* <li>Here comes the most complicated part - initialize server manager, assignment manager and
* region server tracker
* <ol type='i'>
* <li>Create server manager</li>
* <li>Create master local region</li>
* <li>Create procedure executor, load the procedures, but do not start workers. We will start it
* later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same
* server</li>
* <li>Create assignment manager and start it, load the meta region state, but do not load data
* from meta region</li>
* <li>Start region server tracker, construct the online servers set and find out dead servers and
* schedule SCP for them. The online servers will be constructed by scanning zk, and we will also
* scan the wal directory and load from master local region to find out possible live region
* servers, and the differences between these two sets are the dead servers</li>
* </ol>
* </li>
* <li>If this is a new deploy, schedule a InitMetaProcedure to initialize meta</li>
* <li>Start necessary service threads - balancer, catalog janitor, executor services, and also
* the procedure executor, etc. Notice that the balancer must be created first as assignment
* manager may use it when assigning regions.</li>
* <li>Wait for meta to be initialized if necessary, start table state manager.</li>
* <li>Wait for enough region servers to check-in</li>
* <li>Let assignment manager load data from meta and construct region states</li>
* <li>Start all other things such as chore services, etc</li>
* </ol>
* <p/>
* Notice that now we will not schedule a special procedure to make meta online(unless the first
* time where meta has not been created yet), we will rely on SCP to bring meta online.
*/
private void finishActiveMasterInitialization() throws IOException, InterruptedException,
KeeperException, ReplicationException, DeserializationException {
/*
* We are active master now... go initialize components we need to run.
*/
startupTaskGroup.addTask("Initializing Master file system");
this.masterActiveTime = EnvironmentEdgeManager.currentTime();
// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
// always initialize the MemStoreLAB as we use a region to store data in master now, see
// localStore.
initializeMemStoreChunkCreator(null);
this.fileSystemManager = new MasterFileSystem(conf);
this.walManager = new MasterWalManager(this);
// warm-up HTDs cache on master initialization
if (preLoadTableDescriptors) {
startupTaskGroup.addTask("Pre-loading table descriptors");
this.tableDescriptors.getAll();
}
// Publish cluster ID; set it in Master too. The superclass RegionServer does this later but
// only after it has checked in with the Master. At least a few tests ask Master for clusterId
// before it has called its run method and before RegionServer has done the reportForDuty.
ClusterId clusterId = fileSystemManager.getClusterId();
startupTaskGroup.addTask("Publishing Cluster ID " + clusterId + " in ZooKeeper");
ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
this.clusterId = clusterId.toString();
// Precaution. Put in place the old hbck1 lock file to fence out old hbase1s running their
// hbck1s against an hbase2 cluster; it could do damage. To skip this behavior, set
// hbase.write.hbck1.lock.file to false.
if (this.conf.getBoolean("hbase.write.hbck1.lock.file", true)) {
Pair<Path, FSDataOutputStream> result = null;
try {
result = HBaseFsck.checkAndMarkRunningHbck(this.conf,
HBaseFsck.createLockRetryCounterFactory(this.conf).create());
} finally {
if (result != null) {
Closeables.close(result.getSecond(), true);
}
}
}
startupTaskGroup.addTask("Initialize ServerManager and schedule SCP for crash servers");
// The below two managers must be created before loading procedures, as they will be used during
// loading.
// initialize master local region
masterRegion = MasterRegionFactory.create(this);
rsListStorage = new MasterRegionServerList(masterRegion, this);
this.serverManager = createServerManager(this, rsListStorage);
this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);
if (
!conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)
) {
this.splitWALManager = new SplitWALManager(this);
}
tryMigrateMetaLocationsFromZooKeeper();
createProcedureExecutor();
Map<Class<?>, List<Procedure<MasterProcedureEnv>>> procsByType = procedureExecutor
.getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass()));
// Create Assignment Manager
this.assignmentManager = createAssignmentManager(this, masterRegion);
this.assignmentManager.start();
// TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as
// completed, it could still be in the procedure list. This is a bit strange but is another
// story, need to verify the implementation for ProcedureExecutor and ProcedureStore.
List<TransitRegionStateProcedure> ritList =
procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream()
.filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p)
.collect(Collectors.toList());
this.assignmentManager.setupRIT(ritList);
// Start RegionServerTracker with listing of servers found with exiting SCPs -- these should
// be registered in the deadServers set -- and the servernames loaded from the WAL directory
// and master local region that COULD BE 'alive'(we'll schedule SCPs for each and let SCP figure
// it out).
// We also pass dirs that are already 'splitting'... so we can do some checks down in tracker.
// TODO: Generate the splitting and live Set in one pass instead of two as we currently do.
this.regionServerTracker.upgrade(
procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream()
.map(p -> (ServerCrashProcedure) p).map(p -> p.getServerName()).collect(Collectors.toSet()),
Sets.union(rsListStorage.getAll(), walManager.getLiveServersFromWALDir()),
walManager.getSplittingServersFromWALDir());
    // This manager must be accessed AFTER hbase:meta is confirmed online.
this.tableStateManager = new TableStateManager(this);
startupTaskGroup.addTask("Initializing ZK system trackers");
initializeZKBasedSystemTrackers();
startupTaskGroup.addTask("Loading last flushed sequence id of regions");
try {
this.serverManager.loadLastFlushedSequenceIds();
} catch (IOException e) {
LOG.info("Failed to load last flushed sequence id of regions" + " from file system", e);
}
// Set ourselves as active Master now our claim has succeeded up in zk.
this.activeMaster = true;
// Start the Zombie master detector after setting master as active, see HBASE-21535
Thread zombieDetector = new Thread(new MasterInitializationMonitor(this),
"ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime());
zombieDetector.setDaemon(true);
zombieDetector.start();
if (!maintenanceMode) {
startupTaskGroup.addTask("Initializing master coprocessors");
setQuotasObserver(conf);
initializeCoprocessorHost(conf);
} else {
// start an in process region server for carrying system regions
maintenanceRegionServer =
JVMClusterUtil.createRegionServerThread(getConfiguration(), HRegionServer.class, 0);
maintenanceRegionServer.start();
}
// Checking if meta needs initializing.
startupTaskGroup.addTask("Initializing meta table if this is a new deploy");
InitMetaProcedure initMetaProc = null;
// Print out state of hbase:meta on startup; helps debugging.
if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) {
Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
.filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
initMetaProc = optProc.orElseGet(() -> {
// schedule an init meta procedure if meta has not been deployed yet
InitMetaProcedure temp = new InitMetaProcedure();
procedureExecutor.submitProcedure(temp);
return temp;
});
}
// initialize load balancer
this.balancer.setMasterServices(this);
this.balancer.initialize();
this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
// try migrate replication data
ZKReplicationQueueStorageForMigration oldReplicationQueueStorage =
new ZKReplicationQueueStorageForMigration(zooKeeper, conf);
// check whether there are something to migrate and we haven't scheduled a migration procedure
// yet
if (
oldReplicationQueueStorage.hasData() && procedureExecutor.getProcedures().stream()
.allMatch(p -> !(p instanceof MigrateReplicationQueueFromZkToTableProcedure))
) {
procedureExecutor.submitProcedure(new MigrateReplicationQueueFromZkToTableProcedure());
}
// start up all service threads.
startupTaskGroup.addTask("Initializing master service threads");
startServiceThreads();
// wait meta to be initialized after we start procedure executor
if (initMetaProc != null) {
initMetaProc.await();
}
// Wake up this server to check in
sleeper.skipSleepCycle();
// Wait for region servers to report in.
// With this as part of master initialization, it precludes our being able to start a single
// server that is both Master and RegionServer. Needs more thought. TODO.
String statusStr = "Wait for region servers to report in";
MonitoredTask waitRegionServer = startupTaskGroup.addTask(statusStr);
LOG.info(Objects.toString(waitRegionServer));
waitForRegionServers(waitRegionServer);
// Check if master is shutting down because issue initializing regionservers or balancer.
if (isStopped()) {
return;
}
startupTaskGroup.addTask("Starting assignment manager");
// FIRST HBASE:META READ!!!!
// The below cannot make progress w/o hbase:meta being online.
// This is the FIRST attempt at going to hbase:meta. Meta on-lining is going on in background
// as procedures run -- in particular SCPs for crashed servers... One should put up hbase:meta
    // if it is down. It may take a while to come online. So, wait here until meta is for sure
// available. That's what waitForMetaOnline does.
if (!waitForMetaOnline()) {
return;
}
TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME);
final ColumnFamilyDescriptor tableFamilyDesc =
metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY);
final ColumnFamilyDescriptor replBarrierFamilyDesc =
metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY);
this.assignmentManager.joinCluster();
// The below depends on hbase:meta being online.
this.assignmentManager.processOfflineRegions();
// this must be called after the above processOfflineRegions to prevent race
this.assignmentManager.wakeMetaLoadedEvent();
// for migrating from a version without HBASE-25099, and also for honoring the configuration
// first.
if (conf.get(HConstants.META_REPLICAS_NUM) != null) {
int replicasNumInConf =
conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM);
TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME);
if (metaDesc.getRegionReplication() != replicasNumInConf) {
// it is possible that we already have some replicas before upgrading, so we must set the
// region replication number in meta TableDescriptor directly first, without creating a
// ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas.
int existingReplicasCount =
assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size();
if (existingReplicasCount > metaDesc.getRegionReplication()) {
LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)"
+ " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount);
metaDesc = TableDescriptorBuilder.newBuilder(metaDesc)
.setRegionReplication(existingReplicasCount).build();
tableDescriptors.update(metaDesc);
}
// check again, and issue a ModifyTableProcedure if needed
if (metaDesc.getRegionReplication() != replicasNumInConf) {
LOG.info(
"The {} config is {} while the replica count in TableDescriptor is {}"
+ " for hbase:meta, altering...",
HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication());
procedureExecutor.submitProcedure(new ModifyTableProcedure(
procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc)
.setRegionReplication(replicasNumInConf).build(),
null, metaDesc, false, true));
}
}
}
// Initialize after meta is up as below scans meta
FavoredNodesManager fnm = getFavoredNodesManager();
if (fnm != null) {
fnm.initializeFromMeta();
}
// set cluster status again after user regions are assigned
this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
// Start balancer and meta catalog janitor after meta and regions have been assigned.
startupTaskGroup.addTask("Starting balancer and catalog janitor");
this.clusterStatusChore = new ClusterStatusChore(this, balancer);
getChoreService().scheduleChore(clusterStatusChore);
this.balancerChore = new BalancerChore(this);
if (!disableBalancerChoreForTest) {
getChoreService().scheduleChore(balancerChore);
}
if (regionNormalizerManager != null) {
getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore());
}
this.catalogJanitorChore = new CatalogJanitor(this);
getChoreService().scheduleChore(catalogJanitorChore);
this.hbckChore = new HbckChore(this);
getChoreService().scheduleChore(hbckChore);
this.serverManager.startChore();
// Only for rolling upgrade, where we need to migrate the data in namespace table to meta table.
if (!waitForNamespaceOnline()) {
return;
}
startupTaskGroup.addTask("Starting cluster schema service");
try {
initClusterSchemaService();
} catch (IllegalStateException e) {
if (
e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException
&& tableFamilyDesc == null && replBarrierFamilyDesc == null
) {
LOG.info("ClusterSchema service could not be initialized. This is "
+ "expected during HBase 1 to 2 upgrade", e);
} else {
throw e;
}
}
if (this.cpHost != null) {
try {
this.cpHost.preMasterInitialization();
} catch (IOException e) {
LOG.error("Coprocessor preMasterInitialization() hook failed", e);
}
}
LOG.info(String.format("Master has completed initialization %.3fsec",
(EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f));
this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime();
configurationManager.registerObserver(this.balancer);
configurationManager.registerObserver(this.logCleanerPool);
configurationManager.registerObserver(this.logCleaner);
configurationManager.registerObserver(this.regionsRecoveryConfigManager);
configurationManager.registerObserver(this.exclusiveHFileCleanerPool);
if (this.sharedHFileCleanerPool != null) {
configurationManager.registerObserver(this.sharedHFileCleanerPool);
}
if (this.hfileCleaners != null) {
for (HFileCleaner cleaner : hfileCleaners) {
configurationManager.registerObserver(cleaner);
}
}
// Set master as 'initialized'.
setInitialized(true);
startupTaskGroup.markComplete("Initialization successful");
MonitoredTask status =
TaskMonitor.get().createStatus("Progress after master initialized", false, true);
if (tableFamilyDesc == null && replBarrierFamilyDesc == null) {
// create missing CFs in meta table after master is set to 'initialized'.
createMissingCFsInMetaDuringUpgrade(metaDescriptor);
// Throwing this Exception to abort active master is painful but this
// seems the only way to add missing CFs in meta while upgrading from
// HBase 1 to 2 (where HBase 2 has HBASE-23055 & HBASE-23782 checked-in).
// So, why do we abort active master after adding missing CFs in meta?
// When we reach here, we would have already bypassed NoSuchColumnFamilyException
// in initClusterSchemaService(), meaning ClusterSchemaService is not
// correctly initialized but we bypassed it. Similarly, we bypassed
// tableStateManager.start() as well. Hence, we should better abort
// current active master because our main task - adding missing CFs
// in meta table is done (possible only after master state is set as
// initialized) at the expense of bypassing few important tasks as part
// of active master init routine. So now we abort active master so that
// next active master init will not face any issues and all mandatory
// services will be started during master init phase.
throw new PleaseRestartMasterException("Aborting active master after missing"
+ " CFs are successfully added in meta. Subsequent active master "
+ "initialization should be uninterrupted");
}
if (maintenanceMode) {
LOG.info("Detected repair mode, skipping final initialization steps.");
return;
}
assignmentManager.checkIfShouldMoveSystemRegionAsync();
status.setStatus("Starting quota manager");
initQuotaManager();
if (QuotaUtil.isQuotaEnabled(conf)) {
// Create the quota snapshot notifier
spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
spaceQuotaSnapshotNotifier.initialize(getConnection());
this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics());
// Start the chore to read the region FS space reports and act on them
getChoreService().scheduleChore(quotaObserverChore);
this.snapshotQuotaChore = new SnapshotQuotaObserverChore(this, getMasterMetrics());
// Start the chore to read snapshots and add their usage to table/NS quotas
getChoreService().scheduleChore(snapshotQuotaChore);
}
final SlowLogMasterService slowLogMasterService = new SlowLogMasterService(conf, this);
slowLogMasterService.init();
WALEventTrackerTableCreator.createIfNeededAndNotExists(conf, this);
// Create REPLICATION.SINK_TRACKER table if needed.
ReplicationSinkTrackerTableCreator.createIfNeededAndNotExists(conf, this);
// clear the dead servers with same host name and port of online server because we are not
// removing dead server with same hostname and port of rs which is trying to check in before
// master initialization. See HBASE-5916.
this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
// Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration
status.setStatus("Checking ZNode ACLs");
zooKeeper.checkAndSetZNodeAcls();
status.setStatus("Initializing MOB Cleaner");
initMobCleaner();
// delete the stale data for replication sync up tool if necessary
status.setStatus("Cleanup ReplicationSyncUp status if necessary");
Path replicationSyncUpInfoFile =
new Path(new Path(dataRootDir, ReplicationSyncUp.INFO_DIR), ReplicationSyncUp.INFO_FILE);
if (dataFs.exists(replicationSyncUpInfoFile)) {
// info file is available, load the timestamp and use it to clean up stale data in replication
// queue storage.
byte[] data;
try (FSDataInputStream in = dataFs.open(replicationSyncUpInfoFile)) {
data = ByteStreams.toByteArray(in);
}
ReplicationSyncUpToolInfo info = null;
try {
info = JsonMapper.fromJson(Bytes.toString(data), ReplicationSyncUpToolInfo.class);
} catch (JsonParseException e) {
// usually this should be a partial file, which means the ReplicationSyncUp tool did not
// finish properly, so not a problem. Here we do not clean up the status as we do not know
// the reason why the tool did not finish properly, so let users clean the status up
// manually
LOG.warn("failed to parse replication sync up info file, ignore and continue...", e);
}
if (info != null) {
LOG.info("Remove last sequence ids and hfile references which are written before {}({})",
info.getStartTimeMs(), DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.systemDefault())
.format(Instant.ofEpochMilli(info.getStartTimeMs())));
replicationPeerManager.getQueueStorage()
.removeLastSequenceIdsAndHFileRefsBefore(info.getStartTimeMs());
// delete the file after removing the stale data, so next time we do not need to do this
// again.
dataFs.delete(replicationSyncUpInfoFile, false);
}
}
status.setStatus("Calling postStartMaster coprocessors");
if (this.cpHost != null) {
// don't let cp initialization errors kill the master
try {
this.cpHost.postStartMaster();
} catch (IOException ioe) {
LOG.error("Coprocessor postStartMaster() hook failed", ioe);
}
}
zombieDetector.interrupt();
/*
* After master has started up, lets do balancer post startup initialization. Since this runs in
* activeMasterManager thread, it should be fine.
*/
long start = EnvironmentEdgeManager.currentTime();
this.balancer.postMasterStartupInitialize();
if (LOG.isDebugEnabled()) {
LOG.debug("Balancer post startup initialization complete, took "
+ ((EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
}
this.rollingUpgradeChore = new RollingUpgradeChore(this);
getChoreService().scheduleChore(rollingUpgradeChore);
status.markComplete("Progress after master initialized complete");
} | 3.68 |
pulsar_ClientCnxIdleState_isReleasing | /**
* @return Whether this connection is in idle and will be released soon.
*/
public boolean isReleasing() {
return getIdleStat() == State.RELEASING;
} | 3.68 |