name | code_snippet | score
---|---|---|
graphhopper_OSMReaderConfig_setLongEdgeSamplingDistance | /**
* Sets the distance between elevation samples on long edges
*/
public OSMReaderConfig setLongEdgeSamplingDistance(double longEdgeSamplingDistance) {
this.longEdgeSamplingDistance = longEdgeSamplingDistance;
return this;
} | 3.68 |
flink_MemoryManager_create | /**
* Creates a memory manager with the given capacity and given page size.
*
* <p>This is a production version of MemoryManager which checks for memory leaks ({@link
* #verifyEmpty()}) once the owner of the MemoryManager is ready to dispose.
*
* @param memorySize The total size of the off-heap memory to be managed by this memory manager.
* @param pageSize The size of the pages handed out by the memory manager.
*/
public static MemoryManager create(long memorySize, int pageSize) {
return new MemoryManager(memorySize, pageSize);
} | 3.68 |
framework_Button_fireClick | /**
* Fires a click event to all listeners.
*
* @param details
* MouseEventDetails from which keyboard modifiers and other
* information about the mouse click can be obtained. If the
* button was clicked by a keyboard event, some of the fields may
* be empty/undefined.
*/
protected void fireClick(MouseEventDetails details) {
fireEvent(new Button.ClickEvent(this, details));
} | 3.68 |
hudi_HoodieLogFormatWriter_getLogBlockLength | /**
* This method returns the total LogBlock Length which is the sum of 1. Number of bytes to write version 2. Number of
* bytes to write ordinal 3. Length of the headers 4. Number of bytes used to write content length 5. Length of the
* content 6. Length of the footers 7. Number of bytes to write totalLogBlockLength
*/
private int getLogBlockLength(int contentLength, int headerLength, int footerLength) {
return Integer.BYTES + // Number of bytes to write version
Integer.BYTES + // Number of bytes to write ordinal
headerLength + // Length of the headers
Long.BYTES + // Number of bytes used to write content length
contentLength + // Length of the content
footerLength + // Length of the footers
Long.BYTES; // bytes to write totalLogBlockLength at end of block (for reverse ptr)
} | 3.68 |
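A minimal standalone sketch of the same byte accounting (illustrative only, not part of Hudi), useful for sanity-checking the layout described in the comment above:

```java
// Illustrative only: reproduces the byte accounting of getLogBlockLength outside Hudi.
public class LogBlockLengthSketch {
    static int logBlockLength(int contentLength, int headerLength, int footerLength) {
        return Integer.BYTES   // version
            + Integer.BYTES    // ordinal
            + headerLength     // headers
            + Long.BYTES       // content length field
            + contentLength    // content
            + footerLength     // footers
            + Long.BYTES;      // trailing totalLogBlockLength (reverse pointer)
    }

    public static void main(String[] args) {
        // 4 + 4 + 64 + 8 + 1024 + 0 + 8 = 1112
        System.out.println(logBlockLength(1024, 64, 0)); // prints 1112
    }
}
```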
framework_Button_setDisableOnClick | /**
* Determines if a button is automatically disabled when clicked. If this is
* set to true the button will be automatically disabled when clicked,
* typically to prevent (accidental) extra clicks on a button.
* <p>
* Note that this is only used when the click comes from the user, not when
* calling the {@link #click()} method programmatically. Also, if the developer
* wants to re-enable the button, it must be done programmatically.
* </p>
*
* @param disableOnClick
* true to disable button when it is clicked, false otherwise
*/
public void setDisableOnClick(boolean disableOnClick) {
getState().disableOnClick = disableOnClick;
} | 3.68 |
hbase_AccessControlUtil_getUserPermissions | /**
* A utility used to get permissions for selected namespace based on the specified user name.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
* @param namespace name of the namespace
* @param userName User name, if empty then all user permissions will be retrieved.
* @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
AccessControlService.BlockingInterface protocol, byte[] namespace, String userName)
throws ServiceException {
AccessControlProtos.GetUserPermissionsRequest.Builder builder =
AccessControlProtos.GetUserPermissionsRequest.newBuilder();
if (namespace != null) {
builder.setNamespaceName(UnsafeByteOperations.unsafeWrap(namespace));
}
if (!StringUtils.isEmpty(userName)) {
builder.setUserName(ByteString.copyFromUtf8(userName));
}
builder.setType(AccessControlProtos.Permission.Type.Namespace);
AccessControlProtos.GetUserPermissionsRequest request = builder.build();
AccessControlProtos.GetUserPermissionsResponse response =
protocol.getUserPermissions(controller, request);
List<UserPermission> perms = new ArrayList<>(response.getUserPermissionCount());
for (AccessControlProtos.UserPermission perm : response.getUserPermissionList()) {
perms.add(toUserPermission(perm));
}
return perms;
} | 3.68 |
hudi_CompactionStrategy_orderAndFilter | /**
* Orders and filters the list of compaction operations, using the metrics captured via captureMetrics to decide
* which compactions to run and in what order
*
* @param writeConfig config for this compaction is passed in
* @param operations list of compactions collected
* @param pendingCompactionPlans Pending Compaction Plans for strategy to schedule next compaction plan
* @return list of compactions to perform in this run
*/
public List<HoodieCompactionOperation> orderAndFilter(HoodieWriteConfig writeConfig,
List<HoodieCompactionOperation> operations, List<HoodieCompactionPlan> pendingCompactionPlans) {
return operations;
} | 3.68 |
hudi_ClusteringPlanStrategy_getExtraMetadata | /**
* Returns any specific parameters to be stored as part of clustering metadata.
*/
protected Map<String, String> getExtraMetadata() {
return Collections.emptyMap();
} | 3.68 |
hbase_AbstractFSWAL_getFileNumFromFileName | /**
* A log file has a creation timestamp (in ms) in its file name ({@link #filenum}). This helper
* method returns the creation timestamp from a given log file. It extracts the timestamp assuming
* the filename is created with the {@link #computeFilename(long filenum)} method.
* @return timestamp, as in the log file name.
*/
protected long getFileNumFromFileName(Path fileName) {
checkNotNull(fileName, "file name can't be null");
if (!ourFiles.accept(fileName)) {
throw new IllegalArgumentException(
"The log file " + fileName + " doesn't belong to this WAL. (" + toString() + ")");
}
final String fileNameString = fileName.toString();
String chompedPath = fileNameString.substring(prefixPathStr.length(),
(fileNameString.length() - walFileSuffix.length()));
return Long.parseLong(chompedPath);
} | 3.68 |
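A small self-contained sketch of the prefix/suffix chomping this relies on (the names and values below are hypothetical, not the HBase API):

```java
// Hypothetical sketch: a WAL name is <prefix><creationTimestampMs><suffix>; the
// timestamp is recovered by stripping the known prefix and suffix and parsing the rest.
public class WalFileNumSketch {
    static long fileNumFromName(String fileName, String prefix, String suffix) {
        String chomped = fileName.substring(prefix.length(), fileName.length() - suffix.length());
        return Long.parseLong(chomped);
    }

    public static void main(String[] args) {
        String prefix = "/hbase/WALs/regionserver-1/regionserver-1.";
        String suffix = ".meta";
        String name = prefix + 1700000000123L + suffix;
        System.out.println(fileNumFromName(name, prefix, suffix)); // 1700000000123
    }
}
```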
framework_VAbstractOrderedLayout_setHeight | /**
* {@inheritDoc}
*/
@Override
public void setHeight(String height) {
super.setHeight(height);
definedHeight = height != null && !height.isEmpty();
} | 3.68 |
flink_RocksDBIncrementalRestoreOperation_restore | /** Root method that branches for different implementations of {@link KeyedStateHandle}. */
@Override
public RocksDBRestoreResult restore() throws Exception {
if (restoreStateHandles == null || restoreStateHandles.isEmpty()) {
return null;
}
final KeyedStateHandle theFirstStateHandle = restoreStateHandles.iterator().next();
boolean isRescaling =
(restoreStateHandles.size() > 1
|| !Objects.equals(theFirstStateHandle.getKeyGroupRange(), keyGroupRange));
if (isRescaling) {
restoreWithRescaling(restoreStateHandles);
} else {
restoreWithoutRescaling(theFirstStateHandle);
}
return new RocksDBRestoreResult(
this.rocksHandle.getDb(),
this.rocksHandle.getDefaultColumnFamilyHandle(),
this.rocksHandle.getNativeMetricMonitor(),
lastCompletedCheckpointId,
backendUID,
restoredSstFiles);
} | 3.68 |
framework_StreamResource_setBufferSize | /**
* Sets the size of the download buffer used for this resource.
*
* @param bufferSize
* the size of the buffer in bytes.
*/
public void setBufferSize(int bufferSize) {
this.bufferSize = bufferSize;
} | 3.68 |
hadoop_DataJoinJob_runJob | /**
* Submit/run a map/reduce job.
*
* @param job the job configuration to submit
* @return true if the job completed successfully
* @throws IOException if job submission or monitoring fails
*/
public static boolean runJob(JobConf job) throws IOException {
JobClient jc = new JobClient(job);
boolean success = true;
RunningJob running = null;
try {
running = jc.submitJob(job);
JobID jobId = running.getID();
System.out.println("Job " + jobId + " is submitted");
while (!running.isComplete()) {
System.out.println("Job " + jobId + " is still running.");
try {
Thread.sleep(60000);
} catch (InterruptedException e) {
}
running = jc.getJob(jobId);
}
success = running.isSuccessful();
} finally {
if (!success && (running != null)) {
running.killJob();
}
jc.close();
}
return success;
} | 3.68 |
hbase_HFileReaderImpl_getGeneralBloomFilterMetadata | /**
* Returns a buffer with the Bloom filter metadata. The caller takes ownership of the buffer.
*/
@Override
public DataInput getGeneralBloomFilterMetadata() throws IOException {
return this.getBloomFilterMetadata(BlockType.GENERAL_BLOOM_META);
} | 3.68 |
hibernate-validator_INNValidator_checkChecksumJuridicalINN | /**
* Check the digits for juridical INN using algorithm from
* <a href="https://ru.wikipedia.org/wiki/%D0%98%D0%B4%D0%B5%D0%BD%D1%82%D0%B8%D1%84%D0%B8%D0%BA%D0%B0%D1%86%D0%B8%D0%BE%D0%BD%D0%BD%D1%8B%D0%B9_%D0%BD%D0%BE%D0%BC%D0%B5%D1%80_%D0%BD%D0%B0%D0%BB%D0%BE%D0%B3%D0%BE%D0%BF%D0%BB%D0%B0%D1%82%D0%B5%D0%BB%D1%8C%D1%89%D0%B8%D0%BA%D0%B0#%D0%92%D1%8B%D1%87%D0%B8%D1%81%D0%BB%D0%B5%D0%BD%D0%B8%D0%B5_%D0%BA%D0%BE%D0%BD%D1%82%D1%80%D0%BE%D0%BB%D1%8C%D0%BD%D1%8B%D1%85_%D1%86%D0%B8%D1%84%D1%80">Wikipedia</a>.
*/
private static boolean checkChecksumJuridicalINN(int[] digits) {
final int checkSum = getCheckSum( digits, JURIDICAL_WEIGHTS );
return digits[digits.length - 1] == checkSum;
} | 3.68 |
flink_Catalog_getTableFactory | /**
* Get an optional {@link TableFactory} instance that's responsible for generating table-related
* instances stored in this catalog, instances such as source/sink.
*
* @return an optional TableFactory instance
* @deprecated Use {@link #getFactory()} for the new factory stack. The new factory stack uses
* the new table sources and sinks defined in FLIP-95 and a slightly different discovery
* mechanism.
*/
@Deprecated
default Optional<TableFactory> getTableFactory() {
return Optional.empty();
} | 3.68 |
hbase_TableBackupClient_failBackup | /**
* Fail the overall backup.
* @param conn connection to the cluster
* @param backupInfo backup info
* @param backupManager backup manager
* @param e exception that caused the failure
* @param msg message describing the failure
* @param type backup type
* @param conf configuration
* @throws IOException if cleaning up and restoring the backup system fails
*/
protected void failBackup(Connection conn, BackupInfo backupInfo, BackupManager backupManager,
Exception e, String msg, BackupType type, Configuration conf) throws IOException {
try {
LOG.error(msg + getMessage(e), e);
// If this is a cancel exception, then we've already cleaned.
// set the failure timestamp of the overall backup
backupInfo.setCompleteTs(EnvironmentEdgeManager.currentTime());
// set failure message
backupInfo.setFailedMsg(e.getMessage());
// set overall backup status: failed
backupInfo.setState(BackupState.FAILED);
// compose the backup failed data
String backupFailedData = "BackupId=" + backupInfo.getBackupId() + ",startts="
+ backupInfo.getStartTs() + ",failedts=" + backupInfo.getCompleteTs() + ",failedphase="
+ backupInfo.getPhase() + ",failedmessage=" + backupInfo.getFailedMsg();
LOG.error(backupFailedData);
cleanupAndRestoreBackupSystem(conn, backupInfo, conf);
// If backup session is updated to FAILED state - means we
// processed recovery already.
backupManager.updateBackupInfo(backupInfo);
backupManager.finishBackupSession();
LOG.error("Backup " + backupInfo.getBackupId() + " failed.");
} catch (IOException ee) {
LOG.error("Please run backup repair tool manually to restore backup system integrity");
throw ee;
}
} | 3.68 |
framework_AbstractColorPicker_isHtmlContentAllowed | /**
* Return HTML rendering setting.
*
* @return <code>true</code> if the caption text is to be rendered as HTML,
* <code>false</code> otherwise
* @deprecated as of , use {@link #isCaptionAsHtml()} instead
*/
@Deprecated
public boolean isHtmlContentAllowed() {
return isCaptionAsHtml();
} | 3.68 |
druid_MySqlSelectIntoParser_parseIntoArgs | /**
* Parses the SELECT ... INTO target arguments, for example the <code>@var</code> targets in
* <code>SELECT id INTO @var FROM t</code>.
*
* @return the list of INTO target expressions
*/
protected List<SQLExpr> parseIntoArgs() {
List<SQLExpr> args = new ArrayList<SQLExpr>();
if (lexer.token() == (Token.INTO)) {
accept(Token.INTO);
//lexer.nextToken();
for (; ; ) {
SQLExpr var = exprParser.primary();
if (var instanceof SQLIdentifierExpr) {
var = new SQLVariantRefExpr(
((SQLIdentifierExpr) var).getName());
}
args.add(var);
if (lexer.token() == Token.COMMA) {
accept(Token.COMMA);
continue;
} else {
break;
}
}
}
return args;
} | 3.68 |
hbase_PrivateCellUtil_isDeleteColumnOrFamily | /** Returns True if this cell is a delete family or column type. */
public static boolean isDeleteColumnOrFamily(Cell cell) {
int t = cell.getTypeByte();
return t == KeyValue.Type.DeleteColumn.getCode() || t == KeyValue.Type.DeleteFamily.getCode();
} | 3.68 |
hbase_AsyncConnection_getBufferedMutator | /**
* Retrieve an {@link AsyncBufferedMutator} for performing client-side buffering of writes.
* <p>
* The returned instance will use default configs. Use
* {@link #getBufferedMutatorBuilder(TableName, ExecutorService)} if you want to customize some
* configs.
* @param tableName the name of the table
* @param pool the thread pool to use for executing callback
* @return an {@link AsyncBufferedMutator} for the supplied tableName.
*/
default AsyncBufferedMutator getBufferedMutator(TableName tableName, ExecutorService pool) {
return getBufferedMutatorBuilder(tableName, pool).build();
} | 3.68 |
flink_StreamExecutionEnvironment_getTypeInfo | // Private helpers.
@SuppressWarnings("unchecked")
private <OUT, T extends TypeInformation<OUT>> T getTypeInfo(
Object source,
String sourceName,
Class<?> baseSourceClass,
TypeInformation<OUT> typeInfo) {
TypeInformation<OUT> resolvedTypeInfo = typeInfo;
if (resolvedTypeInfo == null && source instanceof ResultTypeQueryable) {
resolvedTypeInfo = ((ResultTypeQueryable<OUT>) source).getProducedType();
}
if (resolvedTypeInfo == null) {
try {
resolvedTypeInfo =
TypeExtractor.createTypeInfo(
baseSourceClass, source.getClass(), 0, null, null);
} catch (final InvalidTypesException e) {
resolvedTypeInfo = (TypeInformation<OUT>) new MissingTypeInfo(sourceName, e);
}
}
return (T) resolvedTypeInfo;
} | 3.68 |
hudi_CopyOnWriteInputFormat_getBlockIndexForPosition | /**
* Retrieves the index of the <tt>BlockLocation</tt> that contains the part of the file described by the given
* offset.
*
* @param blocks The different blocks of the file. Must be ordered by their offset.
* @param offset The offset of the position in the file.
* @param halfSplitSize Half the split size; if less than this remains in the matched block, the next block is chosen.
* @param startIndex The earliest index to look at.
* @return The index of the block containing the given position.
*/
private int getBlockIndexForPosition(BlockLocation[] blocks, long offset, long halfSplitSize, int startIndex) {
// go over all indexes after the startIndex
for (int i = startIndex; i < blocks.length; i++) {
long blockStart = blocks[i].getOffset();
long blockEnd = blockStart + blocks[i].getLength();
if (offset >= blockStart && offset < blockEnd) {
// got the block where the split starts
// check if the next block contains more than this one does
if (i < blocks.length - 1 && blockEnd - offset < halfSplitSize) {
return i + 1;
} else {
return i;
}
}
}
throw new IllegalArgumentException("The given offset is not contained in the any block.");
} | 3.68 |
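A standalone sketch of the block-picking heuristic described above (illustrative types and values, not the Hudi/Hadoop classes): given blocks ordered by offset, pick the block containing the offset, but prefer the next block when less than half a split remains in the current one.

```java
// Illustrative sketch of getBlockIndexForPosition's heuristic with plain arrays.
public class BlockIndexSketch {
    static int blockIndexForPosition(long[] starts, long[] lengths, long offset, long halfSplitSize) {
        for (int i = 0; i < starts.length; i++) {
            long end = starts[i] + lengths[i];
            if (offset >= starts[i] && offset < end) {
                // prefer the next block if very little of this one remains
                return (i < starts.length - 1 && end - offset < halfSplitSize) ? i + 1 : i;
            }
        }
        throw new IllegalArgumentException("The given offset is not contained in any block.");
    }

    public static void main(String[] args) {
        long[] starts  = {0, 128, 256};
        long[] lengths = {128, 128, 128};
        // Offset 250 sits in block 1, but only 6 bytes remain (< 64), so block 2 is chosen.
        System.out.println(blockIndexForPosition(starts, lengths, 250, 64)); // 2
        System.out.println(blockIndexForPosition(starts, lengths, 130, 64)); // 1
    }
}
```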
framework_VaadinSession_setLastRequestDuration | /**
* Sets the time spent servicing the last request in the session and updates
* the total time spent servicing requests in this session.
*
* @param time
* The time spent in the last request, in milliseconds.
*/
public void setLastRequestDuration(long time) {
assert hasLock();
lastRequestDuration = time;
cumulativeRequestDuration += time;
} | 3.68 |
pulsar_ManagedLedgerConfig_getClock | /**
* Get clock to use to time operations.
*
* @return a clock
*/
public Clock getClock() {
return clock;
} | 3.68 |
flink_AbstractInvokable_getEnvironment | /**
* Returns the environment of this task.
*
* @return The environment of this task.
*/
public final Environment getEnvironment() {
return this.environment;
} | 3.68 |
streampipes_DataSinkApi_subscribe | /**
* Subscribe to the input stream of the sink
*
* @param sink The data sink to subscribe to
* @param brokerConfigOverride Additional kafka settings which will override the default value (see docs)
* @param callback The callback where events will be received
*/
@Override
public ISubscription subscribe(DataSinkInvocation sink,
IBrokerConfigOverride brokerConfigOverride,
EventProcessor callback) {
return new SubscriptionManager(brokerConfigOverride,
sink.getInputStreams().get(0).getEventGrounding(), callback).subscribe();
} | 3.68 |
flink_KvStateRegistry_registerKvState | /**
* Registers the KvState instance and returns the assigned ID.
*
* @param jobId JobId the KvState instance belongs to
* @param jobVertexId JobVertexID the KvState instance belongs to
* @param keyGroupRange Key group range the KvState instance belongs to
* @param registrationName Name under which the KvState is registered
* @param kvState KvState instance to be registered
* @param userClassLoader the user code class loader associated with the KvState instance
* @return Assigned KvStateID
*/
public KvStateID registerKvState(
JobID jobId,
JobVertexID jobVertexId,
KeyGroupRange keyGroupRange,
String registrationName,
InternalKvState<?, ?, ?> kvState,
ClassLoader userClassLoader) {
KvStateID kvStateId = new KvStateID();
if (registeredKvStates.putIfAbsent(kvStateId, new KvStateEntry<>(kvState, userClassLoader))
== null) {
final KvStateRegistryListener listener = getKvStateRegistryListener(jobId);
if (listener != null) {
listener.notifyKvStateRegistered(
jobId, jobVertexId, keyGroupRange, registrationName, kvStateId);
}
return kvStateId;
} else {
throw new IllegalStateException(
"State \""
+ registrationName
+ " \"(id="
+ kvStateId
+ ") appears registered although it should not.");
}
} | 3.68 |
MagicPlugin_BoundingBox_scaleFromBase | /**
* Scale this BoundingBox, but keep the min-Y value constant.
*
* <p>Useful for scaling entity AABB's.
*
* @param scale factor to scale the horizontal (X and Z) extents by
* @param scaleY factor to scale the top (max Y) of the box by; min Y is left unchanged
* @return the scaled BB (this object)
*/
public BoundingBox scaleFromBase(double scale, double scaleY)
{
if (scale <= 0 || scale == 1) return this;
Vector center = this.center();
this.min.setX((this.min.getX() - center.getX()) * scale + center.getX());
// We just skip setting minY, scaling Y only upward
this.min.setZ((this.min.getZ() - center.getZ()) * scale + center.getZ());
this.max.setX((this.max.getX() - center.getX()) * scale + center.getX());
this.max.setY((this.max.getY() - center.getY()) * scaleY + center.getY());
this.max.setZ((this.max.getZ() - center.getZ()) * scale + center.getZ());
return this;
} | 3.68 |
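A worked numeric sketch of "scale but keep the base (min Y)" on two axes (illustrative, independent of the MagicPlugin/Bukkit vector classes): a box spanning y=10..12 and x=-1..1, scaled with scale=2 and scaleY=2, keeps its floor at y=10.

```java
// Illustrative arithmetic only; mirrors the min/max updates in scaleFromBase().
public class ScaleFromBaseSketch {
    public static void main(String[] args) {
        double minX = -1, maxX = 1, minY = 10, maxY = 12;
        double scale = 2, scaleY = 2;
        double centerX = (minX + maxX) / 2;   // 0
        double centerY = (minY + maxY) / 2;   // 11

        minX = (minX - centerX) * scale + centerX;    // -2
        maxX = (maxX - centerX) * scale + centerX;    //  2
        // minY is intentionally left untouched, so growth along Y is upward only
        maxY = (maxY - centerY) * scaleY + centerY;   // 13

        System.out.printf("x: [%s, %s], y: [%s, %s]%n", minX, maxX, minY, maxY);
        // x: [-2.0, 2.0], y: [10.0, 13.0]
    }
}
```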
hbase_TableSchemaModel_getColumns | /** Returns the columns */
@XmlElement(name = "ColumnSchema")
public List<ColumnSchemaModel> getColumns() {
return columns;
} | 3.68 |
hadoop_AbfsHttpOperation_getConnResponseCode | /**
* Gets the connection response code.
* @return response code.
* @throws IOException
*/
Integer getConnResponseCode() throws IOException {
return connection.getResponseCode();
} | 3.68 |
dubbo_ConfigUtils_loadProperties | /**
* Load properties file to {@link Properties} from class path.
*
* @param classLoaders class loaders used to locate the file on the class path
* @param fileName properties file name, for example <code>dubbo.properties</code> or <code>META-INF/conf/foo.properties</code>
* @param allowMultiFile if <code>false</code>, throw {@link IllegalStateException} when multiple files are found on the class path.
* @param optional whether the file is optional; if <code>false</code>, log a warning when the properties config file is not found
* @return loaded {@link Properties} content. <ul>
* <li>returns empty Properties if no file is found.
* <li>merges the content of multiple properties files if more than one is found
* </ul>
* @throws IllegalStateException if multi-file is not allowed but multiple files exist on the class path.
*/
public static Properties loadProperties(
Set<ClassLoader> classLoaders, String fileName, boolean allowMultiFile, boolean optional) {
Properties properties = new Properties();
// add scene judgement in windows environment Fix 2557
if (checkFileNameExist(fileName)) {
try {
FileInputStream input = new FileInputStream(fileName);
try {
properties.load(input);
} finally {
input.close();
}
} catch (Throwable e) {
logger.warn(
COMMON_IO_EXCEPTION,
"",
"",
"Failed to load " + fileName + " file from " + fileName + "(ignore this file): "
+ e.getMessage(),
e);
}
return properties;
}
Set<java.net.URL> set = null;
try {
List<ClassLoader> classLoadersToLoad = new LinkedList<>();
classLoadersToLoad.add(ClassUtils.getClassLoader());
classLoadersToLoad.addAll(classLoaders);
set = ClassLoaderResourceLoader.loadResources(fileName, classLoadersToLoad).values().stream()
.reduce(new LinkedHashSet<>(), (a, i) -> {
a.addAll(i);
return a;
});
} catch (Throwable t) {
logger.warn(COMMON_IO_EXCEPTION, "", "", "Fail to load " + fileName + " file: " + t.getMessage(), t);
}
if (CollectionUtils.isEmpty(set)) {
if (!optional) {
logger.warn(COMMON_IO_EXCEPTION, "", "", "No " + fileName + " found on the class path.");
}
return properties;
}
if (!allowMultiFile) {
if (set.size() > 1) {
String errMsg = String.format(
"only 1 %s file is expected, but %d dubbo.properties files found on class path: %s",
fileName, set.size(), set);
logger.warn(COMMON_IO_EXCEPTION, "", "", errMsg);
}
// fall back to use method getResourceAsStream
try {
properties.load(ClassUtils.getClassLoader().getResourceAsStream(fileName));
} catch (Throwable e) {
logger.warn(
COMMON_IO_EXCEPTION,
"",
"",
"Failed to load " + fileName + " file from " + fileName + "(ignore this file): "
+ e.getMessage(),
e);
}
return properties;
}
logger.info("load " + fileName + " properties file from " + set);
for (java.net.URL url : set) {
try {
Properties p = new Properties();
InputStream input = url.openStream();
if (input != null) {
try {
p.load(input);
properties.putAll(p);
} finally {
try {
input.close();
} catch (Throwable t) {
}
}
}
} catch (Throwable e) {
logger.warn(
COMMON_IO_EXCEPTION,
"",
"",
"Fail to load " + fileName + " file from " + url + "(ignore this file): " + e.getMessage(),
e);
}
}
return properties;
} | 3.68 |
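The multi-file merge described above can be approximated with nothing but the JDK. The sketch below is not the Dubbo API, only an illustration of the "load every matching resource on the class path and merge" idea:

```java
import java.io.InputStream;
import java.net.URL;
import java.util.Enumeration;
import java.util.Properties;

public class MergedClasspathProperties {
    static Properties loadAll(String fileName) throws Exception {
        Properties merged = new Properties();
        Enumeration<URL> urls = Thread.currentThread()
                .getContextClassLoader().getResources(fileName);
        while (urls.hasMoreElements()) {
            URL url = urls.nextElement();
            try (InputStream in = url.openStream()) {
                Properties p = new Properties();
                p.load(in);
                merged.putAll(p); // later files override earlier keys
            }
        }
        return merged;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(loadAll("dubbo.properties"));
    }
}
```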
querydsl_StringExpression_isEmpty | /**
* Create a {@code this.isEmpty()} expression
*
* <p>Return true if this String is empty</p>
*
* @return this.isEmpty()
* @see java.lang.String#isEmpty()
*/
public BooleanExpression isEmpty() {
if (isempty == null) {
isempty = Expressions.booleanOperation(Ops.STRING_IS_EMPTY, mixin);
}
return isempty;
} | 3.68 |
zxing_Code39Reader_toNarrowWidePattern | // For efficiency, returns -1 on failure. Not throwing here saved as many as 700 exceptions
// per image when using some of our blackbox images.
private static int toNarrowWidePattern(int[] counters) {
int numCounters = counters.length;
int maxNarrowCounter = 0;
int wideCounters;
do {
int minCounter = Integer.MAX_VALUE;
for (int counter : counters) {
if (counter < minCounter && counter > maxNarrowCounter) {
minCounter = counter;
}
}
maxNarrowCounter = minCounter;
wideCounters = 0;
int totalWideCountersWidth = 0;
int pattern = 0;
for (int i = 0; i < numCounters; i++) {
int counter = counters[i];
if (counter > maxNarrowCounter) {
pattern |= 1 << (numCounters - 1 - i);
wideCounters++;
totalWideCountersWidth += counter;
}
}
if (wideCounters == 3) {
// Found 3 wide counters, but are they close enough in width?
// We can perform a cheap, conservative check to see if any individual
// counter is more than 1.5 times the average:
for (int i = 0; i < numCounters && wideCounters > 0; i++) {
int counter = counters[i];
if (counter > maxNarrowCounter) {
wideCounters--;
// totalWideCountersWidth = 3 * average, so this checks if counter >= 3/2 * average
if ((counter * 2) >= totalWideCountersWidth) {
return -1;
}
}
}
return pattern;
}
} while (wideCounters > 3);
return -1;
} | 3.68 |
flink_SavepointReader_read | /**
* Loads an existing savepoint. Useful if you want to query the state of an existing
* application.
*
* @param env The execution environment used to transform the savepoint.
* @param path The path to an existing savepoint on disk.
* @param stateBackend The state backend of the savepoint.
* @return A {@link SavepointReader}.
*/
public static SavepointReader read(
StreamExecutionEnvironment env, String path, StateBackend stateBackend)
throws IOException {
CheckpointMetadata metadata = SavepointLoader.loadSavepointMetadata(path);
int maxParallelism =
metadata.getOperatorStates().stream()
.map(OperatorState::getMaxParallelism)
.max(Comparator.naturalOrder())
.orElseThrow(
() ->
new RuntimeException(
"Savepoint must contain at least one operator state."));
SavepointMetadataV2 savepointMetadata =
new SavepointMetadataV2(
maxParallelism, metadata.getMasterStates(), metadata.getOperatorStates());
return new SavepointReader(env, savepointMetadata, stateBackend);
} | 3.68 |
hadoop_TimedHealthReporterService_setHealthReport | /**
* Sets the health report from the node health check. Also set the disks'
* health info obtained from DiskHealthCheckerService.
*
* @param report report String
*/
private synchronized void setHealthReport(String report) {
this.healthReport = report;
} | 3.68 |
morf_AbstractSelectStatementBuilder_castToChild | /**
* @param abstractSelectStatement the builder to cast
* @return the builder cast to the concrete child type
*/
@SuppressWarnings("unchecked")
private T castToChild(AbstractSelectStatementBuilder<U, T> abstractSelectStatement) {
return (T) abstractSelectStatement;
} | 3.68 |
zxing_IntentIntegrator_initiateScan | /**
* Initiates a scan, using the specified camera, only for a certain set of barcode types, given as strings
* corresponding to their names in ZXing's {@code BarcodeFormat} class like "UPC_A". You can supply constants
* like {@link #PRODUCT_CODE_TYPES} for example.
*
* @param desiredBarcodeFormats names of {@code BarcodeFormat}s to scan for
* @param cameraId camera ID of the camera to use. A negative value means "no preference".
* @return the {@link AlertDialog} that was shown to the user prompting them to download the app
* if a prompt was needed, or null otherwise
*/
public final AlertDialog initiateScan(Collection<String> desiredBarcodeFormats, int cameraId) {
Intent intentScan = new Intent(BS_PACKAGE + ".SCAN");
intentScan.addCategory(Intent.CATEGORY_DEFAULT);
// check which types of codes to scan for
if (desiredBarcodeFormats != null) {
// set the desired barcode types
StringBuilder joinedByComma = new StringBuilder();
for (String format : desiredBarcodeFormats) {
if (joinedByComma.length() > 0) {
joinedByComma.append(',');
}
joinedByComma.append(format);
}
intentScan.putExtra("SCAN_FORMATS", joinedByComma.toString());
}
// check requested camera ID
if (cameraId >= 0) {
intentScan.putExtra("SCAN_CAMERA_ID", cameraId);
}
String targetAppPackage = findTargetAppPackage(intentScan);
if (targetAppPackage == null) {
return showDownloadDialog();
}
intentScan.setPackage(targetAppPackage);
intentScan.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP);
intentScan.addFlags(FLAG_NEW_DOC);
attachMoreExtras(intentScan);
startActivityForResult(intentScan, REQUEST_CODE);
return null;
} | 3.68 |
hbase_Get_setTimestamp | /**
* Get versions of columns with the specified timestamp.
* @param timestamp version timestamp
* @return this for invocation chaining
*/
public Get setTimestamp(long timestamp) {
try {
tr = TimeRange.at(timestamp);
} catch (Exception e) {
// This should never happen, unless integer overflow or something extremely wrong...
LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
throw e;
}
return this;
} | 3.68 |
flink_SerializationSchema_open | /**
* Initialization method for the schema. It is called before the actual working methods {@link
* #serialize(Object)} and thus suitable for one time setup work.
*
* <p>The provided {@link InitializationContext} can be used to access additional features such
* as e.g. registering user metrics.
*
* @param context Contextual information that can be used during initialization.
*/
@PublicEvolving
default void open(InitializationContext context) throws Exception {} | 3.68 |
flink_RocksDBMemoryConfiguration_isUsingManagedMemory | /**
* Gets whether the state backend is configured to use the managed memory of a slot for RocksDB.
* See {@link RocksDBOptions#USE_MANAGED_MEMORY} for details.
*/
public boolean isUsingManagedMemory() {
return useManagedMemory != null
? useManagedMemory
: RocksDBOptions.USE_MANAGED_MEMORY.defaultValue();
} | 3.68 |
morf_SqlParameter_hashCode | /**
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return new HashCodeBuilder()
.appendSuper(super.hashCode())
.append(name)
.append(scale)
.append(width)
.append(type)
.toHashCode();
} | 3.68 |
framework_Action_setIcon | /**
* Sets the icon.
*
* @param icon
* the icon to set.
*/
public void setIcon(Resource icon) {
this.icon = icon;
} | 3.68 |
hbase_BitSetNode_alignDown | /** Returns lower boundary (aligned to multiple of BITS_PER_WORD) of bitmap range x belongs to. */
private static long alignDown(final long x) {
return x & -BITS_PER_WORD;
} | 3.68 |
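A quick worked check of the bit trick above (assuming BITS_PER_WORD is a power of two such as 64, which is an assumption here, not stated in the snippet): `x & -64` clears the low six bits and rounds x down to a multiple of 64.

```java
// Illustrative sketch; BITS_PER_WORD = 64 is an assumed value.
public class AlignDownSketch {
    private static final long BITS_PER_WORD = 64;

    static long alignDown(long x) {
        return x & -BITS_PER_WORD; // clears the low log2(BITS_PER_WORD) bits
    }

    public static void main(String[] args) {
        System.out.println(alignDown(70));  // 64
        System.out.println(alignDown(64));  // 64
        System.out.println(alignDown(63));  // 0
    }
}
```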
framework_Table_getColumnAlignment | /**
* Gets the specified column's alignment.
*
* @param propertyId
* the propertyID identifying the column.
* @return the specified column's alignment if it has one; {@link Align#LEFT}
* otherwise.
*/
public Align getColumnAlignment(Object propertyId) {
final Align a = columnAlignments.get(propertyId);
return a == null ? Align.LEFT : a;
} | 3.68 |
framework_VTree_getCommonGrandParent | /**
* Returns the first common parent of two nodes.
*
* @param node1
* The first node
* @param node2
* The second node
* @return The common parent or null
*/
public TreeNode getCommonGrandParent(TreeNode node1, TreeNode node2) {
// If either one does not have a parent then return null
if (node1.getParentNode() == null || node2.getParentNode() == null) {
return null;
}
// If the nodes are parents of each other then return null
if (node1.isGrandParentOf(node2) || node2.isGrandParentOf(node1)) {
return null;
}
// Get parents of node1
List<TreeNode> parents1 = new ArrayList<TreeNode>();
TreeNode parent1 = node1.getParentNode();
while (parent1 != null) {
parents1.add(parent1);
parent1 = parent1.getParentNode();
}
// Get parents of node2
List<TreeNode> parents2 = new ArrayList<TreeNode>();
TreeNode parent2 = node2.getParentNode();
while (parent2 != null) {
parents2.add(parent2);
parent2 = parent2.getParentNode();
}
// Search the parents for the first common parent
for (int i = 0; i < parents1.size(); i++) {
parent1 = parents1.get(i);
for (int j = 0; j < parents2.size(); j++) {
parent2 = parents2.get(j);
if (parent1 == parent2) {
return parent1;
}
}
}
return null;
} | 3.68 |
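The same ancestor-intersection idea in a compact, self-contained form (a hypothetical Node class, not Vaadin's TreeNode; the grand-parent short-circuit of the original is omitted for brevity):

```java
import java.util.ArrayList;
import java.util.List;

public class CommonAncestorSketch {
    static class Node {
        final Node parent;
        Node(Node parent) { this.parent = parent; }
    }

    static Node firstCommonAncestor(Node a, Node b) {
        List<Node> ancestorsOfB = new ArrayList<>();
        for (Node p = b.parent; p != null; p = p.parent) {
            ancestorsOfB.add(p);
        }
        // Walk a's ancestors bottom-up; the first one also among b's ancestors wins,
        // matching the nested-loop search order in the method above.
        for (Node p = a.parent; p != null; p = p.parent) {
            if (ancestorsOfB.contains(p)) { // identity comparison via Object.equals
                return p;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        Node root = new Node(null);
        Node left = new Node(root);
        Node right = new Node(root);
        Node leaf = new Node(left);
        System.out.println(firstCommonAncestor(leaf, right) == root); // true
    }
}
```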
flink_DataSet_printToErr | /**
* Writes a DataSet to the standard error stream (stderr).
*
* <p>For each element of the DataSet the result of {@link Object#toString()} is written.
*
* @param sinkIdentifier The string to prefix the output with.
* @return The DataSink that writes the DataSet.
* @deprecated Use {@link #printOnTaskManager(String)} instead, or the {@link
* PrintingOutputFormat}.
*/
@Deprecated
@PublicEvolving
public DataSink<T> printToErr(String sinkIdentifier) {
return output(new PrintingOutputFormat<T>(sinkIdentifier, true));
} | 3.68 |
flink_FunctionTemplate_createResultTemplate | /** Creates an instance of {@link FunctionResultTemplate} from a {@link DataTypeHint}. */
static @Nullable FunctionResultTemplate createResultTemplate(
DataTypeFactory typeFactory, @Nullable DataTypeHint hint) {
if (hint == null) {
return null;
}
final DataTypeTemplate template;
try {
template = DataTypeTemplate.fromAnnotation(typeFactory, hint);
} catch (Throwable t) {
throw extractionError(t, "Error in data type hint annotation.");
}
if (template.dataType != null) {
return FunctionResultTemplate.of(template.dataType);
}
throw extractionError(
"Data type hint does not specify a data type for use as function result.");
} | 3.68 |
hudi_BaseHoodieWriteClient_writeTableMetadata | /**
* Write the HoodieCommitMetadata to metadata table if available.
*
* @param table {@link HoodieTable} of interest.
* @param instantTime instant time of the commit.
* @param metadata instance of {@link HoodieCommitMetadata}.
* @param writeStatuses WriteStatuses for the completed action.
*/
protected void writeTableMetadata(HoodieTable table, String instantTime, HoodieCommitMetadata metadata, HoodieData<WriteStatus> writeStatuses) {
context.setJobStatus(this.getClass().getSimpleName(), "Committing to metadata table: " + config.getTableName());
Option<HoodieTableMetadataWriter> metadataWriterOpt = table.getMetadataWriter(instantTime);
if (metadataWriterOpt.isPresent()) {
try (HoodieTableMetadataWriter metadataWriter = metadataWriterOpt.get()) {
metadataWriter.updateFromWriteStatuses(metadata, writeStatuses, instantTime);
} catch (Exception e) {
if (e instanceof HoodieException) {
throw (HoodieException) e;
} else {
throw new HoodieException("Failed to update metadata", e);
}
}
}
} | 3.68 |
hudi_HadoopConfigurations_getParquetConf | /**
* Creates a merged hadoop configuration with given flink configuration and hadoop configuration.
*/
public static org.apache.hadoop.conf.Configuration getParquetConf(
org.apache.flink.configuration.Configuration options,
org.apache.hadoop.conf.Configuration hadoopConf) {
org.apache.hadoop.conf.Configuration copy = new org.apache.hadoop.conf.Configuration(hadoopConf);
Map<String, String> parquetOptions = FlinkOptions.getPropertiesWithPrefix(options.toMap(), PARQUET_PREFIX);
parquetOptions.forEach((k, v) -> copy.set(PARQUET_PREFIX + k, v));
return copy;
} | 3.68 |
framework_AbstractSplitPanel_getOldSplitPosition | /**
* Returns the position of the split before this change event occurred.
*
* @since 8.1
*
* @return the split position previously set to the source of this event
*/
public float getOldSplitPosition() {
return oldPosition;
} | 3.68 |
hbase_BloomFilterFactory_createDeleteBloomAtWrite | /**
* Creates a new Delete Family Bloom filter at the time of
* {@link org.apache.hadoop.hbase.regionserver.HStoreFile} writing.
* @param maxKeys an estimate of the number of keys we expect to insert. Irrelevant if compound
* Bloom filters are enabled.
* @param writer the HFile writer
* @return the new Bloom filter, or null in case Bloom filters are disabled or when failed to
* create one.
*/
public static BloomFilterWriter createDeleteBloomAtWrite(Configuration conf,
CacheConfig cacheConf, int maxKeys, HFile.Writer writer) {
if (!isDeleteFamilyBloomEnabled(conf)) {
LOG.info("Delete Bloom filters are disabled by configuration for " + writer.getPath()
+ (conf == null ? " (configuration is null)" : ""));
return null;
}
float err = getErrorRate(conf);
int maxFold = getMaxFold(conf);
// In case of compound Bloom filters we ignore the maxKeys hint.
CompoundBloomFilterWriter bloomWriter =
new CompoundBloomFilterWriter(getBloomBlockSize(conf), err, Hash.getHashType(conf), maxFold,
cacheConf.shouldCacheBloomsOnWrite(), null, BloomType.ROW);
writer.addInlineBlockWriter(bloomWriter);
return bloomWriter;
} | 3.68 |
framework_LogSection_toggleScrollLock | /**
* Toggles scroll lock, writes state to persistent storage.
*/
void toggleScrollLock() {
setScrollLock(scrollTimer != null);
Storage storage = Storage.getLocalStorageIfSupported();
if (storage == null) {
return;
}
VDebugWindow.writeState(storage, "log-scrollLock", scrollTimer == null);
} | 3.68 |
flink_LookupFunction_eval | /** Invoke {@link #lookup} and handle exceptions. */
public final void eval(Object... keys) {
GenericRowData keyRow = GenericRowData.of(keys);
try {
Collection<RowData> lookup = lookup(keyRow);
if (lookup == null) {
return;
}
lookup.forEach(this::collect);
} catch (IOException e) {
throw new RuntimeException(
String.format("Failed to lookup values with given key row '%s'", keyRow), e);
}
} | 3.68 |
hudi_LSMTimelineWriter_compactedFileName | /**
* Returns a new file name.
*/
@VisibleForTesting
public static String compactedFileName(List<String> files) {
String minInstant = files.stream().map(LSMTimeline::getMinInstantTime)
.min(Comparator.naturalOrder()).get();
String maxInstant = files.stream().map(LSMTimeline::getMaxInstantTime)
.max(Comparator.naturalOrder()).get();
int currentLayer = LSMTimeline.getFileLayer(files.get(0));
return newFileName(minInstant, maxInstant, currentLayer + 1);
} | 3.68 |
flink_StateMap_releaseSnapshot | /**
* Releases a snapshot for this {@link StateMap}. This method should be called once a snapshot
* is no more needed.
*
* @param snapshotToRelease the snapshot to release, which was previously created by this state
* map.
*/
public void releaseSnapshot(
StateMapSnapshot<K, N, S, ? extends StateMap<K, N, S>> snapshotToRelease) {} | 3.68 |
pulsar_RawBatchMessageContainerImpl_setCryptoKeyReader | /**
* Sets a CryptoKeyReader instance to encrypt batched messages during serialization, `toByteBuf()`.
* @param cryptoKeyReader a CryptoKeyReader instance
*/
public void setCryptoKeyReader(CryptoKeyReader cryptoKeyReader) {
this.cryptoKeyReader = cryptoKeyReader;
} | 3.68 |
flink_Transformation_setOutputType | /**
* Tries to fill in the type information. Type information can be filled in later when the
* program uses a type hint. This method checks whether the type information has ever been
* accessed before and does not allow modifications if the type was accessed already. This
* ensures consistency by making sure different parts of the operation do not assume different
* type information.
*
* @param outputType The type information to fill in.
* @throws IllegalStateException Thrown, if the type information has been accessed before.
*/
public void setOutputType(TypeInformation<T> outputType) {
if (typeUsed) {
throw new IllegalStateException(
"TypeInformation cannot be filled in for the type after it has been used. "
+ "Please make sure that the type info hints are the first call after"
+ " the transformation function, "
+ "before any access to types or semantic properties, etc.");
}
this.outputType = outputType;
} | 3.68 |
hadoop_BalanceProcedureScheduler_getAllJobs | /**
* Return all jobs in the scheduler.
*/
public Collection<BalanceJob> getAllJobs() {
return jobSet.values();
} | 3.68 |
hudi_HoodieFlinkClusteringJob_cluster | /**
* Follows the same execution methodology of HoodieFlinkCompactor, where only one clustering job is allowed to be
* executed at any point in time.
* <p>
* If there is an inflight clustering job, it will be rolled back and re-attempted.
* <p>
* A clustering plan will be generated if `schedule` is true.
*
* @throws Exception
* @see HoodieFlinkCompactor
*/
private void cluster() throws Exception {
table.getMetaClient().reloadActiveTimeline();
if (cfg.schedule) {
// create a clustering plan on the timeline
ClusteringUtil.validateClusteringScheduling(conf);
String clusteringInstantTime = cfg.clusteringInstantTime != null ? cfg.clusteringInstantTime
: writeClient.createNewInstantTime();
LOG.info("Creating a clustering plan for instant [" + clusteringInstantTime + "]");
boolean scheduled = writeClient.scheduleClusteringAtInstant(clusteringInstantTime, Option.empty());
if (!scheduled) {
// do nothing.
LOG.info("No clustering plan for this job");
return;
}
table.getMetaClient().reloadActiveTimeline();
}
// fetch the instant based on the configured execution sequence
List<HoodieInstant> instants = ClusteringUtils.getPendingClusteringInstantTimes(table.getMetaClient());
if (instants.isEmpty()) {
// do nothing.
LOG.info("No clustering plan scheduled, turns on the clustering plan schedule with --schedule option");
return;
}
final HoodieInstant clusteringInstant;
if (cfg.clusteringInstantTime != null) {
clusteringInstant = instants.stream()
.filter(i -> i.getTimestamp().equals(cfg.clusteringInstantTime))
.findFirst()
.orElseThrow(() -> new HoodieException("Clustering instant [" + cfg.clusteringInstantTime + "] not found"));
} else {
// check for inflight clustering plans and roll them back if required
clusteringInstant =
CompactionUtil.isLIFO(cfg.clusteringSeq) ? instants.get(instants.size() - 1) : instants.get(0);
}
HoodieInstant inflightInstant = HoodieTimeline.getReplaceCommitInflightInstant(
clusteringInstant.getTimestamp());
if (table.getMetaClient().getActiveTimeline().containsInstant(inflightInstant)) {
LOG.info("Rollback inflight clustering instant: [" + clusteringInstant + "]");
table.rollbackInflightClustering(inflightInstant,
commitToRollback -> writeClient.getTableServiceClient().getPendingRollbackInfo(table.getMetaClient(), commitToRollback, false));
table.getMetaClient().reloadActiveTimeline();
}
// generate clustering plan
// should support configurable commit metadata
Option<Pair<HoodieInstant, HoodieClusteringPlan>> clusteringPlanOption = ClusteringUtils.getClusteringPlan(
table.getMetaClient(), clusteringInstant);
if (!clusteringPlanOption.isPresent()) {
// do nothing.
LOG.info("No clustering plan scheduled, turns on the clustering plan schedule with --schedule option");
return;
}
HoodieClusteringPlan clusteringPlan = clusteringPlanOption.get().getRight();
if (clusteringPlan == null || (clusteringPlan.getInputGroups() == null)
|| (clusteringPlan.getInputGroups().isEmpty())) {
// no clustering plan, do nothing and return.
LOG.info("No clustering plan for instant " + clusteringInstant.getTimestamp());
return;
}
HoodieInstant instant = HoodieTimeline.getReplaceCommitRequestedInstant(clusteringInstant.getTimestamp());
int inputGroupSize = clusteringPlan.getInputGroups().size();
// get clusteringParallelism.
int clusteringParallelism = conf.getInteger(FlinkOptions.CLUSTERING_TASKS) == -1
? inputGroupSize
: Math.min(conf.getInteger(FlinkOptions.CLUSTERING_TASKS), inputGroupSize);
// Mark instant as clustering inflight
table.getActiveTimeline().transitionReplaceRequestedToInflight(instant, Option.empty());
final Schema tableAvroSchema = StreamerUtil.getTableAvroSchema(table.getMetaClient(), false);
final DataType rowDataType = AvroSchemaConverter.convertToDataType(tableAvroSchema);
final RowType rowType = (RowType) rowDataType.getLogicalType();
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// setup configuration
long ckpTimeout = env.getCheckpointConfig().getCheckpointTimeout();
conf.setLong(FlinkOptions.WRITE_COMMIT_ACK_TIMEOUT, ckpTimeout);
DataStream<ClusteringCommitEvent> dataStream = env.addSource(new ClusteringPlanSourceFunction(clusteringInstant.getTimestamp(), clusteringPlan, conf))
.name("clustering_source")
.uid("uid_clustering_source")
.rebalance()
.transform("clustering_task",
TypeInformation.of(ClusteringCommitEvent.class),
new ClusteringOperator(conf, rowType))
.setParallelism(clusteringParallelism);
if (OptionsResolver.sortClusteringEnabled(conf)) {
ExecNodeUtil.setManagedMemoryWeight(dataStream.getTransformation(),
conf.getInteger(FlinkOptions.WRITE_SORT_MEMORY) * 1024L * 1024L);
}
dataStream
.addSink(new ClusteringCommitSink(conf))
.name("clustering_commit")
.uid("uid_clustering_commit")
.setParallelism(1)
.getTransformation()
.setMaxParallelism(1);
env.execute("flink_hudi_clustering_" + clusteringInstant.getTimestamp());
} | 3.68 |
hadoop_TimelineEntity_setEntityType | /**
* Set the entity type
*
* @param entityType
* the entity type
*/
public void setEntityType(String entityType) {
this.entityType = entityType;
} | 3.68 |
hadoop_PersistentCommitData_saveFile | /**
* Save to a file.
* This uses the createFile() API, which S3A supports for
* faster load and declaring sequential access, always
*
* @param <T> type of persistent format
* @param fs filesystem
* @param path path to save to
* @param instance data to save
* @param serializer serializer to use
* @param performance skip all safety checks on the write
*
* @return any IOStatistics from the output stream, or null
*
* @throws IOException IO failure
*/
public static <T extends PersistentCommitData> IOStatistics saveFile(
final FileSystem fs,
final Path path,
final T instance,
final JsonSerialization<T> serializer,
final boolean performance)
throws IOException {
FSDataOutputStreamBuilder builder = fs.createFile(path)
.create()
.recursive()
.overwrite(true);
// switch to performance mode
builder.opt(FS_S3A_CREATE_PERFORMANCE, performance);
return saveToStream(path, instance, builder, serializer);
} | 3.68 |
flink_ExecutionConfig_getExecutionMode | /**
* Gets the execution mode used to execute the program. The execution mode defines whether data
* exchanges are performed in a batch or on a pipelined manner.
*
* <p>The default execution mode is {@link ExecutionMode#PIPELINED}.
*
* @return The execution mode for the program.
* @deprecated The {@link ExecutionMode} is deprecated because it's only used in DataSet APIs.
* All Flink DataSet APIs are deprecated since Flink 1.18 and will be removed in a future
* Flink major version. You can still build your application in DataSet, but you should move
* to either the DataStream and/or Table API.
* @see <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=158866741">
* FLIP-131: Consolidate the user-facing Dataflow SDKs/APIs (and deprecate the DataSet
* API</a>
*/
@Deprecated
public ExecutionMode getExecutionMode() {
return configuration.get(EXECUTION_MODE);
} | 3.68 |
hadoop_MetricStringBuilder_tuple | /**
* Add any key,val pair to the string, between the prefix and suffix,
* separated by the separator.
* @param key key
* @param value value
* @return this instance
*/
public MetricStringBuilder tuple(String key, String value) {
builder.append(prefix)
.append(key)
.append(separator)
.append(value)
.append(suffix);
return this;
} | 3.68 |
hadoop_ServiceLauncher_getConfiguration | /**
* Get the configuration constructed from the command line arguments.
* @return the configuration used to create the service
*/
public final Configuration getConfiguration() {
return configuration;
} | 3.68 |
MagicPlugin_Base64Coder_encodeLines | /**
* Encodes a byte array into Base 64 format and breaks the output into lines.
*
* @param in An array containing the data bytes to be encoded.
* @param iOff Offset of the first byte in <code>in</code> to be processed.
* @param iLen Number of bytes to be processed in <code>in</code>, starting at <code>iOff</code>.
* @param lineLen Line length for the output data. Should be a multiple of 4.
* @param lineSeparator The line separator to be used to separate the output lines.
* @return A String containing the Base64 encoded data, broken into lines.
*/
public static String encodeLines(byte[] in, int iOff, int iLen, int lineLen, String lineSeparator) {
int blockLen = (lineLen * 3) / 4;
if (blockLen <= 0) throw new IllegalArgumentException();
int lines = (iLen + blockLen - 1) / blockLen;
int bufLen = ((iLen + 2) / 3) * 4 + lines * lineSeparator.length();
StringBuilder buf = new StringBuilder(bufLen);
int ip = 0;
while (ip < iLen) {
int l = Math.min(iLen - ip, blockLen);
buf.append(encode(in, iOff + ip, l));
buf.append(lineSeparator);
ip += l;
}
return buf.toString();
} | 3.68 |
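For comparison, the JDK's MIME encoder offers a similar "encode and wrap into lines" behaviour. This sketch is illustrative, not a drop-in replacement: unlike encodeLines above, the MIME encoder does not append a separator after the final line.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class Base64LinesSketch {
    public static void main(String[] args) {
        byte[] data = new byte[100];
        // 76-character lines separated by "\r\n", analogous to lineLen = 76 above
        Base64.Encoder encoder =
                Base64.getMimeEncoder(76, "\r\n".getBytes(StandardCharsets.US_ASCII));
        String encoded = encoder.encodeToString(data);
        System.out.println(encoded);
    }
}
```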
open-banking-gateway_FacadeResult_getBody | /**
* Response body
*/
default T getBody() {
return null;
} | 3.68 |
hbase_JSONBean_write | /** Returns non-zero if it failed to find a bean, 0 otherwise. */
private static int write(JsonWriter writer, MBeanServer mBeanServer, ObjectName qry,
String attribute, boolean description, ObjectName excluded) throws IOException {
LOG.debug("Listing beans for {}", qry);
Set<ObjectName> names = mBeanServer.queryNames(qry, null);
writer.name("beans").beginArray();
Iterator<ObjectName> it = names.iterator();
Pattern[] matchingPattern = null;
while (it.hasNext()) {
ObjectName oname = it.next();
if (excluded != null && excluded.apply(oname)) {
continue;
}
MBeanInfo minfo;
String code = "";
String descriptionStr = null;
Object attributeinfo = null;
try {
minfo = mBeanServer.getMBeanInfo(oname);
code = minfo.getClassName();
if (description) {
descriptionStr = minfo.getDescription();
}
String prs = "";
try {
if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) {
prs = "modelerType";
code = (String) mBeanServer.getAttribute(oname, prs);
}
if (attribute != null) {
String[] patternAttr = null;
if (attribute.contains(ASTERICK)) {
if (attribute.contains(COMMA)) {
patternAttr = attribute.split(COMMA);
} else {
patternAttr = new String[1];
patternAttr[0] = attribute;
}
matchingPattern = new Pattern[patternAttr.length];
for (int i = 0; i < patternAttr.length; i++) {
matchingPattern[i] = Pattern.compile(patternAttr[i]);
}
// nullify the attribute
attribute = null;
} else {
prs = attribute;
attributeinfo = mBeanServer.getAttribute(oname, prs);
}
}
} catch (RuntimeMBeanException e) {
// UnsupportedOperationExceptions happen in the normal course of business,
// so no need to log them as errors all the time.
if (e.getCause() instanceof UnsupportedOperationException) {
if (LOG.isTraceEnabled()) {
LOG.trace("Getting attribute " + prs + " of " + oname + " threw " + e);
}
} else {
LOG.error("Getting attribute " + prs + " of " + oname + " threw an exception", e);
}
return 0;
} catch (AttributeNotFoundException e) {
// If the modelerType attribute was not found, the class name is used
// instead.
LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", e);
} catch (MBeanException e) {
// The code inside the attribute getter threw an exception so log it,
// and fall back on the class name
LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", e);
} catch (RuntimeException e) {
// For some reason even with an MBeanException available to them
// Runtime exceptions can still find their way through, so treat them
// the same as MBeanException
LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", e);
} catch (ReflectionException e) {
// This happens when the code inside the JMX bean (setter?? from the
// java docs) threw an exception, so log it and fall back on the
// class name
LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", e);
}
} catch (InstanceNotFoundException e) {
// Ignored for some reason the bean was not found so don't output it
continue;
} catch (IntrospectionException e) {
// This is an internal error, something odd happened with reflection so
// log it and don't output the bean.
LOG.error("Problem while trying to process JMX query: " + qry + " with MBean " + oname, e);
continue;
} catch (ReflectionException e) {
// This happens when the code inside the JMX bean threw an exception, so
// log it and don't output the bean.
LOG.error("Problem while trying to process JMX query: " + qry + " with MBean " + oname, e);
continue;
}
writer.beginObject();
writer.name("name").value(oname.toString());
if (description && descriptionStr != null && descriptionStr.length() > 0) {
writer.name("description").value(descriptionStr);
}
writer.name("modelerType").value(code);
if (attribute != null && attributeinfo == null) {
writer.name("result").value("ERROR");
writer.name("message").value("No attribute with name " + attribute + " was found.");
writer.endObject();
writer.endArray();
writer.close();
return -1;
}
if (attribute != null) {
writeAttribute(writer, attribute, descriptionStr, attributeinfo);
} else {
MBeanAttributeInfo[] attrs = minfo.getAttributes();
for (int i = 0; i < attrs.length; i++) {
writeAttribute(writer, mBeanServer, oname, description, matchingPattern, attrs[i]);
}
}
writer.endObject();
}
writer.endArray();
return 0;
} | 3.68 |
hadoop_ManifestCommitter_getTaskAttemptCommittedManifest | /**
* Get the manifest of the last committed task.
* @return a task manifest or null.
*/
@VisibleForTesting
TaskManifest getTaskAttemptCommittedManifest() {
return taskAttemptCommittedManifest;
} | 3.68 |
flink_StreamExecutionEnvironment_setBufferTimeout | /**
* Sets the maximum time frequency (milliseconds) for the flushing of the output buffers. By
* default the output buffers flush frequently to provide low latency and to aid smooth
* developer experience. Setting the parameter can result in three logical modes:
*
* <ul>
* <li>A positive integer triggers flushing periodically by that integer
* <li>0 triggers flushing after every record thus minimizing latency
* <li>-1 triggers flushing only when the output buffer is full thus maximizing throughput
* </ul>
*
* @param timeoutMillis The maximum time between two output flushes.
*/
public StreamExecutionEnvironment setBufferTimeout(long timeoutMillis) {
if (timeoutMillis < ExecutionOptions.DISABLED_NETWORK_BUFFER_TIMEOUT) {
throw new IllegalArgumentException("Timeout of buffer must be non-negative or -1");
}
this.bufferTimeout = timeoutMillis;
return this;
} | 3.68 |
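A short usage sketch of the three modes described in the Javadoc (assumes the Flink streaming API is on the class path; the values shown are illustrative):

```java
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class BufferTimeoutExample {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setBufferTimeout(100); // flush at least every 100 ms (latency/throughput trade-off)
        // env.setBufferTimeout(0);  // flush after every record: lowest latency
        // env.setBufferTimeout(-1); // flush only when buffers are full: highest throughput
    }
}
```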
morf_XmlDataSetProducer_getScale | /**
* @see org.alfasoftware.morf.metadata.Column#getScale()
*/
@Override
public int getScale() {
if (scale == null) {
return 0;
}
return scale;
} | 3.68 |
hbase_OrderedInt8_decodeByte | /**
* Read a {@code byte} value from the buffer {@code src}.
* @param src the {@link PositionedByteRange} to read the {@code byte} from
* @return the {@code byte} read from the buffer
*/
public byte decodeByte(PositionedByteRange src) {
return OrderedBytes.decodeInt8(src);
} | 3.68 |
hadoop_ResourceRequest_getExecutionTypeRequest | /**
* Get the <code>ExecutionTypeRequest</code> of this
* <code>ResourceRequest</code>.
*
* @return the <code>ExecutionTypeRequest</code> of this
* <code>ResourceRequest</code>.
*/
@Public
@Evolving
public ExecutionTypeRequest getExecutionTypeRequest() {
throw new UnsupportedOperationException();
} | 3.68 |
hbase_TableSchemaModel_addAttribute | /**
* Add an attribute to the table descriptor
* @param name attribute name
* @param value attribute value
*/
@JsonAnySetter
public void addAttribute(String name, Object value) {
attrs.put(new QName(name), value);
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_staticSharding | /**
* Set static sharding.
*
* @param staticSharding static sharding
* @return ElasticJob configuration builder
*/
public Builder staticSharding(final boolean staticSharding) {
this.staticSharding = staticSharding;
return this;
} | 3.68 |
hbase_DynamicMetricsRegistry_tag | /**
* Add a tag to the metrics
* @param info metadata of the tag
* @param value of the tag
* @param override existing tag if true
* @return the registry (for keep adding tags etc.)
*/
public DynamicMetricsRegistry tag(MetricsInfo info, String value, boolean override) {
MetricsTag tag = Interns.tag(info, value);
if (!override) {
MetricsTag existing = tagsMap.putIfAbsent(info.name(), tag);
if (existing != null) {
throw new MetricsException("Tag " + info.name() + " already exists!");
}
return this;
}
tagsMap.put(info.name(), tag);
return this;
} | 3.68 |
hbase_BaseEnvironment_startup | /** Initialize the environment */
public void startup() throws IOException {
if (state == Coprocessor.State.INSTALLED || state == Coprocessor.State.STOPPED) {
state = Coprocessor.State.STARTING;
Thread currentThread = Thread.currentThread();
ClassLoader hostClassLoader = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(this.getClassLoader());
impl.start(this);
state = Coprocessor.State.ACTIVE;
} finally {
currentThread.setContextClassLoader(hostClassLoader);
}
} else {
LOG.warn("Not starting coprocessor " + impl.getClass().getName()
+ " because not inactive (state=" + state.toString() + ")");
}
} | 3.68 |
hadoop_ManifestCommitterSupport_createTaskManifest | /**
* Create a task manifest from a stage config; the stage config must be for a task attempt.
* @param stageConfig state config.
* @return a manifest with job and task attempt info set up.
*/
public static TaskManifest createTaskManifest(StageConfig stageConfig) {
final TaskManifest manifest = new TaskManifest();
manifest.setTaskAttemptID(stageConfig.getTaskAttemptId());
manifest.setTaskID(stageConfig.getTaskId());
manifest.setJobId(stageConfig.getJobId());
manifest.setJobAttemptNumber(stageConfig.getJobAttemptNumber());
manifest.setTaskAttemptDir(
stageConfig.getTaskAttemptDir().toUri().toString());
return manifest;
} | 3.68 |
hudi_DeletePartitionUtils_checkForPendingTableServiceActions | /**
* Check if there are any pending table service actions (requested + inflight) on a table affecting the partitions to
* be dropped.
* <p>
* This check is to prevent a drop-partition from proceeding should a partition have a table service action in
* the pending stage. If this is allowed to happen, the filegroup that is an input for a table service action, might
* also be a candidate for being replaced. As such, when the table service action and drop-partition commits are
* committed, there will be two commits replacing a single filegroup.
* <p>
* For example, a timeline might have an execution order as such:
* 000.replacecommit.requested (clustering filegroup_1 + filegroup_2 -> filegroup_3)
* 001.replacecommit.requested, 001.replacecommit.inflight, 0001.replacecommit (drop_partition to replace filegroup_1)
* 000.replacecommit.inflight (clustering is executed now)
* 000.replacecommit (clustering completed)
* For an execution order as shown above, 000.replacecommit and 001.replacecommit will both flag filegroup_1 to be replaced.
* This will cause downstream duplicate key errors when a map is being constructed.
*
* @param table Table to perform validation on
* @param partitionsToDrop List of partitions to drop
*/
public static void checkForPendingTableServiceActions(HoodieTable table, List<String> partitionsToDrop) {
List<String> instantsOfOffendingPendingTableServiceAction = new ArrayList<>();
// ensure that there are no pending inflight clustering/compaction operations involving this partition
SyncableFileSystemView fileSystemView = (SyncableFileSystemView) table.getSliceView();
// separating the iteration of pending compaction operations from clustering as they return different stream types
Stream.concat(fileSystemView.getPendingCompactionOperations(), fileSystemView.getPendingLogCompactionOperations())
.filter(op -> partitionsToDrop.contains(op.getRight().getPartitionPath()))
.forEach(op -> instantsOfOffendingPendingTableServiceAction.add(op.getLeft()));
fileSystemView.getFileGroupsInPendingClustering()
.filter(fgIdInstantPair -> partitionsToDrop.contains(fgIdInstantPair.getLeft().getPartitionPath()))
.forEach(x -> instantsOfOffendingPendingTableServiceAction.add(x.getRight().getTimestamp()));
if (instantsOfOffendingPendingTableServiceAction.size() > 0) {
throw new HoodieDeletePartitionException("Failed to drop partitions. "
+ "Please ensure that there are no pending table service actions (clustering/compaction) for the partitions to be deleted: " + partitionsToDrop + ". "
+ "Instant(s) of offending pending table service action: "
+ instantsOfOffendingPendingTableServiceAction.stream().distinct().collect(Collectors.toList()));
}
} | 3.68 |
hbase_SingleColumnValueFilter_getLatestVersionOnly | /**
 * Get whether only the latest version of the column value should be compared. If true, the row
 * will be returned only if the latest version of the column value matches. If false, the row will
 * be returned if any version of the column value matches. The default is true.
* @return return value
*/
public boolean getLatestVersionOnly() {
return latestVersionOnly;
} | 3.68 |
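A hedged sketch of how this flag is typically toggled when building a scan; the family, qualifier, and value are illustrative, and the CompareOperator-based constructor is assumed (older HBase versions use CompareFilter.CompareOp instead).

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class LatestVersionOnlyExample {
  public static Scan buildScan() {
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("q"),
        CompareOperator.EQUAL, Bytes.toBytes("expected-value"));
    // Compare every stored version of cf:q, not just the newest one.
    filter.setLatestVersionOnly(false);
    return new Scan().setFilter(filter);
  }
}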
hbase_SkipFilter_toByteArray | /** Returns the filter serialized using pb */
@Override
public byte[] toByteArray() throws IOException {
FilterProtos.SkipFilter.Builder builder = FilterProtos.SkipFilter.newBuilder();
builder.setFilter(ProtobufUtil.toFilter(this.filter));
return builder.build().toByteArray();
} | 3.68 |
framework_LogSection_getLimit | /**
* Gets the current log row limit.
*
 * @return the current log row limit
*/
public int getLimit() {
// TODO should be read from persistent storage
return limit;
} | 3.68 |
hbase_QuotaSettingsFactory_removeNamespaceSpaceLimit | /**
* Creates a {@link QuotaSettings} object to remove the FileSystem space quota for the given
* namespace.
* @param namespace The namespace to remove the quota on.
* @return A {@link QuotaSettings} object.
*/
public static QuotaSettings removeNamespaceSpaceLimit(String namespace) {
return new SpaceLimitSettings(namespace);
} | 3.68 |
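A short sketch of how the returned QuotaSettings is usually applied through Admin.setQuota; the namespace name is made up and connection setup uses the standard factory calls.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;

public class RemoveNamespaceQuotaExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Drops the FileSystem space quota previously set on the namespace.
      admin.setQuota(QuotaSettingsFactory.removeNamespaceSpaceLimit("analytics_ns"));
    }
  }
}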
flink_OrInputTypeStrategy_commonMax | /** Returns the common maximum argument count or null if undefined. */
private static @Nullable Integer commonMax(List<ArgumentCount> counts) {
// max=5, max=3, max=0 -> max=5
// max=5, max=3, max=0, max=null -> max=null
int commonMax = Integer.MIN_VALUE;
for (ArgumentCount count : counts) {
final Optional<Integer> max = count.getMaxCount();
if (!max.isPresent()) {
return null;
}
commonMax = Math.max(commonMax, max.get());
}
if (commonMax == Integer.MIN_VALUE) {
return null;
}
return commonMax;
} | 3.68 |
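Because commonMax is private, the sketch below reimplements the same reduction over plain Optional maxima to make the two comment cases concrete; it is a standalone illustration, not Flink API.

import java.util.Arrays;
import java.util.List;
import java.util.Optional;

public class CommonMaxSketch {
  // The common max is the largest defined max; one unbounded strategy makes the result unbounded.
  static Optional<Integer> commonMax(List<Optional<Integer>> maxima) {
    int result = Integer.MIN_VALUE;
    for (Optional<Integer> max : maxima) {
      if (!max.isPresent()) {
        return Optional.empty();
      }
      result = Math.max(result, max.get());
    }
    return result == Integer.MIN_VALUE ? Optional.empty() : Optional.of(result);
  }

  public static void main(String[] args) {
    System.out.println(commonMax(Arrays.asList(Optional.of(5), Optional.of(3), Optional.of(0)))); // Optional[5]
    System.out.println(commonMax(Arrays.asList(Optional.of(5), Optional.<Integer>empty())));      // Optional.empty
  }
}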
hudi_KafkaConnectHdfsProvider_listAllFileStatus | /**
* List file status recursively.
*
* @param curPath Current Path
* @param filter PathFilter
   * @return all file statuses matching the Kafka Connect naming convention
   * @throws IOException if listing the path fails
*/
private ArrayList<FileStatus> listAllFileStatus(Path curPath,
KafkaConnectPathFilter filter) throws IOException {
ArrayList<FileStatus> allFileStatus = new ArrayList<>();
FileStatus[] fileStatus = this.fs.listStatus(curPath);
for (FileStatus status : fileStatus) {
if (status.isDirectory() && filter.acceptDir(status.getPath())) {
allFileStatus.addAll(listAllFileStatus(status.getPath(), filter));
} else {
if (filter.accept(status.getPath())) {
allFileStatus.add(status);
}
}
}
return allFileStatus;
} | 3.68 |
hbase_HRegion_checkInterrupt | /**
* Check thread interrupt status and throw an exception if interrupted.
* @throws NotServingRegionException if region is closing
* @throws InterruptedIOException if interrupted but region is not closing
*/
// Package scope for tests
void checkInterrupt() throws NotServingRegionException, InterruptedIOException {
if (Thread.interrupted()) {
if (this.closing.get()) {
throw new NotServingRegionException(
getRegionInfo().getRegionNameAsString() + " is closing");
}
throw new InterruptedIOException();
}
} | 3.68 |
AreaShop_FileManager_addRegionNoSave | /**
* Add a region to the list without saving it to disk (useful for loading at startup).
* @param region The region to add
 * @return the AddingRegionEvent that was fired; it is cancelled when adding was denied (for example a null region, or an event listener cancelled it)
*/
public AddingRegionEvent addRegionNoSave(GeneralRegion region) {
AddingRegionEvent event = new AddingRegionEvent(region);
if(region == null) {
AreaShop.debug("Tried adding a null region!");
event.cancel("null region");
return event;
}
Bukkit.getPluginManager().callEvent(event);
if (event.isCancelled()) {
return event;
}
regions.put(region.getName().toLowerCase(), region);
Bukkit.getPluginManager().callEvent(new AddedRegionEvent(region));
return event;
} | 3.68 |
MagicPlugin_CompatibilityUtilsBase_toMinecraftAttribute | // Taken from CraftBukkit code.
protected String toMinecraftAttribute(Attribute attribute) {
String bukkit = attribute.name();
int first = bukkit.indexOf('_');
int second = bukkit.indexOf('_', first + 1);
StringBuilder sb = new StringBuilder(bukkit.toLowerCase(java.util.Locale.ENGLISH));
sb.setCharAt(first, '.');
if (second != -1) {
sb.deleteCharAt(second);
sb.setCharAt(second, bukkit.charAt(second + 1));
}
return sb.toString();
} | 3.68 |
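The rewrite is easier to follow with a worked example; the sketch below copies the same logic onto a plain String (no Bukkit dependency) and shows GENERIC_MAX_HEALTH mapping to generic.maxHealth.

public class AttributeNameExample {
  // Same transformation as above, operating on the raw enum name for illustration.
  static String toMinecraftAttribute(String bukkit) {
    int first = bukkit.indexOf('_');
    int second = bukkit.indexOf('_', first + 1);
    StringBuilder sb = new StringBuilder(bukkit.toLowerCase(java.util.Locale.ENGLISH));
    sb.setCharAt(first, '.');                          // GENERIC_X -> generic.x
    if (second != -1) {
      sb.deleteCharAt(second);                         // drop the second underscore...
      sb.setCharAt(second, bukkit.charAt(second + 1)); // ...and restore the original uppercase letter
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(toMinecraftAttribute("GENERIC_MAX_HEALTH")); // generic.maxHealth
    System.out.println(toMinecraftAttribute("GENERIC_ARMOR"));      // generic.armor
  }
}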
hudi_HoodieTimer_start | /**
 * Creates an instance of {@link HoodieTimer} that is already started.
*/
public static HoodieTimer start() {
return new HoodieTimer(true);
} | 3.68 |
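A minimal timing sketch; endTimer() returning the elapsed milliseconds is assumed from HoodieTimer's usual API.

import org.apache.hudi.common.util.HoodieTimer;

public class TimerExample {
  public static void main(String[] args) throws InterruptedException {
    HoodieTimer timer = HoodieTimer.start(); // starts counting immediately
    Thread.sleep(50);                        // stand-in for the work being measured
    long elapsedMs = timer.endTimer();       // elapsed time in milliseconds
    System.out.println("took " + elapsedMs + " ms");
  }
}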
flink_TaskManagerLocation_getResourceID | /**
* Gets the ID of the resource in which the TaskManager is started. The format of this depends
* on how the TaskManager is started:
*
* <ul>
* <li>If the TaskManager is started via YARN, this is the YARN container ID.
* <li>If the TaskManager is started in standalone mode, or via a MiniCluster, this is a
* random ID.
* <li>Other deployment modes can set the resource ID in other ways.
* </ul>
*
* @return The ID of the resource in which the TaskManager is started
*/
public ResourceID getResourceID() {
return resourceID;
} | 3.68 |
hbase_CellFlatMap_navigableKeySet | // -------------------------------- Sub-Sets --------------------------------
@Override
public NavigableSet<Cell> navigableKeySet() {
throw new UnsupportedOperationException();
} | 3.68 |
hbase_RSGroupAdminClient_getRSGroupOfServer | /**
* Retrieve the RSGroupInfo a server is affiliated to
 * @param hostPort HostPort to get RSGroupInfo for
 * @return the RSGroupInfo the server belongs to, or null if it is not a member of any group
*/
public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException {
GetRSGroupInfoOfServerRequest request =
GetRSGroupInfoOfServerRequest.newBuilder().setServer(HBaseProtos.ServerName.newBuilder()
.setHostName(hostPort.getHostname()).setPort(hostPort.getPort()).build()).build();
try {
GetRSGroupInfoOfServerResponse resp = stub.getRSGroupInfoOfServer(null, request);
if (resp.hasRSGroupInfo()) {
return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
}
return null;
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.68 |
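A hedged lookup sketch; the host and port are illustrative, and constructing RSGroupAdminClient directly from an open Connection is assumed to match its public constructor.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

public class RSGroupLookupExample {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      RSGroupAdminClient groups = new RSGroupAdminClient(connection);
      RSGroupInfo info = groups.getRSGroupOfServer(Address.fromParts("rs1.example.com", 16020));
      System.out.println(info == null ? "server not in any group" : info.getName());
    }
  }
}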
framework_Window_addWindowModeChangeListener | /**
* Adds a WindowModeChangeListener to the window.
*
 * The WindowModeChangeEvent is fired when the user changes the display
* state by clicking the maximize/restore button or by double clicking on
* the window header. The event is also fired if the state is changed using
* {@link #setWindowMode(WindowMode)}.
*
* @param listener
* the WindowModeChangeListener to add.
* @since 8.0
*/
public Registration addWindowModeChangeListener(
WindowModeChangeListener listener) {
return addListener(WindowModeChangeEvent.class, listener,
WindowModeChangeListener.windowModeChangeMethod);
} | 3.68 |
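A small Vaadin 8 usage sketch; it assumes WindowModeChangeEvent exposes the new mode via getWindowMode(), and the window title and notification text are made up.

import com.vaadin.shared.ui.window.WindowMode;
import com.vaadin.ui.Notification;
import com.vaadin.ui.Window;

public class WindowModeExample {
  public static Window createWindow() {
    Window window = new Window("Resizable window");
    window.setWindowMode(WindowMode.NORMAL);
    window.addWindowModeChangeListener(event -> {
      // Fired for maximize/restore clicks, header double clicks,
      // and programmatic setWindowMode(...) calls.
      Notification.show("Window mode is now " + event.getWindowMode());
    });
    return window;
  }
}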
hbase_IdentityTableReduce_reduce | /**
* No aggregation, output pairs of (key, record)
*/
public void reduce(ImmutableBytesWritable key, Iterator<Put> values,
OutputCollector<ImmutableBytesWritable, Put> output, Reporter reporter) throws IOException {
while (values.hasNext()) {
output.collect(key, values.next());
}
} | 3.68 |
hadoop_CacheDirectiveStats_setBytesNeeded | /**
* Sets the bytes needed by this directive.
*
* @param bytesNeeded The bytes needed.
* @return This builder, for call chaining.
*/
public Builder setBytesNeeded(long bytesNeeded) {
this.bytesNeeded = bytesNeeded;
return this;
} | 3.68 |
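A builder-chaining sketch with made-up numbers; the sibling setters are assumed to follow the same pattern as setBytesNeeded.

import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;

public class CacheStatsExample {
  public static CacheDirectiveStats sampleStats() {
    return new CacheDirectiveStats.Builder()
        .setBytesNeeded(4L * 1024 * 1024) // bytes the directive still wants cached
        .setBytesCached(1L * 1024 * 1024) // bytes already cached
        .setFilesNeeded(3L)
        .setFilesCached(1L)
        .setHasExpired(false)
        .build();
  }
}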
flink_ResultPartitionFactory_createBufferPoolFactory | /**
* The minimum pool size should be <code>numberOfSubpartitions + 1</code> for two
* considerations:
*
 * <p>1. A StreamTask can only process input if at least one buffer is available on the output
 * side, so the task might get stuck if the minimum pool size were exactly equal to the number
 * of subpartitions, because every subpartition might hold a partially filled buffer.
 *
 * <p>2. Adding one more buffer to every output LocalBufferPool avoids a performance regression
 * when processing input depends on at least one buffer being available on the output side.
*/
@VisibleForTesting
SupplierWithException<BufferPool, IOException> createBufferPoolFactory(
int numberOfSubpartitions, ResultPartitionType type) {
return () -> {
Pair<Integer, Integer> pair =
NettyShuffleUtils.getMinMaxNetworkBuffersPerResultPartition(
configuredNetworkBuffersPerChannel,
floatingNetworkBuffersPerGate,
sortShuffleMinParallelism,
sortShuffleMinBuffers,
numberOfSubpartitions,
tieredStorage.isPresent(),
tieredStorage
.map(
storage ->
storage.getTieredStorageConfiguration()
.getTotalExclusiveBufferNum())
.orElse(0),
type);
return bufferPoolFactory.createBufferPool(
pair.getLeft(),
pair.getRight(),
numberOfSubpartitions,
maxBuffersPerChannel,
isOverdraftBufferNeeded(type) ? maxOverdraftBuffersPerGate : 0);
};
} | 3.68 |
flink_DataSet_minBy | /**
* Selects an element with minimum value.
*
* <p>The minimum is computed over the specified fields in lexicographical order.
*
* <p><strong>Example 1</strong>: Given a data set with elements <code>[0, 1], [1, 0]</code>,
* the results will be:
*
* <ul>
* <li><code>minBy(0)</code>: <code>[0, 1]</code>
* <li><code>minBy(1)</code>: <code>[1, 0]</code>
* </ul>
*
* <p><strong>Example 2</strong>: Given a data set with elements <code>[0, 0], [0, 1]</code>,
* the results will be:
*
* <ul>
* <li><code>minBy(0, 1)</code>: <code>[0, 0]</code>
* </ul>
*
* <p>If multiple values with minimum value at the specified fields exist, a random one will be
* picked.
*
* <p>Internally, this operation is implemented as a {@link ReduceFunction}.
*
* @param fields Field positions to compute the minimum over
* @return A {@link ReduceOperator} representing the minimum
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public ReduceOperator<T> minBy(int... fields) {
if (!getType().isTupleType() || !(getType() instanceof TupleTypeInfo)) {
throw new InvalidProgramException("DataSet#minBy(int...) only works on Tuple types.");
}
return new ReduceOperator<>(
this,
new SelectByMinFunction((TupleTypeInfo) getType(), fields),
Utils.getCallLocationName());
} | 3.68 |
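The two documented examples, reproduced as a runnable sketch against the legacy DataSet API; which tuple is returned on ties is unspecified, as noted above.

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class MinByExample {
  public static void main(String[] args) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    DataSet<Tuple2<Integer, Integer>> data1 = env.fromElements(Tuple2.of(0, 1), Tuple2.of(1, 0));
    data1.minBy(0).print();    // (0,1)
    data1.minBy(1).print();    // (1,0)

    DataSet<Tuple2<Integer, Integer>> data2 = env.fromElements(Tuple2.of(0, 0), Tuple2.of(0, 1));
    data2.minBy(0, 1).print(); // (0,0)
  }
}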
hbase_ClusterStatusTracker_setClusterUp | /**
* Sets the cluster as up.
* @throws KeeperException unexpected zk exception
*/
public void setClusterUp() throws KeeperException {
byte[] upData = toByteArray();
try {
ZKUtil.createAndWatch(watcher, watcher.getZNodePaths().clusterStateZNode, upData);
} catch (KeeperException.NodeExistsException nee) {
ZKUtil.setData(watcher, watcher.getZNodePaths().clusterStateZNode, upData);
}
} | 3.68 |
dubbo_ConfigurationUtils_getCachedDynamicProperty | /**
 * Compatibility variant for the single default-instance usage.
 *
 * @deprecated Replaced by {@link ConfigurationUtils#getCachedDynamicProperty(ScopeModel, String, String)}
*/
@Deprecated
public static String getCachedDynamicProperty(String key, String defaultValue) {
return getCachedDynamicProperty(ApplicationModel.defaultModel(), key, defaultValue);
} | 3.68 |
flink_RocksDBOptionsFactory_createReadOptions | /**
* This method should set the additional options on top of the current options object. The
* current options object may contain pre-defined options based on flags that have been
* configured on the state backend.
*
* <p>It is important to set the options on the current object and return the result from the
* setter methods, otherwise the pre-defined options may get lost.
*
* @param currentOptions The options object with the pre-defined options.
* @param handlesToClose The collection to register newly created {@link
* org.rocksdb.RocksObject}s.
* @return The options object on which the additional options are set.
*/
default ReadOptions createReadOptions(
ReadOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
return currentOptions;
} | 3.68 |
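A hedged sketch of a factory that overrides this default; the package name and the specific option values are assumptions for illustration, and the two abstract methods are shown alongside it so the class compiles.

import java.util.Collection;

import org.apache.flink.contrib.streaming.state.RocksDBOptionsFactory;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.ReadOptions;

public class TunedRocksDBOptionsFactory implements RocksDBOptionsFactory {
  @Override
  public DBOptions createDBOptions(DBOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
    // Keep building on the pre-configured object so earlier settings survive.
    return currentOptions.setMaxBackgroundJobs(4);
  }

  @Override
  public ColumnFamilyOptions createColumnOptions(
      ColumnFamilyOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
    return currentOptions;
  }

  @Override
  public ReadOptions createReadOptions(
      ReadOptions currentOptions, Collection<AutoCloseable> handlesToClose) {
    // Example tweak: skip populating the block cache for these reads.
    return currentOptions.setFillCache(false);
  }
}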