name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hadoop_SchedulerHealth_getAggregateReservationCount | /**
 * Get the aggregate count of all reservations.
*
* @return aggregate reservation count
*/
public Long getAggregateReservationCount() {
return getAggregateOperationCount(Operation.RESERVATION);
} | 3.68 |
hadoop_QuotaUsage_toString | /**
* Return the string representation of the object in the output format.
 * If hOption is false, file sizes are returned in bytes.
 * If hOption is true, file sizes are returned in a human readable format.
 *
 * @param hOption a flag indicating if human readable output is to be used
* @param tOption type option.
* @param types storage types.
* @return the string representation of the object.
*/
public String toString(boolean hOption,
boolean tOption, List<StorageType> types) {
if (tOption) {
return getTypesQuotaUsage(hOption, types);
}
return getQuotaUsage(hOption);
} | 3.68 |
hudi_LSMTimeline_latestSnapshotVersion | /**
* Returns the latest snapshot version.
*/
public static int latestSnapshotVersion(HoodieTableMetaClient metaClient) throws IOException {
Path versionFilePath = getVersionFilePath(metaClient);
if (metaClient.getFs().exists(versionFilePath)) {
try {
Option<byte[]> content = FileIOUtils.readDataFromPath(metaClient.getFs(), versionFilePath);
if (content.isPresent()) {
return Integer.parseInt(new String(content.get(), StandardCharsets.UTF_8));
}
} catch (Exception e) {
// fallback to manifest file listing.
LOG.warn("Error reading version file {}", versionFilePath, e);
}
}
return allSnapshotVersions(metaClient).stream().max(Integer::compareTo).orElse(-1);
} | 3.68 |
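A minimal standalone sketch of the same lookup strategy (prefer the version file, fall back to the maximum of the listed snapshot versions), written against java.nio.file rather than the Hadoop FileSystem API; all names here are illustrative, not Hudi code:

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

final class SnapshotVersions {
  /** Reads the version file if it exists, otherwise falls back to listing. */
  static int latestVersion(Path versionFile, List<Integer> allVersions) {
    if (Files.exists(versionFile)) {
      try {
        String content = new String(Files.readAllBytes(versionFile), StandardCharsets.UTF_8);
        return Integer.parseInt(content.trim());
      } catch (IOException | NumberFormatException e) {
        // fall back to listing, mirroring the warn-and-continue behaviour above
      }
    }
    return allVersions.stream().max(Integer::compareTo).orElse(-1);
  }
}
```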
morf_SqlServerDialect_getSqlForRandomString | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForRandomString(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForRandomString(Function function) {
return String.format("SUBSTRING(REPLACE(CONVERT(varchar(255),NEWID()),'-',''), 1, %s)",getSqlFrom(function.getArguments().get(0)));
} | 3.68 |
hbase_DemoClient_bytes | // Helper to translate strings to UTF8 bytes
private byte[] bytes(String s) {
return Bytes.toBytes(s);
} | 3.68 |
hudi_StreamSync_setupWriteClient | /**
* Note that depending on configs and source-type, schemaProvider could either be eagerly or lazily created.
* SchemaProvider creation is a precursor to HoodieWriteClient and AsyncCompactor creation. This method takes care of
* this constraint.
*/
private void setupWriteClient(Option<JavaRDD<HoodieRecord>> recordsOpt) throws IOException {
if ((null != schemaProvider)) {
Schema sourceSchema = schemaProvider.getSourceSchema();
Schema targetSchema = schemaProvider.getTargetSchema();
reInitWriteClient(sourceSchema, targetSchema, recordsOpt);
}
} | 3.68 |
hadoop_Server_destroy | /**
* Destroys the server.
* <p>
* All services are destroyed in reverse order of initialization, then the
* Log4j framework is shutdown.
*/
public void destroy() {
ensureOperational();
destroyServices();
log.info("Server [{}] shutdown!", name);
log.info("======================================================");
if (!Boolean.getBoolean("test.circus")) {
LogManager.shutdown();
}
status = Status.SHUTDOWN;
} | 3.68 |
hibernate-validator_ConstraintCheckIssue_isError | /**
 * Determines whether the issue is an error.
 *
 * @return true if {@link ConstraintCheckIssue#getKind()} equals {@link IssueKind#ERROR}
*/
public boolean isError() {
return IssueKind.ERROR.equals( kind );
} | 3.68 |
framework_VCalendar_setSortOrder | /**
* Set sort strategy for events.
*
* @param order
* sort order
*/
public void setSortOrder(EventSortOrder order) {
if (order == null) {
eventSortOrder = EventSortOrder.DURATION_DESC;
} else {
eventSortOrder = order;
}
} | 3.68 |
hadoop_SnappyCompressor_compress | /**
* Fills specified buffer with compressed data. Returns actual number
* of bytes of compressed data. A return value of 0 indicates that
* needsInput() should be called in order to determine if more input
* data is required.
*
* @param b Buffer for the compressed data
* @param off Start offset of the data
* @param len Size of the buffer
* @return The actual number of bytes of compressed data.
*/
@Override
public int compress(byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
// Check if there is compressed data
int n = compressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
((ByteBuffer) compressedDirectBuf).get(b, off, n);
bytesWritten += n;
return n;
}
// Re-initialize the snappy's output direct-buffer
compressedDirectBuf.clear();
compressedDirectBuf.limit(0);
if (0 == uncompressedDirectBuf.position()) {
// No compressed data, so we should have !needsInput or !finished
setInputFromSavedData();
if (0 == uncompressedDirectBuf.position()) {
// Called without data; write nothing
finished = true;
return 0;
}
}
// Compress data
n = compressDirectBuf();
compressedDirectBuf.limit(n);
uncompressedDirectBuf.clear(); // snappy consumes all buffer input
// Set 'finished' if snappy has consumed all user-data
if (0 == userBufLen) {
finished = true;
}
// Get at most 'len' bytes
n = Math.min(n, len);
bytesWritten += n;
((ByteBuffer) compressedDirectBuf).get(b, off, n);
return n;
} | 3.68 |
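The first step of compress(), returning previously buffered output before producing any new compressed data, is a general drain-first pattern. A minimal standalone sketch with a plain ByteBuffer (illustrative, not Hadoop's classes):

```java
import java.nio.ByteBuffer;

final class DrainFirst {
  /**
   * Copies up to len bytes of already-produced output into b before any new
   * work is done, mirroring the "check for remaining compressed data first"
   * step of the compress() method above.
   */
  static int drain(ByteBuffer out, byte[] b, int off, int len) {
    int n = Math.min(out.remaining(), len);
    out.get(b, off, n);
    return n; // 0 means the caller should refill and compress more input
  }
}
```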
druid_SQLServerStatementParser_parseExecParameter | /**
 * Parses the parameters of a SQL Server EXEC statement, with support for OUT parameters.
*
* @author zz [[email protected]]
*/
public void parseExecParameter(Collection<SQLServerParameter> exprCol, SQLObject parent) {
if (lexer.token() == Token.RPAREN || lexer.token() == Token.RBRACKET) {
return;
}
if (lexer.token() == Token.EOF) {
return;
}
SQLServerParameter param = new SQLServerParameter();
SQLExpr expr = this.exprParser.expr();
expr.setParent(parent);
param.setExpr(expr);
if (lexer.token() == Token.OUT) {
param.setType(true);
accept(Token.OUT);
}
exprCol.add(param);
while (lexer.token() == Token.COMMA) {
lexer.nextToken();
param = new SQLServerParameter();
expr = this.exprParser.expr();
expr.setParent(parent);
param.setExpr(expr);
if (lexer.token() == Token.OUT) {
param.setType(true);
accept(Token.OUT);
}
exprCol.add(param);
}
} | 3.68 |
hbase_MetricsAssignmentManager_getOpenProcMetrics | /** Returns Set of common metrics for OpenRegionProcedure */
public ProcedureMetrics getOpenProcMetrics() {
return openProcMetrics;
} | 3.68 |
morf_SqlDialect_convertCommentToSQL | /**
* Convert a string to an SQL comment
*
* @param string The comment string
* @return An SQL comment containing the comment string
*/
public String convertCommentToSQL(String string) {
return "-- "+string;
} | 3.68 |
hbase_MergeTableRegionsProcedure_updateMetaForMergedRegions | /**
* Add merged region to META and delete original regions.
*/
private void updateMetaForMergedRegions(final MasterProcedureEnv env) throws IOException {
env.getAssignmentManager().markRegionAsMerged(mergedRegion, getServerName(env),
this.regionsToMerge);
} | 3.68 |
hmily_XidImpl_newResId | /**
 * Creates a new XID for the resource with the given index.
 *
 * @param index the index
 * @return the new XID
*/
public XidImpl newResId(final int index) {
return new XidImpl(this, index);
} | 3.68 |
hbase_FullTableBackupClient_execute | /**
* Backup request execution.
* @throws IOException if the execution of the backup fails
*/
@Override
public void execute() throws IOException {
try (Admin admin = conn.getAdmin()) {
// Begin BACKUP
beginBackup(backupManager, backupInfo);
String savedStartCode;
boolean firstBackup;
// do snapshot for full table backup
savedStartCode = backupManager.readBackupStartCode();
firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L;
if (firstBackup) {
// This is our first backup. Let's put some marker to system table so that we can hold the
// logs while we do the backup.
backupManager.writeBackupStartCode(0L);
}
// We roll log here before we do the snapshot. It is possible there is duplicate data
// in the log that is already in the snapshot. But if we do it after the snapshot, we
// could have data loss.
// A better approach is to do the roll log on each RS in the same global procedure as
// the snapshot.
LOG.info("Execute roll log procedure for full backup ...");
Map<String, String> props = new HashMap<>();
props.put("backupRoot", backupInfo.getBackupRootDir());
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
newTimestamps = backupManager.readRegionServerLastLogRollResult();
// SNAPSHOT_TABLES:
backupInfo.setPhase(BackupPhase.SNAPSHOT);
for (TableName tableName : tableList) {
String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime())
+ "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
snapshotTable(admin, tableName, snapshotName);
backupInfo.setSnapshotName(tableName, snapshotName);
}
// SNAPSHOT_COPY:
// do snapshot copy
LOG.debug("snapshot copy for " + backupId);
snapshotCopy(backupInfo);
// Updates incremental backup table set
backupManager.addIncrementalBackupTableSet(backupInfo.getTables());
// BACKUP_COMPLETE:
// set overall backup status: complete. Here we make sure to complete the backup.
      // After this checkpoint, even if the cancel process is entered, the backup will still be marked as finished
backupInfo.setState(BackupState.COMPLETE);
// The table list in backupInfo is good for both full backup and incremental backup.
// For incremental backup, it contains the incremental backup table set.
backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
Map<TableName, Map<String, Long>> newTableSetTimestampMap =
backupManager.readLogTimestampMap();
backupInfo.setTableSetTimestampMap(newTableSetTimestampMap);
Long newStartCode =
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode);
// backup complete
completeBackup(conn, backupInfo, backupManager, BackupType.FULL, conf);
} catch (Exception e) {
failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
BackupType.FULL, conf);
throw new IOException(e);
}
} | 3.68 |
flink_KeyMap_put | /**
* Inserts the given value, mapped under the given key. If the table already contains a value
 * for the key, the value is replaced and returned. If no value is contained yet, the function
 * returns null.
*
* @param key The key to insert.
* @param value The value to insert.
* @return The previously mapped value for the key, or null, if no value was mapped for the key.
* @throws java.lang.NullPointerException Thrown, if the key is null.
*/
public final V put(K key, V value) {
final int hash = hash(key);
final int slot = indexOf(hash);
// search the chain from the slot
for (Entry<K, V> e = table[slot]; e != null; e = e.next) {
Object k;
if (e.hashCode == hash && ((k = e.key) == key || key.equals(k))) {
// found match
V old = e.value;
e.value = value;
return old;
}
}
// no match, insert a new value
insertNewEntry(hash, key, value, slot);
return null;
} | 3.68 |
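The same replace-or-insert logic on a chained bucket array, reduced to a minimal self-contained sketch (this is not Flink's Entry class; hashing and table sizing are simplified):

```java
final class ChainedMap<K, V> {
  static final class Entry<K, V> {
    final K key; final int hash; V value; Entry<K, V> next;
    Entry(K key, int hash, V value, Entry<K, V> next) {
      this.key = key; this.hash = hash; this.value = value; this.next = next;
    }
  }

  @SuppressWarnings("unchecked")
  private final Entry<K, V>[] table = (Entry<K, V>[]) new Entry[64];

  V put(K key, V value) {
    int hash = key.hashCode();
    int slot = (hash & 0x7fffffff) % table.length;
    // walk the chain: replace on match, otherwise prepend a new entry
    for (Entry<K, V> e = table[slot]; e != null; e = e.next) {
      if (e.hash == hash && e.key.equals(key)) {
        V old = e.value;
        e.value = value;
        return old;
      }
    }
    table[slot] = new Entry<>(key, hash, value, table[slot]);
    return null;
  }
}
```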
hadoop_ResourceCalculatorProcessTree_getCumulativeCpuTime | /**
* Get the CPU time in millisecond used by all the processes in the
* process-tree since the process-tree was created
*
* @return cumulative CPU time in millisecond since the process-tree
* created, {@link #UNAVAILABLE} if it cannot be calculated.
*/
public long getCumulativeCpuTime() {
return UNAVAILABLE;
} | 3.68 |
morf_FieldFromSelectFirst_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser.dispatch(getSelectFirstStatement());
} | 3.68 |
pulsar_ConcurrentOpenLongPairRangeSet_add | /**
* Adds the specified range to this {@code RangeSet} (optional operation). That is, for equal range sets a and b,
* the result of {@code a.add(range)} is that {@code a} will be the minimal range set for which both
* {@code a.enclosesAll(b)} and {@code a.encloses(range)}.
*
* <p>Note that {@code range} will merge given {@code range} with any ranges in the range set that are
* {@linkplain Range#isConnected(Range) connected} with it. Moreover, if {@code range} is empty/invalid, this is a
* no-op.
*/
public void add(Range<LongPair> range) {
LongPair lowerEndpoint = range.hasLowerBound() ? range.lowerEndpoint() : LongPair.earliest;
LongPair upperEndpoint = range.hasUpperBound() ? range.upperEndpoint() : LongPair.latest;
long lowerValueOpen = (range.hasLowerBound() && range.lowerBoundType().equals(BoundType.CLOSED))
? getSafeEntry(lowerEndpoint) - 1
: getSafeEntry(lowerEndpoint);
long upperValueClosed = (range.hasUpperBound() && range.upperBoundType().equals(BoundType.CLOSED))
? getSafeEntry(upperEndpoint)
: getSafeEntry(upperEndpoint) + 1;
// #addOpenClosed doesn't create a bitSet for the lower key because it avoids setting up values for
// non-existent items in the key-ledger. So, create and initialize the bitSet here so that it can't be
// ignored at #addOpenClosed
rangeBitSetMap.computeIfAbsent(lowerEndpoint.getKey(), (key) -> createNewBitSet())
.set((int) lowerValueOpen + 1);
this.addOpenClosed(lowerEndpoint.getKey(), lowerValueOpen, upperEndpoint.getKey(), upperValueClosed);
} | 3.68 |
hadoop_AMRMProxyService_init | /**
* Initializes the wrapper with the specified parameters.
*
* @param interceptor the root request interceptor
* @param appAttemptId attempt id
*/
public synchronized void init(RequestInterceptor interceptor,
ApplicationAttemptId appAttemptId) {
rootInterceptor = interceptor;
applicationAttemptId = appAttemptId;
} | 3.68 |
hbase_AbstractStateMachineTableProcedure_preflightChecks | /**
* Check that cluster is up and master is running. Check table is modifiable. If
* <code>enabled</code>, check table is enabled else check it is disabled. Call in Procedure
* constructor so can pass any exception to caller.
* @param enabled If true, check table is enabled and throw exception if not. If false, do the
* inverse. If null, do no table checks.
*/
protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws HBaseIOException {
MasterServices master = env.getMasterServices();
if (!master.isClusterUp()) {
throw new HBaseIOException("Cluster not up!");
}
if (master.isStopping() || master.isStopped()) {
throw new HBaseIOException(
"Master stopping=" + master.isStopping() + ", stopped=" + master.isStopped());
}
if (enabled == null) {
// Don't do any table checks.
return;
}
try {
// Checks table exists and is modifiable.
checkTableModifiable(env);
TableName tn = getTableName();
TableStateManager tsm = master.getTableStateManager();
TableState ts = tsm.getTableState(tn);
if (enabled) {
if (!ts.isEnabledOrEnabling()) {
throw new TableNotEnabledException(tn);
}
} else {
if (!ts.isDisabledOrDisabling()) {
throw new TableNotDisabledException(tn);
}
}
} catch (IOException ioe) {
if (ioe instanceof HBaseIOException) {
throw (HBaseIOException) ioe;
}
throw new HBaseIOException(ioe);
}
} | 3.68 |
dubbo_AbstractClusterInvoker_initLoadBalance | /**
* Init LoadBalance.
* <p>
 * if invokers is not empty, init from the first invoker's url and invocation
 * if invokers is empty, init a default LoadBalance (RandomLoadBalance)
* </p>
*
* @param invokers invokers
* @param invocation invocation
* @return LoadBalance instance. if not need init, return null.
*/
protected LoadBalance initLoadBalance(List<Invoker<T>> invokers, Invocation invocation) {
ApplicationModel applicationModel = ScopeModelUtil.getApplicationModel(invocation.getModuleModel());
if (CollectionUtils.isNotEmpty(invokers)) {
return applicationModel
.getExtensionLoader(LoadBalance.class)
.getExtension(invokers.get(0)
.getUrl()
.getMethodParameter(
RpcUtils.getMethodName(invocation), LOADBALANCE_KEY, DEFAULT_LOADBALANCE));
} else {
return applicationModel.getExtensionLoader(LoadBalance.class).getExtension(DEFAULT_LOADBALANCE);
}
} | 3.68 |
hbase_QuotaTableUtil_getObservedSnapshotSizes | /**
* Fetches any persisted HBase snapshot sizes stored in the quota table. The sizes here are
 * computed relative to the table which the snapshot was created from. A snapshot's size will not
 * include the size of files which the table still refers to. These sizes, in bytes, are what is used
* internally to compute quota violation for tables and namespaces.
* @return A map of snapshot name to size in bytes per space quota computations
*/
public static Map<String, Long> getObservedSnapshotSizes(Connection conn) throws IOException {
try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME);
ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) {
final Map<String, Long> snapshotSizes = new HashMap<>();
for (Result r : rs) {
CellScanner cs = r.cellScanner();
while (cs.advance()) {
Cell c = cs.current();
final String snapshot = extractSnapshotNameFromSizeCell(c);
final long size = parseSnapshotSize(c);
snapshotSizes.put(snapshot, size);
}
}
return snapshotSizes;
}
} | 3.68 |
framework_DateFieldElement_setValue | /**
* Set value of the date field element.
*
* @param chars
* new value of the date field
* @throws ReadOnlyException
* if the date field is in readonly mode
*/
public void setValue(CharSequence chars) throws ReadOnlyException {
if (isReadOnly()) {
throw new ReadOnlyException();
}
WebElement elem = getInputElement();
TestBenchElement tbElement = (TestBenchElement) elem;
clearElementClientSide(tbElement);
tbElement.sendKeys(chars);
tbElement.sendKeys(Keys.TAB);
} | 3.68 |
flink_UserDefinedFunctionHelper_validateNotSingleton | /**
* Check whether this is a Scala object. Using Scala objects can lead to concurrency issues,
* e.g., due to a shared collector.
*/
private static void validateNotSingleton(Class<?> clazz) {
if (Arrays.stream(clazz.getFields()).anyMatch(f -> f.getName().equals("MODULE$"))) {
throw new ValidationException(
String.format(
"Function implemented by class %s is a Scala object. This is forbidden because of concurrency"
+ " problems when using them.",
clazz.getName()));
}
} | 3.68 |
framework_Result_ifOk | /**
* Applies the {@code consumer} if result is not an error.
*
* @param consumer
* consumer to apply in case it's not an error
*/
public default void ifOk(SerializableConsumer<R> consumer) {
handle(consumer, error -> {
});
} | 3.68 |
hadoop_SubApplicationEntityReader_createFilterListForColsOfInfoFamily | /**
* Creates a filter list which indicates that only some of the column
* qualifiers in the info column family will be returned in result.
*
* @return filter list.
* @throws IOException if any problem occurs while creating filter list.
*/
private FilterList createFilterListForColsOfInfoFamily() throws IOException {
FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
// Add filters for each column in entity table.
updateFixedColumns(infoFamilyColsFilter);
EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
// If INFO field has to be retrieved, add a filter for fetching columns
// with INFO column prefix.
if (hasField(fieldsToRetrieve, Field.INFO)) {
infoFamilyColsFilter.addFilter(
TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
SubApplicationColumnPrefix.INFO));
}
TimelineFilterList relatesTo = getFilters().getRelatesTo();
if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
// If RELATES_TO field has to be retrieved, add a filter for fetching
// columns with RELATES_TO column prefix.
infoFamilyColsFilter.addFilter(
TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
SubApplicationColumnPrefix.RELATES_TO));
} else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
// Even if fields to retrieve does not contain RELATES_TO, we still
// need to have a filter to fetch some of the column qualifiers if
// relatesTo filters are specified. relatesTo filters will then be
// matched after fetching rows from HBase.
Set<String> relatesToCols =
TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
SubApplicationColumnPrefix.RELATES_TO, relatesToCols));
}
TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
// If IS_RELATED_TO field has to be retrieved, add a filter for fetching
// columns with IS_RELATED_TO column prefix.
infoFamilyColsFilter.addFilter(
TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
SubApplicationColumnPrefix.IS_RELATED_TO));
} else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
// Even if fields to retrieve does not contain IS_RELATED_TO, we still
// need to have a filter to fetch some of the column qualifiers if
// isRelatedTo filters are specified. isRelatedTo filters will then be
// matched after fetching rows from HBase.
Set<String> isRelatedToCols =
TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
SubApplicationColumnPrefix.IS_RELATED_TO, isRelatedToCols));
}
TimelineFilterList eventFilters = getFilters().getEventFilters();
if (hasField(fieldsToRetrieve, Field.EVENTS)) {
// If EVENTS field has to be retrieved, add a filter for fetching columns
// with EVENT column prefix.
infoFamilyColsFilter.addFilter(
TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
SubApplicationColumnPrefix.EVENT));
} else if (eventFilters != null
&& !eventFilters.getFilterList().isEmpty()) {
// Even if fields to retrieve does not contain EVENTS, we still need to
// have a filter to fetch some of the column qualifiers on the basis of
// event filters specified. Event filters will then be matched after
// fetching rows from HBase.
Set<String> eventCols =
TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
SubApplicationColumnPrefix.EVENT, eventCols));
}
return infoFamilyColsFilter;
} | 3.68 |
hbase_WALKey_getExtendedAttributes | /**
* Returns a map of all extended attributes injected into this WAL key.
*/
default Map<String, byte[]> getExtendedAttributes() {
return new HashMap<>();
} | 3.68 |
rocketmq-connect_AbstractKafkaSinkConnector_start | /**
* Start the component
*
* @param config component context
*/
@Override
public void start(KeyValue config) {
this.configValue = new ConnectKeyValue();
config.keySet().forEach(key -> {
this.configValue.put(key, config.getString(key));
});
setConnectorClass(configValue);
taskConfig = new HashMap<>(configValue.config());
// get the connector class name from config and create the sink connector via reflection
try {
sinkConnector = Class.forName(taskConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG))
.asSubclass(org.apache.kafka.connect.sink.SinkConnector.class)
.getDeclaredConstructor()
.newInstance();
} catch (Exception e) {
throw new ConnectException("Load connector class failed, " + taskConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG));
}
} | 3.68 |
zxing_AlignmentPatternFinder_foundPatternCross | /**
* @param stateCount count of black/white/black pixels just read
* @return true iff the proportions of the counts is close enough to the 1/1/1 ratios
* used by alignment patterns to be considered a match
*/
private boolean foundPatternCross(int[] stateCount) {
float moduleSize = this.moduleSize;
float maxVariance = moduleSize / 2.0f;
for (int i = 0; i < 3; i++) {
if (Math.abs(moduleSize - stateCount[i]) >= maxVariance) {
return false;
}
}
return true;
} | 3.68 |
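For intuition, a small worked instance of the tolerance check above (the numbers are illustrative, not taken from ZXing):

```java
public class RatioCheckExample {
  public static void main(String[] args) {
    // With moduleSize = 7 the allowed variance is 3.5, so counts {6, 8, 9}
    // pass (deviations 1, 1, 2) while {6, 8, 11} would fail (deviation 4).
    float moduleSize = 7.0f;
    float maxVariance = moduleSize / 2.0f;
    int[] stateCount = {6, 8, 9};
    boolean match = true;
    for (int i = 0; i < 3; i++) {
      if (Math.abs(moduleSize - stateCount[i]) >= maxVariance) {
        match = false;
      }
    }
    System.out.println(match); // prints true
  }
}
```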
morf_XmlDataSetProducer_close | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#close()
*/
@Override
public void close() {
xmlStreamProvider.close();
urlHandler.close();
} | 3.68 |
querydsl_DateExpression_dayOfMonth | /**
* Create a day of month expression (range 1-31)
*
* @return day of month
*/
public NumberExpression<Integer> dayOfMonth() {
if (dayOfMonth == null) {
dayOfMonth = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.DAY_OF_MONTH, mixin);
}
return dayOfMonth;
} | 3.68 |
AreaShop_RentRegion_getDuration | /**
* Get the duration of 1 rent period.
* @return The duration in milliseconds of 1 rent period
*/
public long getDuration() {
return Utils.durationStringToLong(getDurationString());
} | 3.68 |
druid_DruidDataSourceWrapper_setMaxEvictableIdleTimeMillis | /**
* Ignore the 'maxEvictableIdleTimeMillis < minEvictableIdleTimeMillis' validate,
* it will be validated again in {@link DruidDataSource#init()}.
* <p>
* for fix issue #3084, #2763
*
* @since 1.1.14
*/
@Override
public void setMaxEvictableIdleTimeMillis(long maxEvictableIdleTimeMillis) {
try {
super.setMaxEvictableIdleTimeMillis(maxEvictableIdleTimeMillis);
} catch (IllegalArgumentException ignore) {
super.maxEvictableIdleTimeMillis = maxEvictableIdleTimeMillis;
}
} | 3.68 |
hadoop_RouterQuotaUpdateService_getQuotaSetMountTables | /**
 * Get mount tables for which a quota has been set.
* During this time, the quota usage cache will also be updated by
* quota manager:
* 1. Stale paths (entries) will be removed.
* 2. Existing entries will be overridden and updated.
 * @return List of mount tables for which a quota has been set.
* @throws IOException
*/
private List<MountTable> getQuotaSetMountTables() throws IOException {
List<MountTable> mountTables = getMountTableEntries();
Set<String> allPaths = this.quotaManager.getAll();
Set<String> stalePaths = new HashSet<>(allPaths);
List<MountTable> neededMountTables = new LinkedList<>();
for (MountTable entry : mountTables) {
// select mount tables which is quota set
if (isQuotaSet(entry)) {
neededMountTables.add(entry);
}
// update mount table entries info in quota cache
String src = entry.getSourcePath();
this.quotaManager.updateQuota(src, entry.getQuota());
stalePaths.remove(src);
}
// remove stale paths that currently cached
for (String stalePath : stalePaths) {
this.quotaManager.remove(stalePath);
}
return neededMountTables;
} | 3.68 |
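The update loop above follows a common cache-reconciliation pattern: seed a "stale" set with every cached key, drop each key seen in the fresh listing, then evict the leftovers. A generic sketch of that pattern (a hypothetical helper, not Hadoop code):

```java
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class CacheReconciler {
  /**
   * Generic form of the update loop above: every cached key not seen in the
   * fresh listing is treated as stale and evicted.
   */
  static <K, V> void reconcile(Map<K, V> cache, List<Map.Entry<K, V>> fresh) {
    Set<K> stale = new HashSet<>(cache.keySet());
    for (Map.Entry<K, V> entry : fresh) {
      cache.put(entry.getKey(), entry.getValue()); // override / update existing entries
      stale.remove(entry.getKey());
    }
    for (K key : stale) {
      cache.remove(key); // remove entries no longer present
    }
  }
}
```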
dubbo_AbstractHttpClientFactory_createRestClient | //////////////////////////////////////// implements start ///////////////////////////////////////////////
@Override
public RestClient createRestClient(URL url) throws RpcException {
beforeCreated(url);
// create a raw client
RestClient restClient = doCreateRestClient(url);
// postprocessor
afterCreated(restClient);
return restClient;
} | 3.68 |
hbase_HFileCorruptionChecker_checkHFile | /**
 * Checks a path to see if it is a valid hfile.
 * @param p full Path to an HFile
 * @throws IOException This is a connectivity related exception
*/
protected void checkHFile(Path p) throws IOException {
HFile.Reader r = null;
try {
r = HFile.createReader(fs, p, cacheConf, true, conf);
} catch (CorruptHFileException che) {
LOG.warn("Found corrupt HFile " + p, che);
corrupted.add(p);
if (inQuarantineMode) {
Path dest = createQuarantinePath(p);
LOG.warn("Quarantining corrupt HFile " + p + " into " + dest);
boolean success = fs.mkdirs(dest.getParent());
success = success ? fs.rename(p, dest) : false;
if (!success) {
failures.add(p);
} else {
quarantined.add(dest);
}
}
return;
} catch (FileNotFoundException fnfe) {
LOG.warn("HFile " + p + " was missing. Likely removed due to compaction/split?");
missing.add(p);
} finally {
hfilesChecked.addAndGet(1);
if (r != null) {
r.close(true);
}
}
} | 3.68 |
hbase_RequestConverter_buildIsNormalizerEnabledRequest | /**
* Creates a protocol buffer IsNormalizerEnabledRequest
* @return a IsNormalizerEnabledRequest
*/
public static IsNormalizerEnabledRequest buildIsNormalizerEnabledRequest() {
return IsNormalizerEnabledRequest.newBuilder().build();
} | 3.68 |
AreaShop_RegionGroup_getPriority | /**
* Get the priority of the group (higher overwrites).
* @return The priority of the group
*/
public int getPriority() {
return getSettings().getInt("priority");
} | 3.68 |
framework_LoginForm_getUsernameCaption | /**
* Gets the caption set with {@link #setUsernameCaption(String)}. Note that
* this method might not match what is shown to the user if
* {@link #createUsernameField()} has been overridden.
*
* @return the user name field caption
*/
public String getUsernameCaption() {
return usernameCaption;
} | 3.68 |
hudi_HoodieTableFactory_sanityCheck | /**
* The sanity check.
*
* @param conf The table options
* @param schema The table schema
*/
private void sanityCheck(Configuration conf, ResolvedSchema schema) {
checkTableType(conf);
if (!OptionsResolver.isAppendMode(conf)) {
checkRecordKey(conf, schema);
}
checkPreCombineKey(conf, schema);
} | 3.68 |
flink_ScriptProcessBuilder_splitArgs | // Code below shamelessly borrowed from Hadoop Streaming
private String[] splitArgs(String args) {
final int outSide = 1;
final int singLeq = 2;
final int doubleLeq = 3;
List<String> argList = new ArrayList<>();
char[] ch = args.toCharArray();
int clen = ch.length;
int state = outSide;
int argstart = 0;
for (int c = 0; c <= clen; c++) {
boolean last = (c == clen);
int lastState = state;
boolean endToken = false;
if (!last) {
if (ch[c] == '\'') {
if (state == outSide) {
state = singLeq;
} else if (state == singLeq) {
state = outSide;
}
endToken = (state != lastState);
} else if (ch[c] == '"') {
if (state == outSide) {
state = doubleLeq;
} else if (state == doubleLeq) {
state = outSide;
}
endToken = (state != lastState);
} else if (ch[c] == ' ') {
if (state == outSide) {
endToken = true;
}
}
}
if (last || endToken) {
if (c != argstart) {
String a;
a = args.substring(argstart, c);
argList.add(a);
}
argstart = c + 1;
}
}
return argList.toArray(new String[0]);
} | 3.68 |
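Since splitArgs is private, the following is only an illustration of its tokenization behaviour, derived from the state machine above and assuming test-level access:

```java
// Expected tokenization (illustrative): the quote characters delimiting a
// token are stripped, while quotes of the other style nested inside are kept.
//
//   splitArgs("/bin/sh -c 'echo \"hi there\"'")
//     -> ["/bin/sh", "-c", "echo \"hi there\""]
//
//   splitArgs("a  b")   // repeated spaces yield no empty tokens
//     -> ["a", "b"]
```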
rocketmq-connect_MetricsReporter_onGaugeAdded | /**
* Called when a {@link Gauge} is added to the registry.
*
* @param name the gauge's name
* @param gauge the gauge
*/
public void onGaugeAdded(String name, Gauge<?> gauge) {
this.onGaugeAdded(MetricUtils.stringToMetricName(name), gauge.getValue());
} | 3.68 |
flink_CalculatedTableFactory_create | /**
* Creates a valid {@link CalculatedQueryOperation} operation.
*
* @param callExpr call to table function as expression
* @return valid calculated table
*/
QueryOperation create(ResolvedExpression callExpr, List<String> leftTableFieldNames) {
FunctionTableCallVisitor calculatedTableCreator =
new FunctionTableCallVisitor(leftTableFieldNames);
return callExpr.accept(calculatedTableCreator);
} | 3.68 |
hudi_AvroInternalSchemaConverter_visitInternalArrayToBuildAvroArray | /**
* Converts hudi ArrayType to Avro ArrayType.
 * This is an auxiliary function used by visitInternalSchemaToBuildAvroSchema.
*/
private static Schema visitInternalArrayToBuildAvroArray(Types.ArrayType array, Schema elementSchema) {
Schema result;
if (array.isElementOptional()) {
result = Schema.createArray(AvroInternalSchemaConverter.nullableSchema(elementSchema));
} else {
result = Schema.createArray(elementSchema);
}
return result;
} | 3.68 |
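AvroInternalSchemaConverter.nullableSchema is not shown here; presumably it wraps the element schema in a union with null. A minimal sketch of that assumption using the standard Avro API (not the actual Hudi helper):

```java
import java.util.Arrays;
import org.apache.avro.Schema;

final class AvroSchemas {
  /** Assumed behaviour of nullableSchema: a union of null and the given type. */
  static Schema nullable(Schema schema) {
    if (schema.getType() == Schema.Type.UNION) {
      return schema; // already a union; real code would merge null into it
    }
    return Schema.createUnion(Arrays.asList(Schema.create(Schema.Type.NULL), schema));
  }

  /** Array whose elements may be null, as in the optional-element branch above. */
  static Schema optionalElementArray(Schema elementSchema) {
    return Schema.createArray(nullable(elementSchema));
  }
}
```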
hadoop_MutableGaugeLong_set | /**
* Set the value of the metric
* @param value to set
*/
public void set(long value) {
this.value.set(value);
setChanged();
} | 3.68 |
hbase_DataBlockEncoding_getDataBlockEncoderById | /**
 * Find and create a data block encoder for the given id.
* @param encoderId id of data block encoder.
* @return Newly created data block encoder.
*/
public static DataBlockEncoder getDataBlockEncoderById(short encoderId) {
return getEncodingById(encoderId).getEncoder();
} | 3.68 |
hbase_ReplicationSourceLogQueue_getOldestWalAge | /*
* Returns the age of oldest wal.
*/
long getOldestWalAge() {
long now = EnvironmentEdgeManager.currentTime();
long timestamp = getOldestWalTimestamp();
if (timestamp == Long.MAX_VALUE) {
// If there are no wals in the queue then set the oldest wal timestamp to current time
// so that the oldest wal age will be 0.
timestamp = now;
}
long age = now - timestamp;
return age;
} | 3.68 |
hbase_AccessControlUtil_toPermission | /**
* Convert a protobuf UserTablePermissions to a ListMultimap<Username, Permission>
* @param proto the proto UsersAndPermissions
* @return a ListMultimap with user and its permissions
*/
public static ListMultimap<String, Permission>
toPermission(AccessControlProtos.UsersAndPermissions proto) {
ListMultimap<String, Permission> perms = ArrayListMultimap.create();
AccessControlProtos.UsersAndPermissions.UserPermissions userPerm;
for (int i = 0; i < proto.getUserPermissionsCount(); i++) {
userPerm = proto.getUserPermissions(i);
String username = userPerm.getUser().toStringUtf8();
for (int j = 0; j < userPerm.getPermissionsCount(); j++) {
perms.put(username, toPermission(userPerm.getPermissions(j)));
}
}
return perms;
} | 3.68 |
flink_SingleInputGate_notifyPriorityEvent | /**
* Notifies that the respective channel has a priority event at the head for the given buffer
* number.
*
* <p>The buffer number limits the notification to the respective buffer and voids the whole
* notification in case that the buffer has been polled in the meantime. That is, if task thread
* polls the enqueued priority buffer before this notification occurs (notification is not
* performed under lock), this buffer number allows {@link #queueChannel(InputChannel, Integer,
* boolean)} to avoid spurious priority wake-ups.
*/
void notifyPriorityEvent(InputChannel inputChannel, int prioritySequenceNumber) {
queueChannel(checkNotNull(inputChannel), prioritySequenceNumber, false);
} | 3.68 |
hbase_Encryption_hash128 | /**
* Return the MD5 digest of the concatenation of the supplied arguments.
*/
public static byte[] hash128(byte[]... args) {
return hashWithAlg("MD5", args);
} | 3.68 |
framework_DDEventHandleStrategy_getTargetElement | /**
* Get target element for {@code event}.
*
* @param event
* GWT event to find target
* @param mediator
* VDragAndDropManager data accessor
* @return target element for {@code event}
*/
public Element getTargetElement(NativePreviewEvent event,
DDManagerMediator mediator) {
NativeEvent gwtEvent = event.getNativeEvent();
Element targetElement;
if (WidgetUtil.isTouchEvent(gwtEvent)
|| mediator.getManager().getDragElement() != null) {
int x = WidgetUtil.getTouchOrMouseClientX(gwtEvent);
int y = WidgetUtil.getTouchOrMouseClientY(gwtEvent);
// Util.browserDebugger();
targetElement = WidgetUtil.getElementFromPoint(x, y);
} else {
Node targetNode = Node.as(gwtEvent.getEventTarget());
if (Element.is(targetNode)) {
targetElement = Element.as(targetNode);
} else {
targetElement = targetNode.getParentElement();
}
}
return targetElement;
} | 3.68 |
flink_StateBackend_supportsNoClaimRestoreMode | /**
* Tells if a state backend supports the {@link RestoreMode#NO_CLAIM} mode.
*
* <p>If a state backend supports {@code NO_CLAIM} mode, it should create an independent
* snapshot when it receives {@link CheckpointType#FULL_CHECKPOINT} in {@link
* Snapshotable#snapshot(long, long, CheckpointStreamFactory, CheckpointOptions)}.
*
* @return If the state backend supports {@link RestoreMode#NO_CLAIM} mode.
*/
default boolean supportsNoClaimRestoreMode() {
return false;
} | 3.68 |
graphhopper_PbfRawBlob_getType | /**
* Gets the type of data represented by this blob. This corresponds to the type field in the
* blob header.
* <p>
*
* @return The blob type.
*/
public String getType() {
return type;
} | 3.68 |
hbase_TimeRange_from | /**
* Represents the time interval [minStamp, Long.MAX_VALUE)
* @param minStamp the minimum timestamp value, inclusive
*/
public static TimeRange from(long minStamp) {
check(minStamp, INITIAL_MAX_TIMESTAMP);
return new TimeRange(minStamp, INITIAL_MAX_TIMESTAMP);
} | 3.68 |
hudi_Pipelines_append | /**
* Insert the dataset with append mode(no upsert or deduplication).
*
* <p>The input dataset would be rebalanced among the write tasks:
*
* <pre>
* | input1 | ===\ /=== | task1 | (p1, p2, p3, p4)
* shuffle
* | input2 | ===/ \=== | task2 | (p1, p2, p3, p4)
*
* Note: Both input1 and input2's dataset come from partitions: p1, p2, p3, p4
* </pre>
*
* <p>The write task switches to new file handle each time it receives a record
* from the different partition path, so there may be many small files.
*
* @param conf The configuration
* @param rowType The input row type
* @param dataStream The input data stream
* @return the appending data stream sink
*/
public static DataStream<Object> append(
Configuration conf,
RowType rowType,
DataStream<RowData> dataStream) {
WriteOperatorFactory<RowData> operatorFactory = AppendWriteOperator.getFactory(conf, rowType);
return dataStream
.transform(opName("hoodie_append_write", conf), TypeInformation.of(Object.class), operatorFactory)
.uid(opUID("hoodie_stream_write", conf))
.setParallelism(conf.getInteger(FlinkOptions.WRITE_TASKS));
} | 3.68 |
Activiti_TreeValueExpression_getValue | /**
* Evaluates the expression as an rvalue and answers the result.
* @param context used to resolve properties (<code>base.property</code> and <code>base[property]</code>)
* and to determine the result from the last base/property pair
* @return rvalue evaluation result
* @throws ELException if evaluation fails (e.g. property not found, type conversion failed, ...)
*/
@Override
public Object getValue(ELContext context) throws ELException {
return node.getValue(bindings, context, type);
} | 3.68 |
hadoop_Check_notEmptyElements | /**
 * Verifies a string list is not NULL and not empty.
*
* @param list the list to check.
* @param name the name to use in the exception message.
*
* @return the variable.
*
* @throws IllegalArgumentException if the string list has NULL or empty
* elements.
*/
public static List<String> notEmptyElements(List<String> list, String name) {
notNull(list, name);
for (int i = 0; i < list.size(); i++) {
notEmpty(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i));
}
return list;
} | 3.68 |
framework_VFlash_setEmbedParams | /**
* Sets the map of object parameters. Parameters are optional information,
 * and they are passed to the instantiated object. Parameters are stored
 * as name-value pairs. Calling this method a second time overrides the
* previously given map.
*
* @param params
* the parameter map
*/
public void setEmbedParams(Map<String, String> params) {
if (params == null) {
if (!embedParams.isEmpty()) {
embedParams.clear();
needsRebuild = true;
}
return;
}
if (!embedParams.equals(params)) {
embedParams = new HashMap<>(params);
needsRebuild = true;
}
} | 3.68 |
flink_CompactingHashTable_open | /** Initialize the hash table */
@Override
public void open() {
synchronized (stateLock) {
if (!closed) {
throw new IllegalStateException("currently not closed.");
}
closed = false;
}
// create the partitions
final int partitionFanOut = getPartitioningFanOutNoEstimates(this.availableMemory.size());
createPartitions(partitionFanOut);
// set up the table structure. the write behind buffers are taken away, as are one buffer
// per partition
final int numBuckets =
getInitialTableSize(
this.availableMemory.size(),
this.segmentSize,
partitionFanOut,
this.avgRecordLen);
initTable(numBuckets, (byte) partitionFanOut);
} | 3.68 |
hudi_HoodiePartitionMetadata_hasPartitionMetadata | // methods related to partition meta data
public static boolean hasPartitionMetadata(FileSystem fs, Path partitionPath) {
try {
return textFormatMetaPathIfExists(fs, partitionPath).isPresent()
|| baseFormatMetaPathIfExists(fs, partitionPath).isPresent();
} catch (IOException ioe) {
throw new HoodieIOException("Error checking presence of partition meta file for " + partitionPath, ioe);
}
} | 3.68 |
morf_OracleDialect_getSqlForDateToYyyymmddHHmmss | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDateToYyyymmddHHmmss(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForDateToYyyymmddHHmmss(Function function) {
return String.format("TO_NUMBER(TO_CHAR(%s, 'yyyymmddHH24MISS'))",getSqlFrom(function.getArguments().get(0)));
} | 3.68 |
flink_KeyedStream_getKeySelector | /**
 * Gets the key selector that can get the key by which the stream is partitioned from the
* elements.
*
* @return The key selector for the key.
*/
@Internal
public KeySelector<T, KEY> getKeySelector() {
return this.keySelector;
} | 3.68 |
querydsl_TimeExpression_currentTime | /**
* Create an expression representing the current time as a TimeExpression instance
*
* @return current time
*/
public static <T extends Comparable> TimeExpression<T> currentTime(Class<T> cl) {
return Expressions.timeOperation(cl, Ops.DateTimeOps.CURRENT_TIME);
} | 3.68 |
hadoop_AdlPermission_getAclBit | /**
* Returns true if "adl.feature.support.acl.bit" configuration is set to
* true.
*
* If configuration is not set then default value is true.
*
* @return If configuration is not set then default value is true.
*/
public boolean getAclBit() {
return aclBit;
} | 3.68 |
hbase_RotateFile_delete | /**
* Deletes the two files used for rotating data. If any of the files cannot be deleted, an
* IOException is thrown.
* @throws IOException if there is an error deleting either file
*/
public void delete() throws IOException {
Path next = files[nextFile];
// delete next file first, and then the current file, so when failing to delete, we can still
// read the correct data
if (fs.exists(next) && !fs.delete(next, false)) {
throw new IOException("Can not delete " + next);
}
Path current = files[1 - nextFile];
if (fs.exists(current) && !fs.delete(current, false)) {
throw new IOException("Can not delete " + current);
}
} | 3.68 |
dubbo_MemberDescriber_getName | /**
* Return the name of the member.
* @return the name
*/
public String getName() {
return this.name;
} | 3.68 |
flink_RequestStatusOverview_readResolve | /** Preserve the singleton property by returning the singleton instance */
private Object readResolve() {
return INSTANCE;
} | 3.68 |
framework_VCalendar_getForwardListener | /**
* Get the listener which listens to forward events from the calendar.
*
 * @return the forward listener
*/
public ForwardListener getForwardListener() {
return forwardListener;
} | 3.68 |
morf_UpgradePathFinder_getUuid | /**
 * Returns the UUID of the candidate.
*/
public java.util.UUID getUuid() {
return uuid;
} | 3.68 |
flink_HsSubpartitionFileReaderImpl_prepareForScheduling | /** Refresh downstream consumption progress for another round scheduling of reading. */
@Override
public void prepareForScheduling() {
// Access the consuming offset with lock, to prevent loading any buffer released from the
// memory data manager that is already consumed.
int consumingOffset = operations.getConsumingOffset(true);
bufferIndexManager.updateLastConsumed(consumingOffset);
cachedRegionManager.updateConsumingOffset(consumingOffset);
} | 3.68 |
hbase_RSGroupAdminClient_moveServersAndTables | /**
* Move given set of servers and tables to the specified target RegionServer group.
* @param servers set of servers to move
* @param tables set of tables to move
* @param targetGroup the target group name
* @throws IOException if moving the server and tables fail
*/
public void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String targetGroup)
throws IOException {
MoveServersAndTablesRequest.Builder builder =
MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup);
for (Address el : servers) {
builder.addServers(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname())
.setPort(el.getPort()).build());
}
for (TableName tableName : tables) {
builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
if (!admin.tableExists(tableName)) {
throw new TableNotFoundException(tableName);
}
}
try {
stub.moveServersAndTables(null, builder.build());
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.68 |
hudi_HoodieTable_clean | /**
* Executes a new clean action.
*
* @return information on cleaned file slices
*/
@Deprecated
public HoodieCleanMetadata clean(HoodieEngineContext context, String cleanInstantTime, boolean skipLocking) {
return clean(context, cleanInstantTime);
} | 3.68 |
framework_WindowMoveListener_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Tests that windows send their updated position "
+ "to server-side after being moved by user";
} | 3.68 |
hudi_MarkerDirState_getNextFileIndexToUse | /**
* @return the next file index to use in a round-robin fashion,
* or empty if no file is available.
*/
public Option<Integer> getNextFileIndexToUse() {
int fileIndex = -1;
synchronized (markerCreationProcessingLock) {
// Scans for the next free file index to use after {@code lastFileIndexUsed}
for (int i = 0; i < threadUseStatus.size(); i++) {
int index = (lastFileIndexUsed + 1 + i) % threadUseStatus.size();
if (!threadUseStatus.get(index)) {
fileIndex = index;
threadUseStatus.set(index, true);
break;
}
}
if (fileIndex >= 0) {
lastFileIndexUsed = fileIndex;
return Option.of(fileIndex);
}
}
return Option.empty();
} | 3.68 |
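The loop above is a round-robin scan for the first free slot after the last index used. The same logic in isolation, using plain java.util types with locking left to the caller:

```java
import java.util.List;
import java.util.Optional;

final class RoundRobinSlots {
  /**
   * Scans the slots after lastUsed in a circular fashion and claims the first
   * free one, mirroring the loop above (the caller is assumed to hold the lock).
   */
  static Optional<Integer> nextFreeSlot(List<Boolean> inUse, int lastUsed) {
    for (int i = 0; i < inUse.size(); i++) {
      int index = (lastUsed + 1 + i) % inUse.size();
      if (!inUse.get(index)) {
        inUse.set(index, true);
        return Optional.of(index);
      }
    }
    return Optional.empty();
  }
}
```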
framework_AbstractBeanContainer_addBeanAt | /**
* Adds a bean at a specified (filtered view) position in the container
* using the bean item id resolver to find its identifier.
*
* A bean id resolver must be set before calling this method.
*
* @see #addItemAfter(Object, Object, Object)
*
* @param index
* the index (in the filtered view) at which to add the item
* @param bean
* the bean to add
* @return BeanItem<BEANTYPE> item added or null
* @throws IllegalStateException
* if no bean identifier resolver has been set
* @throws IllegalArgumentException
* if an identifier cannot be resolved for the bean
*/
protected BeanItem<BEANTYPE> addBeanAt(int index, BEANTYPE bean)
throws IllegalStateException, IllegalArgumentException {
if (bean == null) {
return null;
}
IDTYPE itemId = resolveBeanId(bean);
if (itemId == null) {
throw new IllegalArgumentException(
"Resolved identifier for a bean must not be null");
}
return addItemAt(index, itemId, bean);
} | 3.68 |
flink_SqlJsonUtils_serializeJson | /** Serializes the given {@link JsonNode} to a JSON string. */
public static String serializeJson(JsonNode node) {
try {
// For JSON functions to have deterministic output, we need to sort the keys. However,
// Jackson's built-in features don't work on the tree representation, so we need to
// convert the tree first.
final Object convertedNode = MAPPER.treeToValue(node, Object.class);
return MAPPER.writeValueAsString(convertedNode);
} catch (JsonProcessingException e) {
throw new TableException("JSON object could not be serialized: " + node.asText(), e);
}
} | 3.68 |
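A runnable sketch of the key-ordering rationale in the comment: Jackson's ORDER_MAP_ENTRIES_BY_KEYS feature applies to java.util.Map values but not to an ObjectNode tree, so the tree is converted to plain objects first. The MAPPER configuration shown is an assumption consistent with that comment, not necessarily Flink's exact setup:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;

public class SortedJsonExample {
  private static final ObjectMapper MAPPER =
      new ObjectMapper().enable(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS);

  public static void main(String[] args) throws Exception {
    JsonNode node = MAPPER.readTree("{\"b\":1,\"a\":2}");
    // Serializing the tree directly keeps insertion order ({"b":1,"a":2});
    // converting to plain Maps first lets the key-ordering feature apply.
    Object converted = MAPPER.treeToValue(node, Object.class);
    System.out.println(MAPPER.writeValueAsString(converted)); // {"a":2,"b":1}
  }
}
```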
framework_AbstractSelect_removePropertySetChangeListener | /**
* Removes a previously registered Property set change listener.
*
* @see Container.PropertySetChangeNotifier#removeListener(Container.PropertySetChangeListener)
*/
@Override
public void removePropertySetChangeListener(
Container.PropertySetChangeListener listener) {
if (propertySetEventListeners != null) {
propertySetEventListeners.remove(listener);
if (propertySetEventListeners.isEmpty()) {
propertySetEventListeners = null;
}
}
} | 3.68 |
dubbo_ApplicationConfig_getQosEnableCompatible | /**
* The format is the same as the springboot, including: getQosEnableCompatible(), getQosPortCompatible(), getQosAcceptForeignIpCompatible().
*
* @return
*/
@Parameter(key = QOS_ENABLE_COMPATIBLE, excluded = true, attribute = false)
public Boolean getQosEnableCompatible() {
return getQosEnable();
} | 3.68 |
graphhopper_RoundaboutInstruction_getTurnAngle | /**
* @return radian of angle -2PI < x < 2PI between roundabout entrance and exit values
* <ul>
* <li>> 0 is for clockwise rotation</li>
* <li>< 0 is for counterclockwise rotation</li>
* <li>NaN if direction of rotation is unclear</li>
* </ul>
*/
public double getTurnAngle() {
if (Math.abs(clockwise) != 1)
return Double.NaN;
else
return Math.PI * clockwise - radian;
} | 3.68 |
hudi_AbstractStreamWriteFunction_lastPendingInstant | /**
* Returns the last pending instant time.
*/
protected String lastPendingInstant() {
return this.ckpMetadata.lastPendingInstant();
} | 3.68 |
morf_Upgrade_selectUpgradeAuditTableCount | /**
* Creates a select statement which can be used to count the number of upgrade steps that have already been run
*/
private SelectStatement selectUpgradeAuditTableCount() {
TableReference upgradeAuditTable = tableRef(DatabaseUpgradeTableContribution.UPGRADE_AUDIT_NAME);
return select(count(upgradeAuditTable.field("upgradeUUID")))
.from(upgradeAuditTable)
.build();
} | 3.68 |
framework_VRichTextArea_getSanitizedValue | /**
* Browsers differ in what they return as the content of a visually empty
* rich text area. This method is used to normalize these to an empty
* string. See #8004.
*
* @return cleaned html string
*/
public String getSanitizedValue() {
BrowserInfo browser = BrowserInfo.get();
String result = getValue();
if (browser.isFirefox()) {
if ("<br>".equals(result) || "<div><br></div>".equals(result)) {
result = "";
}
} else if (browser.isWebkit() || browser.isEdge()) {
if ("<br>".equals(result) || "<div><br></div>".equals(result)) {
result = "";
}
} else if (browser.isIE()) {
if ("<P> </P>".equals(result) || "<p><br></p>".equals(result)) {
result = "";
}
} else if (browser.isOpera()) {
if ("<br>".equals(result) || "<p><br></p>".equals(result)) {
result = "";
}
}
return result;
} | 3.68 |
hudi_InstantStateHandler_getAllInstantStates | /**
 * Read instant states from the cache or the file system.
*
* @return Instant states under the input instant state path.
*/
public List<InstantStateDTO> getAllInstantStates(String instantStatePath) {
if (requestCount.incrementAndGet() >= timelineServiceConfig.instantStateForceRefreshRequestNumber) {
// Do refresh for every N requests to ensure the writers won't be blocked forever
refresh(instantStatePath);
}
return cachedInstantStates.computeIfAbsent(instantStatePath, k -> scanInstantState(new Path(k)));
} | 3.68 |
querydsl_DateTimeExpression_dayOfYear | /**
 * Create a day of year expression (range 1-366)
* <p>NOT supported in JDOQL and not in Derby</p>
*
* @return day of year
*/
public NumberExpression<Integer> dayOfYear() {
if (dayOfYear == null) {
dayOfYear = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.DAY_OF_YEAR, mixin);
}
return dayOfYear;
} | 3.68 |
hbase_CellUtil_matchingRows | /** Compares the row of two keyvalues for equality */
public static boolean matchingRows(final Cell left, final short lrowlength, final Cell right,
final short rrowlength) {
if (lrowlength != rrowlength) return false;
if (left instanceof ByteBufferExtendedCell && right instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getRowByteBuffer(),
((ByteBufferExtendedCell) left).getRowPosition(), lrowlength,
((ByteBufferExtendedCell) right).getRowByteBuffer(),
((ByteBufferExtendedCell) right).getRowPosition(), rrowlength);
}
if (left instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.equals(((ByteBufferExtendedCell) left).getRowByteBuffer(),
((ByteBufferExtendedCell) left).getRowPosition(), lrowlength, right.getRowArray(),
right.getRowOffset(), rrowlength);
}
if (right instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.equals(((ByteBufferExtendedCell) right).getRowByteBuffer(),
((ByteBufferExtendedCell) right).getRowPosition(), rrowlength, left.getRowArray(),
left.getRowOffset(), lrowlength);
}
return Bytes.equals(left.getRowArray(), left.getRowOffset(), lrowlength, right.getRowArray(),
right.getRowOffset(), rrowlength);
} | 3.68 |
flink_StreamTask_closeAllOperators | /** Closes all the operators if not closed before. */
private void closeAllOperators() throws Exception {
if (operatorChain != null && !closedOperators) {
closedOperators = true;
operatorChain.closeAllOperators();
}
} | 3.68 |
flink_DataType_getFieldDataTypes | /**
* Returns the first-level field data types for the provided {@link DataType}.
*
* <p>Note: This method returns an empty list for every {@link DataType} that is not a composite
* type.
*/
public static List<DataType> getFieldDataTypes(DataType dataType) {
final LogicalType type = dataType.getLogicalType();
if (type.is(LogicalTypeRoot.DISTINCT_TYPE)) {
return getFieldDataTypes(dataType.getChildren().get(0));
} else if (isCompositeType(type)) {
return dataType.getChildren();
}
return Collections.emptyList();
} | 3.68 |
hbase_ScheduledChore_getPeriod | /** Returns period to execute chore in getTimeUnit() units */
public int getPeriod() {
return period;
} | 3.68 |
hbase_ChoreService_getNumberOfScheduledChores | /** Returns number of chores that this service currently has scheduled */
int getNumberOfScheduledChores() {
return scheduledChores.size();
} | 3.68 |
rocketmq-connect_ConnectMetrics_group | /**
     * Get the metrics group for the given tags.
     *
     * @param tagKeyValues tag key/value pairs
     * @return the metric group
*/
public MetricGroup group(String... tagKeyValues) {
return new MetricGroup(getTags(tagKeyValues));
} | 3.68 |
AreaShop_FriendsFeature_deleteFriend | /**
* Delete a friend from the region.
* @param player The UUID of the player to delete
	 * @param by The CommandSender that is deleting the friend, or null
	 * @return true if the friend has been deleted, false if deleting the friend was cancelled by another plugin
*/
public boolean deleteFriend(UUID player, CommandSender by) {
// Fire and check event
DeletedFriendEvent event = new DeletedFriendEvent(getRegion(), Bukkit.getOfflinePlayer(player), by);
Bukkit.getPluginManager().callEvent(event);
if(event.isCancelled()) {
plugin.message(by, "general-cancelled", event.getReason(), this);
return false;
}
Set<String> friends = new HashSet<>(getRegion().getConfig().getStringList("general.friends"));
friends.remove(player.toString());
List<String> list = new ArrayList<>(friends);
if(list.isEmpty()) {
getRegion().setSetting("general.friends", null);
} else {
getRegion().setSetting("general.friends", list);
}
return true;
} | 3.68 |
framework_CompositeValidator_getSubValidators | /**
* Gets sub-validators by class.
*
* <p>
 * If the composite contains, directly or recursively (i.e. it contains
 * another composite containing the validator), validators compatible with
 * the given type, they are returned. This only applies to <code>AND</code>
 * mode composite validators.
* </p>
*
* <p>
 * If the validator is in <code>OR</code> mode or does not contain any
 * validators of the given type, null is returned.
* </p>
*
* @param validatorType
* The type of validators to return
*
* @return Collection<Validator> of validators compatible with given type
* that must apply or null if none found.
*/
public Collection<Validator> getSubValidators(Class validatorType) {
if (mode != CombinationMode.AND) {
return null;
}
final HashSet<Validator> found = new HashSet<Validator>();
for (Validator v : validators) {
if (validatorType.isAssignableFrom(v.getClass())) {
found.add(v);
}
if (v instanceof CompositeValidator
&& ((CompositeValidator) v).getMode() == MODE_AND) {
final Collection<Validator> c = ((CompositeValidator) v)
.getSubValidators(validatorType);
if (c != null) {
found.addAll(c);
}
}
}
return found.isEmpty() ? null : found;
} | 3.68 |
AreaShop_Analytics_start | /**
* Start analytics tracking.
*/
public static void start() {
// bStats statistics
try {
Metrics metrics = new Metrics(AreaShop.getInstance());
// Number of regions
metrics.addCustomChart(new Metrics.SingleLineChart("region_count") {
@Override
public int getValue() {
return AreaShop.getInstance().getFileManager().getRegions().size();
}
});
// Number of rental regions
metrics.addCustomChart(new Metrics.SingleLineChart("rental_region_count") {
@Override
public int getValue() {
return AreaShop.getInstance().getFileManager().getRents().size();
}
});
// Number of buy regions
metrics.addCustomChart(new Metrics.SingleLineChart("buy_region_count") {
@Override
public int getValue() {
return AreaShop.getInstance().getFileManager().getBuys().size();
}
});
// Language
metrics.addCustomChart(new Metrics.SimplePie("language") {
@Override
public String getValue() {
return AreaShop.getInstance().getConfig().getString("language");
}
});
// Pie with region states
metrics.addCustomChart(new Metrics.AdvancedPie("region_state") {
@Override
public HashMap<String, Integer> getValues(HashMap<String, Integer> result) {
RegionStateStats stats = getStateStats();
result.put("For Rent", stats.forrent);
result.put("Rented", stats.rented);
result.put("For Sale", stats.forsale);
result.put("Sold", stats.sold);
result.put("Reselling", stats.reselling);
return result;
}
});
// Time series of each region state
metrics.addCustomChart(new Metrics.SingleLineChart("forrent_region_count") {
@Override
public int getValue() {
return getStateStats().forrent;
}
});
metrics.addCustomChart(new Metrics.SingleLineChart("rented_region_count") {
@Override
public int getValue() {
return getStateStats().rented;
}
});
metrics.addCustomChart(new Metrics.SingleLineChart("forsale_region_count") {
@Override
public int getValue() {
return getStateStats().forsale;
}
});
metrics.addCustomChart(new Metrics.SingleLineChart("sold_region_count") {
@Override
public int getValue() {
return getStateStats().sold;
}
});
metrics.addCustomChart(new Metrics.SingleLineChart("reselling_region_count") {
@Override
public int getValue() {
return getStateStats().reselling;
}
});
// TODO track rent/buy/unrent/sell/resell actions (so that it can be reported per collection interval)
AreaShop.debug("Started bstats.org statistics service");
} catch(Exception e) {
AreaShop.debug("Could not start bstats.org statistics service");
}
} | 3.68 |
hudi_BaseHoodieWriteClient_deleteSavepoint | /**
 * Delete a savepoint that was created. Once the savepoint is deleted, the commit can be rolled back and the cleaner may
* clean up data files.
*
* @param savepointTime Savepoint time to delete
*/
public void deleteSavepoint(String savepointTime) {
HoodieTable<T, I, K, O> table = createTable(config, hadoopConf);
SavepointHelpers.deleteSavepoint(table, savepointTime);
} | 3.68 |
morf_DataSetUtils_dataSetProducer | /**
* Build a data set producer.
*
* @see DataSetProducerBuilder
* @param schema The schema backing the dataset
* @return A {@link DataSetProducerBuilder}.
*/
public static DataSetProducerBuilder dataSetProducer(Schema schema) {
return new DataSetProducerBuilderImpl(schema);
} | 3.68 |
framework_CacheFlushNotifier_notifyOfCacheFlush | /**
* Iterates through the instances and notifies containers which are
* connected to the same table or are using the same query string.
*
* @param c
* SQLContainer that issued the cache flush notification
*/
public static void notifyOfCacheFlush(SQLContainer c) {
removeDeadReferences();
for (WeakReference<SQLContainer> wr : allInstances) {
if (wr.get() != null) {
SQLContainer wrc = wr.get();
if (wrc == null) {
continue;
}
/*
* If the reference points to the container sending the
* notification, do nothing.
*/
if (wrc.equals(c)) {
continue;
}
/* Compare QueryDelegate types and tableName/queryString */
QueryDelegate wrQd = wrc.getQueryDelegate();
QueryDelegate qd = c.getQueryDelegate();
if (wrQd instanceof TableQuery && qd instanceof TableQuery
&& ((TableQuery) wrQd).getTableName()
.equals(((TableQuery) qd).getTableName())) {
wrc.refresh();
} else if (wrQd instanceof FreeformQuery
&& qd instanceof FreeformQuery
&& ((FreeformQuery) wrQd).getQueryString().equals(
((FreeformQuery) qd).getQueryString())) {
wrc.refresh();
}
}
}
} | 3.68 |
flink_AsyncSinkBaseBuilder_setMaxBufferedRequests | /**
* @param maxBufferedRequests the maximum buffer length. Callbacks to add elements to the buffer
* and calls to write will block if this length has been reached and will only unblock if
* elements from the buffer have been removed for flushing.
* @return {@link ConcreteBuilderT} itself
*/
public ConcreteBuilderT setMaxBufferedRequests(int maxBufferedRequests) {
this.maxBufferedRequests = maxBufferedRequests;
return (ConcreteBuilderT) this;
} | 3.68 |
flink_ShowCreateUtil_buildShowCreateViewRow | /** Show create view statement only for views. */
public static String buildShowCreateViewRow(
ResolvedCatalogBaseTable<?> view,
ObjectIdentifier viewIdentifier,
boolean isTemporary) {
if (view.getTableKind() != CatalogBaseTable.TableKind.VIEW) {
throw new TableException(
String.format(
"SHOW CREATE VIEW is only supported for views, but %s is a table. Please use SHOW CREATE TABLE instead.",
viewIdentifier.asSerializableString()));
}
StringBuilder stringBuilder = new StringBuilder();
if (view.getOrigin() instanceof QueryOperationCatalogView) {
throw new TableException(
"SHOW CREATE VIEW is not supported for views registered by Table API.");
} else {
stringBuilder.append(
String.format(
"CREATE %sVIEW %s%s as%s%s",
isTemporary ? "TEMPORARY " : "",
viewIdentifier.asSerializableString(),
String.format("(%s)", extractFormattedColumnNames(view)),
System.lineSeparator(),
((CatalogView) view.getOrigin()).getExpandedQuery()));
}
extractFormattedComment(view)
.ifPresent(
c ->
stringBuilder.append(
String.format(
" COMMENT '%s'%s", c, System.lineSeparator())));
return stringBuilder.toString();
} | 3.68 |