name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_AsyncAdmin_getMasterInfoPort | /**
* Get the info port of the current master if one is available.
* @return master info port
*/
default CompletableFuture<Integer> getMasterInfoPort() {
return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT))
.thenApply(ClusterMetrics::getMasterInfoPort);
} | 3.68 |
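A minimal usage sketch, not part of the snippet above: it assumes an existing AsyncAdmin instance named "admin" and simply consumes the returned future without blocking.

```java
// Hypothetical usage: print the master info port once the future completes.
admin.getMasterInfoPort()
    .thenAccept(port -> System.out.println("Master info port: " + port));
```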
framework_UIConnector_init | /**
* Initialize UIConnector and attach UI to RootPanel for rootPanelId
* element.
*
* @param rootPanelId
* root panel element id
* @param applicationConnection
* application connection
*/
public void init(String rootPanelId,
ApplicationConnection applicationConnection) {
initConnector(RootPanel.get(rootPanelId), applicationConnection);
} | 3.68 |
framework_Color_setAlpha | /**
* Sets the alpha value of the color. Value must be within the range [0,
* 255].
*
* @param alpha
* new alpha value
*/
public void setAlpha(int alpha) {
if (withinRange(alpha)) {
this.alpha = alpha;
} else {
throw new IllegalArgumentException(OUTOFRANGE + alpha);
}
} | 3.68 |
hadoop_BlockManagerParameters_withMaxBlocksCount | /**
* Sets the max blocks count to be kept in cache at any time.
*
* @param blocksCount The max blocks count.
* @return The builder.
*/
public BlockManagerParameters withMaxBlocksCount(
final int blocksCount) {
this.maxBlocksCount = blocksCount;
return this;
} | 3.68 |
querydsl_AliasFactory_setCurrent | /**
* Set the thread bound expression to the given value
*
* @param expr expression to be set to current
*/
public void setCurrent(Expression<?> expr) {
current.set(expr);
} | 3.68 |
querydsl_SQLExpressions_union | /**
* Create a new UNION clause
*
* @param sq subqueries
* @param <T>
* @return union
*/
public static <T> Union<T> union(List<SubQueryExpression<T>> sq) {
return new SQLQuery<Void>().union(sq);
} | 3.68 |
hmily_YamlProcessor_getMostSpecific | /**
* Compare two {@link MatchStatus} items, returning the most specific status.
*
* @param a the a
* @param b the b
* @return the most specific
*/
public static MatchStatus getMostSpecific(final MatchStatus a, final MatchStatus b) {
return a.ordinal() < b.ordinal() ? a : b;
} | 3.68 |
morf_AbstractSqlDialectTest_testChangeIndexFollowedByChangeOfAssociatedColumn | /**
 * Tests that after changing an index, a column from that index can be changed afterwards.
*/
@Test
@SuppressWarnings("unchecked")
public void testChangeIndexFollowedByChangeOfAssociatedColumn() {
Schema schema;
// alter an index
// note the different case
ChangeIndex changeIndex = new ChangeIndex(TEST_TABLE,
index(TEST_1).columns(INT_FIELD, FLOAT_FIELD).unique(),
index(TEST_1).columns("INTFIELD"));
schema = changeIndex.apply(metadata);
Table tableAfterChangeIndex = schema.getTable(TEST_TABLE);
Collection<String> dropIndexStatements = testDialect.indexDropStatements(tableAfterChangeIndex, index(TEST_1).columns(INT_FIELD, FLOAT_FIELD).unique());
Collection<String> addIndexStatements = testDialect.addIndexStatements(tableAfterChangeIndex, index(TEST_1).columns(INT_FIELD));
// then alter a column in that index
ChangeColumn changeColumn = new ChangeColumn(TEST_TABLE,
column(INT_FIELD, DataType.INTEGER).nullable(),
column(INT_FIELD, DataType.INTEGER));
schema = changeColumn.apply(schema);
Table tableAfterModifyColumn = schema.getTable(TEST_TABLE);
Collection<String> changeColumnStatements = testDialect.alterTableChangeColumnStatements(tableAfterModifyColumn,
column(INT_FIELD, DataType.INTEGER).nullable(),
column(INT_FIELD, DataType.INTEGER));
compareStatements(expectedChangeIndexFollowedByChangeOfAssociatedColumnStatement(),
dropIndexStatements, addIndexStatements, changeColumnStatements);
} | 3.68 |
hadoop_AuxServiceFile_toIndentedString | /**
* Convert the given object to string with each line indented by 4 spaces
* (except the first line).
*/
private String toIndentedString(java.lang.Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n ");
} | 3.68 |
flink_StaticFileServerHandler_checkFileValidity | /**
* Checks various conditions for file access. If all checks pass this method returns, and
* processing of the request may continue. If any check fails this method throws a {@link
* RestHandlerException}, and further processing of the request must be limited to sending an
* error response.
*/
public static void checkFileValidity(File file, File rootPath, Logger logger)
throws IOException, RestHandlerException {
// this check must be done first to prevent probing for arbitrary files
if (!file.getCanonicalFile().toPath().startsWith(rootPath.toPath())) {
if (logger.isDebugEnabled()) {
logger.debug(
"Requested path {} points outside the root directory.",
file.getAbsolutePath());
}
throw new RestHandlerException("Forbidden.", FORBIDDEN);
}
if (!file.exists() || file.isHidden()) {
if (logger.isDebugEnabled()) {
logger.debug("Requested path {} cannot be found.", file.getAbsolutePath());
}
throw new RestHandlerException("File not found.", NOT_FOUND);
}
if (file.isDirectory() || !file.isFile()) {
if (logger.isDebugEnabled()) {
logger.debug("Requested path {} does not point to a file.", file.getAbsolutePath());
}
throw new RestHandlerException("File not found.", METHOD_NOT_ALLOWED);
}
} | 3.68 |
hadoop_RegexMountPoint_getVarListInString | /**
 * Get $var1 and $var2 style variables in the given string.
 *
 * @param input the string to be processed.
 * @return a map from each stripped variable name to the variable forms found in the input.
*/
public static Map<String, Set<String>> getVarListInString(String input) {
Map<String, Set<String>> varMap = new HashMap<>();
Matcher matcher = VAR_PATTERN_IN_DEST.matcher(input);
while (matcher.find()) {
// $var or ${var}
String varName = matcher.group(0);
// var or {var}
String strippedVarName = matcher.group(1);
if (strippedVarName.startsWith("{")) {
// {varName} = > varName
strippedVarName =
strippedVarName.substring(1, strippedVarName.length() - 1);
}
varMap.putIfAbsent(strippedVarName, new HashSet<>());
varMap.get(strippedVarName).add(varName);
}
return varMap;
} | 3.68 |
hudi_HoodieBloomIndex_isImplicitWithStorage | /**
 * Bloom filters are stored in the same data files.
*/
@Override
public boolean isImplicitWithStorage() {
return true;
} | 3.68 |
morf_UpgradePathFinder_getSchemaChangeSequence | /**
* Returns a {@link SchemaChangeSequence} from all steps to apply.
* @return All the steps to apply
*/
public SchemaChangeSequence getSchemaChangeSequence() {
List<UpgradeStep> upgradeSteps = Lists.newArrayList();
for (CandidateStep upgradeStepClass : stepsToApply) {
upgradeSteps.add(upgradeStepClass.createStep());
}
return new SchemaChangeSequence(upgradeSteps);
} | 3.68 |
framework_VCalendar_recalculateWidths | /**
* Recalculates the widths of the sub-components in the calendar.
*/
protected void recalculateWidths() {
if (!isWidthUndefined) {
nameToolbar.setWidthPX(intWidth);
dayToolbar.setWidthPX(intWidth);
if (monthGrid != null) {
monthGrid.updateCellSizes(
intWidth - weekToolbar.getOffsetWidth(),
intHeight - nameToolbar.getOffsetHeight());
} else if (weekGrid != null) {
weekGrid.setWidthPX(intWidth);
weeklyLongEvents.setWidthPX(weekGrid.getInternalWidth());
}
} else {
dayToolbar.setWidthPX(intWidth);
nameToolbar.setWidthPX(intWidth);
if (monthGrid != null) {
if (intWidth == -1) {
monthGrid.addStyleDependentName("sizedwidth");
} else {
monthGrid.removeStyleDependentName("sizedwidth");
}
} else if (weekGrid != null) {
weekGrid.setWidthPX(intWidth);
weeklyLongEvents.setWidthPX(weekGrid.getInternalWidth());
}
}
} | 3.68 |
hudi_BaseWriteHelper_deduplicateRecords | /**
 * Deduplicates Hoodie records using the record merger configured on the given table.
 *
 * @param records     hoodieRecords to deduplicate
 * @param table       the Hoodie table providing the index, schema and record merger
 * @param parallelism parallelism or partitions to be used while reducing/deduplicating
 * @return Collection of HoodieRecord that have already been deduplicated
*/
public I deduplicateRecords(I records, HoodieTable<T, I, K, O> table, int parallelism) {
HoodieRecordMerger recordMerger = HoodieRecordUtils.mergerToPreCombineMode(table.getConfig().getRecordMerger());
return deduplicateRecords(records, table.getIndex(), parallelism, table.getConfig().getSchema(), table.getConfig().getProps(), recordMerger);
} | 3.68 |
framework_VAbstractCalendarPanel_isInitialRenderDone | /**
* Returns the value of initialRenderDone.
*
* @since 8.7
*
* @return {@code true} if the initial render has been marked as done,
* {@code false} otherwise
*/
public boolean isInitialRenderDone() {
return initialRenderDone;
} | 3.68 |
flink_VertexThreadInfoTrackerBuilder_setJobVertexStatsCache | /**
* Sets {@code jobVertexStatsCache}. This is currently only used for testing.
*
* @param jobVertexStatsCache The Cache instance to use for caching statistics. Will use the
* default defined in {@link VertexThreadInfoTrackerBuilder#defaultCache()} if not set.
* @return Builder.
*/
@VisibleForTesting
VertexThreadInfoTrackerBuilder setJobVertexStatsCache(
Cache<VertexThreadInfoTracker.JobVertexKey, VertexThreadInfoStats>
jobVertexStatsCache) {
this.jobVertexStatsCache = jobVertexStatsCache;
return this;
} | 3.68 |
druid_MySQL8DateTimeResultSetMetaData_getColumnClassName | /**
 * Starting with MySQL Connector/J 8.0.24, if a MySQL DATETIME column is mapped back to Timestamp,
 * the reported Java class type must be changed back as well.
 * The relevant definitions live in com.mysql.cj.MysqlType:
 * older JDBC versions declared
 * DATETIME("DATETIME", Types.TIMESTAMP, Timestamp.class, 0, MysqlType.IS_NOT_DECIMAL, 26L, "[(fsp)]"),
 * while 8.0.24 and later declare
 * DATETIME("DATETIME", Types.TIMESTAMP, LocalDateTime.class, 0, MysqlType.IS_NOT_DECIMAL, 26L, "[(fsp)]"),
 * @param column the column index
 * @return the column's Java class name, with LocalDateTime mapped back to Timestamp
* @see java.sql.ResultSetMetaData#getColumnClassName(int)
* @throws SQLException
*/
@Override
public String getColumnClassName(int column) throws SQLException {
String className = resultSetMetaData.getColumnClassName(column);
if (LocalDateTime.class.getName().equals(className)) {
return Timestamp.class.getName();
}
return className;
} | 3.68 |
flink_ClusterClient_getAccumulators | /**
* Requests and returns the accumulators for the given job identifier. Accumulators can be
 * requested while a job is running or after it has finished. The default class loader is used to
* deserialize the incoming accumulator results.
*
* @param jobID The job identifier of a job.
* @return A Map containing the accumulator's name and its value.
*/
default CompletableFuture<Map<String, Object>> getAccumulators(JobID jobID) {
return getAccumulators(jobID, ClassLoader.getSystemClassLoader());
} | 3.68 |
hbase_IPCUtil_createRemoteException | /**
* @param e exception to be wrapped
* @return RemoteException made from passed <code>e</code>
*/
static RemoteException createRemoteException(final ExceptionResponse e) {
String innerExceptionClassName = e.getExceptionClassName();
boolean doNotRetry = e.getDoNotRetry();
boolean serverOverloaded = e.hasServerOverloaded() && e.getServerOverloaded();
return e.hasHostname() ?
// If a hostname then add it to the RemoteWithExtrasException
new RemoteWithExtrasException(innerExceptionClassName, e.getStackTrace(), e.getHostname(),
e.getPort(), doNotRetry, serverOverloaded)
: new RemoteWithExtrasException(innerExceptionClassName, e.getStackTrace(), doNotRetry,
serverOverloaded);
} | 3.68 |
hbase_FSTableDescriptors_remove | /**
* Removes the table descriptor from the local cache and returns it. If not in read only mode, it
* also deletes the entire table directory(!) from the FileSystem.
*/
@Override
public TableDescriptor remove(final TableName tablename) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
}
Path tabledir = getTableDir(tablename);
if (this.fs.exists(tabledir)) {
if (!this.fs.delete(tabledir, true)) {
throw new IOException("Failed delete of " + tabledir.toString());
}
}
TableDescriptor descriptor = this.cache.remove(tablename);
return descriptor;
} | 3.68 |
hbase_BulkLoadObserver_prePrepareBulkLoad | /**
* Called as part of SecureBulkLoadEndpoint.prepareBulkLoad() RPC call. It can't bypass the
* default action, e.g., ctx.bypass() won't have effect. If you need to get the region or table
* name, get it from the <code>ctx</code> as follows:
 * <code>ctx.getEnvironment().getRegion()</code>. Use getRegionInfo to fetch the encodedName
* and use getDescriptor() to get the tableName.
* @param ctx the environment to interact with the framework and master
*/
default void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
framework_GridSingleSelect_addSelectionListener | /**
* Adds a generic listener to this selection model, accepting both single
* and multiselection events.
* <p>
* Use {@link #addSingleSelectionListener(SingleSelectionListener)} for more
* specific single selection event.
*
* @see #addSingleSelectionListener(SingleSelectionListener)
*
* @param listener
* the listener to add
* @return a registration handle for removing the listener
*/
public Registration addSelectionListener(SelectionListener<T> listener) {
return model.addSelectionListener(listener);
} | 3.68 |
dubbo_SingleRouterChain_setHeadStateRouter | /**
 * For unit tests only.
*/
@Deprecated
public void setHeadStateRouter(StateRouter<T> headStateRouter) {
this.headStateRouter = headStateRouter;
} | 3.68 |
hbase_QuotaSettingsFactory_unthrottleUser | /**
* Remove the throttling for the specified user on the specified namespace.
* @param userName the user
* @param namespace the namespace
* @return the quota settings
*/
public static QuotaSettings unthrottleUser(final String userName, final String namespace) {
return throttle(userName, null, namespace, null, null, 0, null, QuotaScope.MACHINE);
} | 3.68 |
hbase_Query_getIsolationLevel | /**
 * Returns the isolation level of this query. If no isolation level was set for this query
 * object, then it returns READ_COMMITTED.
*/
public IsolationLevel getIsolationLevel() {
byte[] attr = getAttribute(ISOLATION_LEVEL);
return attr == null ? IsolationLevel.READ_COMMITTED : IsolationLevel.fromBytes(attr);
} | 3.68 |
flink_TaskInfo_getNumberOfParallelSubtasks | /**
* Gets the parallelism with which the parallel task runs.
*
* @return The parallelism with which the parallel task runs.
*/
public int getNumberOfParallelSubtasks() {
return this.numberOfParallelSubtasks;
} | 3.68 |
hbase_RSGroupInfoManagerImpl_checkForDeadOrOnlineServers | /**
 * Check whether the given servers belong to the dead servers list or the online servers list.
* @param servers servers to remove
*/
private void checkForDeadOrOnlineServers(Set<Address> servers) throws IOException {
// This ugliness is because we only have Address, not ServerName.
Set<Address> onlineServers = new HashSet<>();
List<ServerName> drainingServers = masterServices.getServerManager().getDrainingServersList();
for (ServerName server : masterServices.getServerManager().getOnlineServers().keySet()) {
// Only online but not decommissioned servers are really online
if (!drainingServers.contains(server)) {
onlineServers.add(server.getAddress());
}
}
Set<Address> deadServers = new HashSet<>();
for (ServerName server : masterServices.getServerManager().getDeadServers().copyServerNames()) {
deadServers.add(server.getAddress());
}
for (Address address : servers) {
if (onlineServers.contains(address)) {
throw new DoNotRetryIOException(
"Server " + address + " is an online server, not allowed to remove.");
}
if (deadServers.contains(address)) {
throw new DoNotRetryIOException("Server " + address + " is on the dead servers list,"
+ " Maybe it will come back again, not allowed to remove.");
}
}
} | 3.68 |
pulsar_BrokerDiscoveryProvider_getAvailableBrokers | /**
* Access the list of available brokers.
 * Used by protocol handlers.
* @return the list of available brokers
* @throws PulsarServerException
*/
public List<? extends ServiceLookupData> getAvailableBrokers() throws PulsarServerException {
return metadataStoreCacheLoader.getAvailableBrokers();
} | 3.68 |
hudi_InternalSchemaBuilder_buildIdToField | /**
 * Build a mapping from id to field for an internal Type.
*
* @param type hoodie internal type
* @return a mapping from id to field
*/
public Map<Integer, Types.Field> buildIdToField(Type type) {
Map<Integer, Types.Field> idToField = new HashMap<>();
visitIdToField(type, idToField);
return idToField;
} | 3.68 |
hudi_Table_flip | /**
* API to let the table know writing is over and reading is going to start.
*/
public Table flip() {
this.finishedAdding = true;
sortAndLimit();
return this;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithExceptStatementsWithDbLinkFormer | /**
* Tests the generation of SQL string for a query with EXCEPT operator where a
* former select uses DB-link.
*/
@Test
public void testSelectWithExceptStatementsWithDbLinkFormer() {
assumeTrue("for dialects with no EXCEPT operation support the test will be skipped.", expectedSelectWithExceptAndDbLinkFormer() != null);
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD))
.from(new TableReference(null, TEST_TABLE, DBLINK_NAME))
.except(new SelectStatement(new FieldReference(STRING_FIELD)).from(new TableReference(null, OTHER_TABLE)))
.orderBy(new FieldReference(STRING_FIELD));
String result = testDialect.convertStatementToSQL(stmt);
assertEquals("Select script should match expected", expectedSelectWithExceptAndDbLinkFormer(), result);
} | 3.68 |
framework_UIProvider_isPreservedOnRefresh | /**
* Checks whether the same UI state should be reused if the framework can
* detect that the application is opened in a browser window where it has
* previously been open. The framework attempts to discover this by checking
* the value of window.name in the browser.
* <p>
* Whenever a preserved UI is reused, its
* {@link UI#refresh(com.vaadin.server.VaadinRequest) refresh} method is
* invoked by the framework first.
*
*
* @param event
* the UI create event with information about the UI and the
* current request.
*
* @return <code>true</code>if the same UI instance should be reused e.g.
* when the browser window is refreshed.
*/
public boolean isPreservedOnRefresh(UICreateEvent event) {
PreserveOnRefresh preserveOnRefresh = getAnnotationFor(
event.getUIClass(), PreserveOnRefresh.class);
return preserveOnRefresh != null;
} | 3.68 |
hadoop_ReverseZoneUtils_getReverseZoneNetworkAddress | /**
* Given a baseIp, range and index, return the network address for the
* reverse zone.
*
* @param baseIp base ip address to perform calculations against.
* @param range number of ip addresses per subnet.
* @param index the index of the subnet to calculate.
* @return the calculated ip address.
* @throws UnknownHostException if an invalid ip is provided.
*/
protected static String getReverseZoneNetworkAddress(String baseIp, int range,
int index) throws UnknownHostException {
if (index < 0) {
throw new IllegalArgumentException(
String.format("Invalid index provided, must be positive: %d", index));
}
if (range < 0) {
throw new IllegalArgumentException(
String.format("Invalid range provided, cannot be negative: %d",
range));
}
return calculateIp(baseIp, range, index);
} | 3.68 |
hbase_RpcExecutor_resizeQueues | /**
* Update current soft limit for executor's call queues
* @param conf updated configuration
*/
public void resizeQueues(Configuration conf) {
String configKey = RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH;
if (name != null) {
if (name.toLowerCase(Locale.ROOT).contains("priority")) {
configKey = RpcScheduler.IPC_SERVER_PRIORITY_MAX_CALLQUEUE_LENGTH;
} else if (name.toLowerCase(Locale.ROOT).contains("replication")) {
configKey = RpcScheduler.IPC_SERVER_REPLICATION_MAX_CALLQUEUE_LENGTH;
} else if (name.toLowerCase(Locale.ROOT).contains("bulkload")) {
configKey = RpcScheduler.IPC_SERVER_BULKLOAD_MAX_CALLQUEUE_LENGTH;
}
}
final int queueLimit = currentQueueLimit;
currentQueueLimit = conf.getInt(configKey, queueLimit);
} | 3.68 |
querydsl_CurveExpression_startPoint | /**
* The start Point of this Curve.
*
* @return start point
*/
public PointExpression<Point> startPoint() {
if (startPoint == null) {
startPoint = GeometryExpressions.pointOperation(SpatialOps.START_POINT, mixin);
}
return startPoint;
} | 3.68 |
hadoop_LogAggregationWebUtils_getLogStartIndex | /**
* Parse start index from html.
* @param html the html
* @param startStr the start index string
* @return the startIndex
*/
public static long getLogStartIndex(Block html, String startStr)
throws NumberFormatException {
long start = -4096;
if (startStr != null && !startStr.isEmpty()) {
start = Long.parseLong(startStr);
}
return start;
} | 3.68 |
hudi_HoodieTableFactory_setupTableOptions | /**
* Supplement the table config options if not specified.
*/
private void setupTableOptions(String basePath, Configuration conf) {
StreamerUtil.getTableConfig(basePath, HadoopConfigurations.getHadoopConf(conf))
.ifPresent(tableConfig -> {
if (tableConfig.contains(HoodieTableConfig.RECORDKEY_FIELDS)
&& !conf.contains(FlinkOptions.RECORD_KEY_FIELD)) {
conf.setString(FlinkOptions.RECORD_KEY_FIELD, tableConfig.getString(HoodieTableConfig.RECORDKEY_FIELDS));
}
if (tableConfig.contains(HoodieTableConfig.PRECOMBINE_FIELD)
&& !conf.contains(FlinkOptions.PRECOMBINE_FIELD)) {
conf.setString(FlinkOptions.PRECOMBINE_FIELD, tableConfig.getString(HoodieTableConfig.PRECOMBINE_FIELD));
}
if (tableConfig.contains(HoodieTableConfig.HIVE_STYLE_PARTITIONING_ENABLE)
&& !conf.contains(FlinkOptions.HIVE_STYLE_PARTITIONING)) {
conf.setBoolean(FlinkOptions.HIVE_STYLE_PARTITIONING, tableConfig.getBoolean(HoodieTableConfig.HIVE_STYLE_PARTITIONING_ENABLE));
}
if (tableConfig.contains(HoodieTableConfig.TYPE)
&& !conf.contains(FlinkOptions.TABLE_TYPE)) {
conf.setString(FlinkOptions.TABLE_TYPE, tableConfig.getString(HoodieTableConfig.TYPE));
}
if (tableConfig.contains(HoodieTableConfig.PAYLOAD_CLASS_NAME)
&& !conf.contains(FlinkOptions.PAYLOAD_CLASS_NAME)) {
conf.setString(FlinkOptions.PAYLOAD_CLASS_NAME, tableConfig.getString(HoodieTableConfig.PAYLOAD_CLASS_NAME));
}
if (tableConfig.contains(HoodieTableConfig.PAYLOAD_TYPE)
&& !conf.contains(FlinkOptions.PAYLOAD_CLASS_NAME)) {
conf.setString(FlinkOptions.PAYLOAD_CLASS_NAME, tableConfig.getPayloadClass());
}
});
} | 3.68 |
hudi_ExpressionEvaluators_fromExpression | /**
 * Converts a specific call expression to an evaluator.
* <p>Two steps to bind the call:
* 1. map the evaluator instance;
* 2. bind the field reference;
*
* <p>Normalize the expression to simplify the subsequent decision logic:
* always put the literal expression in the RHS.
*/
public static Evaluator fromExpression(CallExpression expr) {
FunctionDefinition funDef = expr.getFunctionDefinition();
List<Expression> childExprs = expr.getChildren();
boolean normalized = childExprs.get(0) instanceof FieldReferenceExpression;
if (BuiltInFunctionDefinitions.NOT.equals(funDef)) {
Not evaluator = Not.getInstance();
Evaluator childEvaluator = fromExpression((CallExpression) childExprs.get(0));
return evaluator.bindEvaluator(childEvaluator);
}
if (BuiltInFunctionDefinitions.AND.equals(funDef)) {
And evaluator = And.getInstance();
Evaluator evaluator1 = fromExpression((CallExpression) childExprs.get(0));
Evaluator evaluator2 = fromExpression((CallExpression) childExprs.get(1));
return evaluator.bindEvaluator(evaluator1, evaluator2);
}
if (BuiltInFunctionDefinitions.OR.equals(funDef)) {
Or evaluator = Or.getInstance();
Evaluator evaluator1 = fromExpression((CallExpression) childExprs.get(0));
Evaluator evaluator2 = fromExpression((CallExpression) childExprs.get(1));
return evaluator.bindEvaluator(evaluator1, evaluator2);
}
// handle unary operators
if (BuiltInFunctionDefinitions.IS_NULL.equals(funDef)) {
FieldReferenceExpression rExpr = (FieldReferenceExpression) childExprs.get(0);
return IsNull.getInstance()
.bindFieldReference(rExpr);
} else if (BuiltInFunctionDefinitions.IS_NOT_NULL.equals(funDef)) {
FieldReferenceExpression rExpr = (FieldReferenceExpression) childExprs.get(0);
return IsNotNull.getInstance()
.bindFieldReference(rExpr);
}
boolean hasNullLiteral =
childExprs.stream().anyMatch(e ->
e instanceof ValueLiteralExpression
&& ExpressionUtils.getValueFromLiteral((ValueLiteralExpression) e) == null);
if (hasNullLiteral) {
return AlwaysFalse.getInstance();
}
// handle IN specifically
if (BuiltInFunctionDefinitions.IN.equals(funDef)) {
ValidationUtils.checkState(normalized, "The IN expression expects to be normalized");
In in = In.getInstance();
FieldReferenceExpression rExpr = (FieldReferenceExpression) childExprs.get(0);
in.bindFieldReference(rExpr);
in.bindVals(getInLiteralVals(childExprs));
return in;
}
NullFalseEvaluator evaluator;
// handle binary operators
if (BuiltInFunctionDefinitions.EQUALS.equals(funDef)) {
evaluator = EqualTo.getInstance();
} else if (BuiltInFunctionDefinitions.NOT_EQUALS.equals(funDef)) {
evaluator = NotEqualTo.getInstance();
} else if (BuiltInFunctionDefinitions.LESS_THAN.equals(funDef)) {
evaluator = normalized ? LessThan.getInstance() : GreaterThan.getInstance();
} else if (BuiltInFunctionDefinitions.GREATER_THAN.equals(funDef)) {
evaluator = normalized ? GreaterThan.getInstance() : LessThan.getInstance();
} else if (BuiltInFunctionDefinitions.LESS_THAN_OR_EQUAL.equals(funDef)) {
evaluator = normalized ? LessThanOrEqual.getInstance() : GreaterThanOrEqual.getInstance();
} else if (BuiltInFunctionDefinitions.GREATER_THAN_OR_EQUAL.equals(funDef)) {
evaluator = normalized ? GreaterThanOrEqual.getInstance() : LessThanOrEqual.getInstance();
} else {
throw new AssertionError("Unexpected function definition " + funDef);
}
FieldReferenceExpression rExpr = normalized
? (FieldReferenceExpression) childExprs.get(0)
: (FieldReferenceExpression) childExprs.get(1);
ValueLiteralExpression vExpr = normalized
? (ValueLiteralExpression) childExprs.get(1)
: (ValueLiteralExpression) childExprs.get(0);
evaluator
.bindVal(vExpr)
.bindFieldReference(rExpr);
return evaluator;
} | 3.68 |
hbase_ByteBufferWriterOutputStream_write | /**
* Writes len bytes from the specified ByteBuffer starting at offset off to this OutputStream. If
* off is negative or larger than the ByteBuffer then an ArrayIndexOutOfBoundsException is thrown.
* If len is greater than the length of the ByteBuffer, then an ArrayIndexOutOfBoundsException is
* thrown. This method does not change the position of the ByteBuffer.
* @param b the ByteBuffer
* @param off the start offset in the data
* @param len the number of bytes to write
* @throws IOException if an I/O error occurs. In particular, an IOException is thrown if
* the output stream is closed.
* @throws NullPointerException if {@code b} is {@code null}
*/
@Override
public void write(ByteBuffer b, int off, int len) throws IOException {
Objects.requireNonNull(b);
// Lazily load in the event that this version of 'write' is not invoked
if (this.buf == null) {
this.buf = new byte[this.bufSize];
}
int totalCopied = 0;
while (totalCopied < len) {
int bytesToCopy = Math.min((len - totalCopied), this.bufSize);
ByteBufferUtils.copyFromBufferToArray(this.buf, b, off + totalCopied, 0, bytesToCopy);
this.os.write(this.buf, 0, bytesToCopy);
totalCopied += bytesToCopy;
}
} | 3.68 |
flink_RecordCounter_of | /**
* Creates a {@link RecordCounter} depends on the index of count(*). If index is less than zero,
* returns {@link AccumulationRecordCounter}, otherwise, {@link RetractionRecordCounter}.
*
* @param indexOfCountStar The index of COUNT(*) in the aggregates. -1 when the input doesn't
* contain COUNT(*), i.e. doesn't contain retraction messages. We make sure there is a
* COUNT(*) if input stream contains retraction.
*/
public static RecordCounter of(int indexOfCountStar) {
if (indexOfCountStar >= 0) {
return new RetractionRecordCounter(indexOfCountStar);
} else {
return new AccumulationRecordCounter();
}
} | 3.68 |
hbase_ReplicationPeerManager_isStringEquals | /**
 * For a replication peer cluster key or endpoint class, null and an empty string are treated as
 * the same, so we don't use {@link StringUtils#equals(CharSequence, CharSequence)} directly here.
*/
private boolean isStringEquals(String s1, String s2) {
if (StringUtils.isBlank(s1)) {
return StringUtils.isBlank(s2);
}
return s1.equals(s2);
} | 3.68 |
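The intended semantics, shown as an illustrative truth table; the method itself is private, so this is only a sketch of its behaviour, not a callable example.

```java
// isStringEquals(null, "")     -> true   (null and empty are treated the same)
// isStringEquals("", "  ")     -> true   (blank strings are also treated the same)
// isStringEquals("zk1", "zk1") -> true
// isStringEquals("zk1", null)  -> false
```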
hadoop_ConfigRedactor_configIsSensitive | /**
* Matches given config key against patterns and determines whether or not
* it should be considered sensitive enough to redact in logs and other
* plaintext displays.
*
 * @param key the configuration key to check
* @return True if parameter is considered sensitive
*/
private boolean configIsSensitive(String key) {
for (Pattern regex : compiledPatterns) {
if (regex.matcher(key).find()) {
return true;
}
}
return false;
} | 3.68 |
pulsar_PublicSuffixMatcher_getDomainRoot | /**
* Returns registrable part of the domain for the given domain name or {@code null}
* if given domain represents a public suffix.
*
* @param domain
* @param expectedType expected domain type or {@code null} if any.
* @return domain root
*
* @since 4.5
*/
public String getDomainRoot(final String domain, final DomainType expectedType) {
if (domain == null) {
return null;
}
if (domain.startsWith(".")) {
return null;
}
String domainName = null;
String segment = domain.toLowerCase(Locale.ROOT);
while (segment != null) {
// An exception rule takes priority over any other matching rule.
if (hasException(IDN.toUnicode(segment), expectedType)) {
return segment;
}
if (hasRule(IDN.toUnicode(segment), expectedType)) {
break;
}
final int nextdot = segment.indexOf('.');
final String nextSegment = nextdot != -1 ? segment.substring(nextdot + 1) : null;
if (nextSegment != null) {
if (hasRule("*." + IDN.toUnicode(nextSegment), expectedType)) {
break;
}
}
if (nextdot != -1) {
domainName = segment;
}
segment = nextSegment;
}
return domainName;
} | 3.68 |
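A few illustrative results, assuming the matcher was initialized from the standard public suffix list; the domain values are assumptions for the example, not taken from the snippet.

```java
// matcher.getDomainRoot("www.example.co.uk", null) -> "example.co.uk"
// matcher.getDomainRoot("co.uk", null)             -> null  (public suffix)
// matcher.getDomainRoot(".example.com", null)      -> null  (leading dot)
```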
hbase_AbstractRpcClient_getPoolType | /**
* Return the pool type specified in the configuration, which must be set to either
* {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin} or
* {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#ThreadLocal}, otherwise default to the
* former. For applications with many user threads, use a small round-robin pool. For applications
* with few user threads, you may want to try using a thread-local pool. In any case, the number
* of {@link org.apache.hadoop.hbase.ipc.RpcClient} instances should not exceed the operating
* system's hard limit on the number of connections.
* @param config configuration
* @return either a {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin} or
* {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#ThreadLocal}
*/
private static PoolMap.PoolType getPoolType(Configuration config) {
return PoolMap.PoolType.valueOf(config.get(HConstants.HBASE_CLIENT_IPC_POOL_TYPE),
PoolMap.PoolType.RoundRobin);
} | 3.68 |
framework_RangeValidator_isValid | /**
* Returns whether the given value lies in the valid range.
*
* @param value
* the value to validate
* @return true if the value is valid, false otherwise
*/
protected boolean isValid(T value) {
if (value == null) {
return true;
}
if (getMinValue() != null) {
int result = comparator.compare(value, getMinValue());
if (result < 0) {
return false;
} else if (result == 0 && !isMinValueIncluded()) {
return false;
}
}
if (getMaxValue() != null) {
int result = comparator.compare(value, getMaxValue());
if (result > 0) {
return false;
} else if (result == 0 && !isMaxValueIncluded()) {
return false;
}
}
return true;
} | 3.68 |
hudi_BulkInsertPartitioner_getFileIdPfx | /**
* Return file group id prefix for the given data partition.
 * By default, returns a new file group id prefix, so that incoming records are routed to a fresh file group.
 *
 * @param partitionId data partition
 * @return the file group id prefix for the given data partition
*/
default String getFileIdPfx(int partitionId) {
return FSUtils.createNewFileIdPfx();
} | 3.68 |
hudi_PartialUpdateAvroPayload_getInsertValue | /**
 * Returns itself as long as it is called by preCombine.
 * @param schema the record schema
 * @param isPreCombining whether this call happens during pre-combining
 * @return the insert value as an Option of IndexedRecord; empty if the record is empty or deleted
* @throws IOException
*/
public Option<IndexedRecord> getInsertValue(Schema schema, boolean isPreCombining) throws IOException {
if (recordBytes.length == 0 || (!isPreCombining && isDeletedRecord)) {
return Option.empty();
}
return Option.of((IndexedRecord) HoodieAvroUtils.bytesToAvro(recordBytes, schema));
} | 3.68 |
hadoop_PathLocation_orderedNamespaces | /**
* Prioritize a location/destination by its name space/nameserviceId.
* This destination might be used by other threads, so the source is not
* modifiable.
*
* @param original List of destinations to order.
* @param nsId The name space/nameserviceID to prioritize.
 * @return Prioritized list of destinations that cannot be modified.
*/
private static List<RemoteLocation> orderedNamespaces(
final List<RemoteLocation> original, final String nsId) {
if (original.size() <= 1) {
return original;
}
LinkedList<RemoteLocation> newDestinations = new LinkedList<>();
boolean found = false;
for (RemoteLocation dest : original) {
if (dest.getNameserviceId().equals(nsId)) {
found = true;
newDestinations.addFirst(dest);
} else {
newDestinations.add(dest);
}
}
if (!found) {
LOG.debug("Cannot find location with namespace {} in {}",
nsId, original);
}
return Collections.unmodifiableList(newDestinations);
} | 3.68 |
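An illustrative ordering with hypothetical destinations, to show the prioritization the method performs.

```java
// given destinations [ns0 -> /a, ns1 -> /b, ns2 -> /c] and nsId = "ns1",
// the returned unmodifiable list is [ns1 -> /b, ns0 -> /a, ns2 -> /c]:
// the matching namespace moves to the front, the rest keep their relative order.
```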
hadoop_AbstractTask_readFields | /**
 * Reads fields from the input.
 * @param in the DataInput to read from.
 * @throws IOException if an error occurs while reading.
*/
@Override
public final void readFields(final DataInput in) throws IOException {
this.taskID = new TaskId();
taskID.readFields(in);
IntWritable envSize = new IntWritable(0);
envSize.readFields(in);
for (int i = 0; i < envSize.get(); i++) {
Text key = new Text();
Text value = new Text();
key.readFields(in);
value.readFields(in);
environment.put(key.toString(), value.toString());
}
Text taskCmdText = new Text();
taskCmdText.readFields(in);
taskCmd = taskCmdText.toString();
taskType = WritableUtils.readEnum(in, TaskType.class);
timeout = WritableUtils.readVLong(in);
} | 3.68 |
flink_ExecNodeContext_getName | /** The type identifying an ExecNode in the JSON plan. See {@link ExecNodeMetadata#name()}. */
public String getName() {
return name;
} | 3.68 |
pulsar_TxnLogBufferedWriter_trigFlushIfReachMaxRecordsOrMaxSize | /**
 * If the {@link #batchedWriteMaxRecords} or {@link #batchedWriteMaxSize} threshold is reached, flush.
*/
private void trigFlushIfReachMaxRecordsOrMaxSize(){
if (flushContext.asyncAddArgsList.size() >= batchedWriteMaxRecords) {
metrics.triggerFlushByRecordsCount(flushContext.asyncAddArgsList.size(), bytesSize,
System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime);
doFlush();
return;
}
if (bytesSize >= batchedWriteMaxSize) {
metrics.triggerFlushByBytesSize(flushContext.asyncAddArgsList.size(), bytesSize,
System.currentTimeMillis() - flushContext.asyncAddArgsList.get(0).addedTime);
doFlush();
}
} | 3.68 |
hudi_Transient_eager | /**
 * Creates an instance of {@link Transient} eagerly set to the provided {@code value};
 * the given {@code initializer} is used to re-instantiate the value after the original
 * one is dropped during a serialization/deserialization cycle
*/
public static <T> Transient<T> eager(T value, SerializableSupplier<T> initializer) {
return new Transient<>(value, initializer);
} | 3.68 |
hudi_HoodiePipeline_builder | /**
* Returns the builder for hoodie pipeline construction.
*/
public static Builder builder(String tableName) {
return new Builder(tableName);
} | 3.68 |
flink_NonClosingCheckpointOutputStream_acquireLease | /**
* Returns a {@link org.apache.flink.util.ResourceGuard.Lease} that prevents closing this
 * stream. To allow the system to close this stream, each acquired lease must be released by
 * calling {@link Lease#close()} on it.
*/
public final ResourceGuard.Lease acquireLease() throws IOException {
return resourceGuard.acquireResource();
} | 3.68 |
Activiti_TreeBuilderException_getPosition | /**
* @return the error position
*/
public int getPosition() {
return position;
} | 3.68 |
hadoop_DiskBalancerWorkItem_getTolerancePercent | /**
* Allowed deviation from ideal storage in percentage.
*
* @return long
*/
public long getTolerancePercent() {
return tolerancePercent;
} | 3.68 |
hadoop_MRJobConfUtil_setLocalDirectoriesConfigForTesting | /**
 * Sets local directories so that the generated folders are subdirectories of the
 * test directory.
 * @param conf the configuration to update; a new Configuration is created if null.
 * @param testRootDir the root test directory under which local dirs are created.
 * @return the updated configuration.
*/
public static Configuration setLocalDirectoriesConfigForTesting(
Configuration conf, File testRootDir) {
Configuration config =
(conf == null) ? new Configuration(): conf;
final File hadoopLocalDir = new File(testRootDir, "hadoop-dir");
// create the directory
if (!hadoopLocalDir.getAbsoluteFile().mkdirs()) {
LOG.info("{} directory already exists", hadoopLocalDir.getPath());
}
Path mapredHadoopTempDir = new Path(hadoopLocalDir.getPath());
Path mapredSystemDir = new Path(mapredHadoopTempDir, "system");
Path stagingDir = new Path(mapredHadoopTempDir, "tmp/staging");
// Set the temp directories a subdir of the test directory.
config.set("mapreduce.jobtracker.staging.root.dir", stagingDir.toString());
config.set("mapreduce.jobtracker.system.dir", mapredSystemDir.toString());
config.set("mapreduce.cluster.temp.dir", mapredHadoopTempDir.toString());
config.set("mapreduce.cluster.local.dir",
new Path(mapredHadoopTempDir, "local").toString());
return config;
} | 3.68 |
hbase_ColumnPrefixFilter_toByteArray | /** Returns the filter serialized using protobuf. */
@Override
public byte[] toByteArray() {
FilterProtos.ColumnPrefixFilter.Builder builder = FilterProtos.ColumnPrefixFilter.newBuilder();
if (this.prefix != null) builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix));
return builder.build().toByteArray();
} | 3.68 |
morf_SqlServerDialect_getSqlForYYYYMMDDToDate | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForYYYYMMDDToDate(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForYYYYMMDDToDate(Function function) {
return "CONVERT(date, " + getSqlFrom(function.getArguments().get(0)) + ", 112)";
} | 3.68 |
graphhopper_ResponsePath_addPathDetails | /**
 * Adds the given PathDetails to the existing ones. If PathDetails are already set, their number
 * has to be equal to the size of <code>details</code>.
*
* @param details The PathDetails to add
*/
public void addPathDetails(Map<String, List<PathDetail>> details) {
if (!this.pathDetails.isEmpty() && !details.isEmpty() && this.pathDetails.size() != details.size()) {
throw new IllegalStateException("Details have to be the same size");
}
for (Map.Entry<String, List<PathDetail>> detailEntry : details.entrySet()) {
String key = detailEntry.getKey();
if (this.pathDetails.containsKey(key)) {
this.pathDetails.get(key).addAll(detailEntry.getValue());
} else {
this.pathDetails.put(key, detailEntry.getValue());
}
}
} | 3.68 |
hadoop_ResourceSet_storeSharedCacheUploadPolicy | /**
 * Store the resource's shared cache upload policies.
 * Given that a LocalResourceRequest can be shared across containers in
* LocalResourcesTrackerImpl, we preserve the upload policies here.
* In addition, it is possible for the application to create several
* "identical" LocalResources as part of
* ContainerLaunchContext.setLocalResources with different symlinks.
* There is a corner case where these "identical" local resources have
* different upload policies. For that scenario, upload policy will be set to
* true as long as there is at least one LocalResource entry with
* upload policy set to true.
*/
private void storeSharedCacheUploadPolicy(
LocalResourceRequest resourceRequest, Boolean uploadPolicy) {
Boolean storedUploadPolicy = resourcesUploadPolicies.get(resourceRequest);
if (storedUploadPolicy == null || (!storedUploadPolicy && uploadPolicy)) {
resourcesUploadPolicies.put(resourceRequest, uploadPolicy);
}
} | 3.68 |
hbase_TableSchemaModel_setName | /**
* @param name the table name
*/
public void setName(String name) {
this.name = name;
} | 3.68 |
framework_MockApplicationConnection_getLastCsrfTokenSent | /**
* Provide the last token sent to the server. <br/>
 * We added this to test the changes made to CSRF token handling.
*
* @see CsrfTokenDisabled
*/
public String getLastCsrfTokenSent() {
return getMessageSender().lastCsrfTokenSent;
} | 3.68 |
hbase_KeyValue_getTagsOffset | /** Return the offset where the tag data starts. */
@Override
public int getTagsOffset() {
int tagsLen = getTagsLength();
if (tagsLen == 0) {
return this.offset + this.length;
}
return this.offset + this.length - tagsLen;
} | 3.68 |
framework_GridLayout_setHideEmptyRowsAndColumns | /**
* Sets whether empty rows and columns should be considered as non-existent
* when rendering or not. If this is set to true then the spacing between
* multiple empty columns (or rows) will be collapsed.
*
* The default behavior is to consider all rows and columns as visible
*
* NOTE that this must be set before the initial rendering takes place.
* Updating this on the fly is not supported.
*
* @since 7.3
* @param hideEmptyRowsAndColumns
* true to hide empty rows and columns, false to leave them as-is
*/
public void setHideEmptyRowsAndColumns(boolean hideEmptyRowsAndColumns) {
getState().hideEmptyRowsAndColumns = hideEmptyRowsAndColumns;
} | 3.68 |
flink_MemorySegment_free | /**
* Frees this memory segment.
*
* <p>After this operation has been called, no further operations are possible on the memory
* segment and will fail. The actual memory (heap or off-heap) will only be released after this
* memory segment object has become garbage collected.
*/
public void free() {
if (isFreedAtomic.getAndSet(true)) {
// the segment has already been freed
if (checkMultipleFree) {
throw new IllegalStateException("MemorySegment can be freed only once!");
}
} else {
// this ensures we can place no more data and trigger
// the checks for the freed segment
address = addressLimit + 1;
offHeapBuffer = null; // to enable GC of unsafe memory
if (cleaner != null) {
cleaner.run();
cleaner = null;
}
}
} | 3.68 |
hadoop_SelectTool_getLinesRead | /**
* Number of lines read, when printing to the console.
* @return line count. 0 if writing direct to a file.
*/
public long getLinesRead() {
return linesRead;
} | 3.68 |
framework_NestedMethodProperty_getGetMethods | /**
* Returns an unmodifiable list of getter methods to call in sequence to get
* the property value.
*
* This API may change in future versions.
*
* @return unmodifiable list of getter methods corresponding to each segment
* of the property name
*/
protected List<Method> getGetMethods() {
return Collections.unmodifiableList(getMethods);
} | 3.68 |
framework_FormLayout_setExpandRatio | /**
* @deprecated This method currently has no effect as expand ratios are not
* implemented in FormLayout
*/
@Override
@Deprecated
public void setExpandRatio(Component component, float ratio) {
super.setExpandRatio(component, ratio);
} | 3.68 |
flink_RocksDBMemoryConfiguration_validate | /** Validates if the configured options are valid with respect to one another. */
public void validate() {
// As FLINK-15512 introduce a new mechanism to calculate the cache capacity,
// the relationship of write_buffer_manager_capacity and cache_capacity has changed to:
// write_buffer_manager_capacity / cache_capacity = 2 * writeBufferRatio / (3 -
// writeBufferRatio)
// we should ensure the sum of write buffer manager capacity and high priority pool is less
// than cache capacity.
// TODO change the formula once FLINK-15532 resolved.
if (writeBufferRatio != null
&& highPriorityPoolRatio != null
&& 2 * writeBufferRatio / (3 - writeBufferRatio) + highPriorityPoolRatio >= 1.0) {
throw new IllegalArgumentException(
String.format(
"Invalid configuration: writeBufferRatio %s with highPriPoolRatio %s",
writeBufferRatio, highPriorityPoolRatio));
}
} | 3.68 |
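A worked example of the constraint with hypothetical ratios, to make the formula concrete.

```java
double writeBufferRatio = 0.5;
double highPriorityPoolRatio = 0.6;
// derived write-buffer-manager share of the cache: 2 * 0.5 / (3 - 0.5) = 0.4
double writeBufferShare = 2 * writeBufferRatio / (3 - writeBufferRatio);
// 0.4 + 0.6 >= 1.0, so validate() would reject this combination
boolean rejected = writeBufferShare + highPriorityPoolRatio >= 1.0;
```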
hbase_RegionCoprocessorHost_preIncrementAfterRowLock | /**
* Supports Coprocessor 'bypass'.
* @param increment increment object
* @return result to return to client if default operation should be bypassed, null otherwise
* @throws IOException if an error occurred on the coprocessor
*/
public Result preIncrementAfterRowLock(final Increment increment) throws IOException {
boolean bypassable = true;
Result defaultResult = null;
if (coprocEnvironments.isEmpty()) {
return defaultResult;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Result>(
regionObserverGetter, defaultResult, bypassable) {
@Override
public Result call(RegionObserver observer) throws IOException {
return observer.preIncrementAfterRowLock(this, increment);
}
});
} | 3.68 |
framework_OptionGroupElement_setValue | /**
* Select option in the option group with the specified value.
*
* @param chars
* value of the option in the option group which will be selected
*/
public void setValue(CharSequence chars) throws ReadOnlyException {
selectByText((String) chars);
} | 3.68 |
framework_VSlider_getNavigationRightKey | /**
* Get the key that increases the horizontal slider. By default it is the
* right arrow key but by overriding this you can change the key to whatever
* you want.
*
* @return The keycode of the key
*/
protected int getNavigationRightKey() {
return KeyCodes.KEY_RIGHT;
} | 3.68 |
framework_SQLContainer_setPageLength | /**
* Sets the page length used in lazy fetching of items from the data source.
* Also resets the cache size to match the new page length.
*
* As a side effect the container will be refreshed.
*
* @param pageLength
* new page length
*/
public void setPageLength(int pageLength) {
setPageLengthInternal(pageLength);
refresh();
} | 3.68 |
AreaShop_AreaShop_setReady | /**
* Set if the plugin is ready to be used or not (not to be used from another plugin!).
* @param ready Indicate if the plugin is ready to be used
*/
public void setReady(boolean ready) {
this.ready = ready;
} | 3.68 |
flink_ResourceGuard_isClosed | /** Returns true if the resource guard is closed, i.e. after {@link #close()} was called. */
public boolean isClosed() {
return closed;
} | 3.68 |
hadoop_EncryptionSecrets_getEncryptionMethod | /**
* Get the encryption method.
* @return the encryption method
*/
public S3AEncryptionMethods getEncryptionMethod() {
return encryptionMethod;
} | 3.68 |
hadoop_AbstractS3ACommitter_startOperation | /**
* Start an operation; retrieve an audit span.
*
* All operation names <i>SHOULD</i> come from
* {@code StoreStatisticNames} or
* {@code StreamStatisticNames}.
* @param name operation name.
* @param path1 first path of operation
* @param path2 second path of operation
* @return a span for the audit
* @throws IOException failure
*/
protected AuditSpan startOperation(String name,
@Nullable String path1,
@Nullable String path2)
throws IOException {
return getAuditSpanSource().createSpan(name, path1, path2);
} | 3.68 |
hbase_HFileArchiver_getThreadFactory | // We need this method instead of Threads.getNamedThreadFactory() to pass some tests.
// The difference from Threads.getNamedThreadFactory() is that it doesn't fix ThreadGroup for
// new threads. If we use Threads.getNamedThreadFactory(), we will face ThreadGroup related
// issues in some tests.
private static ThreadFactory getThreadFactory() {
return new ThreadFactory() {
final AtomicInteger threadNumber = new AtomicInteger(1);
@Override
public Thread newThread(Runnable r) {
final String name = "HFileArchiver-" + threadNumber.getAndIncrement();
Thread t = new Thread(r, name);
t.setDaemon(true);
return t;
}
};
} | 3.68 |
hadoop_BlockManagerParameters_withLocalDirAllocator | /**
* Sets the local dir allocator for round-robin disk allocation
* while creating files.
*
* @param dirAllocator The local dir allocator object.
* @return The builder.
*/
public BlockManagerParameters withLocalDirAllocator(
final LocalDirAllocator dirAllocator) {
this.localDirAllocator = dirAllocator;
return this;
} | 3.68 |
flink_Task_startTaskThread | /** Starts the task's thread. */
public void startTaskThread() {
executingThread.start();
} | 3.68 |
hadoop_QueryResult_getRecords | /**
* Get the result of the query.
*
* @return List of records.
*/
public List<T> getRecords() {
return this.records;
} | 3.68 |
flink_Tuple23_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22>
Tuple23<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22>
of(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12,
T13 f13,
T14 f14,
T15 f15,
T16 f16,
T17 f17,
T18 f18,
T19 f19,
T20 f20,
T21 f21,
T22 f22) {
return new Tuple23<>(
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18,
f19, f20, f21, f22);
} | 3.68 |
framework_DesignFormatter_parse | /**
* Parses a given string as a value of given type.
*
* @param value
* String value to convert.
* @param type
* Expected result type.
* @return String converted to the expected result type using a registered
* converter for that type.
*/
public <T> T parse(String value, Class<? extends T> type) {
Converter<String, T> converter = findConverterFor(type);
if (converter != null) {
Result<T> result = converter.convertToModel(value,
new ValueContext());
return result.getOrThrow(msg -> new IllegalArgumentException(msg));
} else {
return null;
}
} | 3.68 |
morf_ColumnTypeBean_getScale | /**
* @return the scale
*/
@Override
public int getScale() {
return scale;
} | 3.68 |
hadoop_TimestampGenerator_getUniqueTimestamp | /**
* Returns a timestamp value unique within the scope of this
* {@code TimestampGenerator} instance. For usage by HBase
* {@code RegionObserver} coprocessors, this normally means unique within a
* given region.
*
 * An unlikely scenario for generating a non-unique timestamp: if there is a
 * sustained rate of more than 1M HBase writes per second AND the region fails
 * over within the time range of the generated timestamps, then there may be
 * collisions when writing to a cell version of the same column.
*
* @return unique timestamp.
*/
public long getUniqueTimestamp() {
long lastTs;
long nextTs;
do {
lastTs = lastTimestamp.get();
nextTs = Math.max(lastTs + 1, currentTime());
} while (!lastTimestamp.compareAndSet(lastTs, nextTs));
return nextTs;
} | 3.68 |
hbase_PrivateCellUtil_createCell | /** Returns a new cell which has the extra tags added to it. */
public static Cell createCell(Cell cell, byte[] tags) {
if (cell instanceof ByteBufferExtendedCell) {
return new TagRewriteByteBufferExtendedCell((ByteBufferExtendedCell) cell, tags);
}
return new TagRewriteCell(cell, tags);
} | 3.68 |
morf_Criterion_and | /**
* Helper method to create a new "AND" expression.
*
* <blockquote><pre>
* Criterion.and(listOfCriterions);</pre></blockquote>
*
* @param criteria the criteria
* @return a new Criterion object
*/
public static Criterion and(Iterable<Criterion> criteria) {
return new Criterion(Operator.AND, criteria);
} | 3.68 |
flink_RichSqlInsert_getStaticPartitions | /**
 * @return the list of partition key-value pairs; returns an empty list if there are no
 *     partition specifications.
*/
public SqlNodeList getStaticPartitions() {
return staticPartitions;
} | 3.68 |
hadoop_ChainReducer_setReducer | /**
* Sets the {@link Reducer} class to the chain job.
*
* <p>
* The key and values are passed from one element of the chain to the next, by
* value. For the added Reducer the configuration given for it,
* <code>reducerConf</code>, have precedence over the job's Configuration.
* This precedence is in effect when the task is running.
* </p>
* <p>
* IMPORTANT: There is no need to specify the output key/value classes for the
* ChainReducer, this is done by the setReducer or the addMapper for the last
* element in the chain.
* </p>
*
* @param job
* the job
* @param klass
* the Reducer class to add.
* @param inputKeyClass
* reducer input key class.
* @param inputValueClass
* reducer input value class.
* @param outputKeyClass
* reducer output key class.
* @param outputValueClass
* reducer output value class.
* @param reducerConf
* a configuration for the Reducer class. It is recommended to use a
* Configuration without default values using the
* <code>Configuration(boolean loadDefaults)</code> constructor with
* FALSE.
*/
public static void setReducer(Job job, Class<? extends Reducer> klass,
Class<?> inputKeyClass, Class<?> inputValueClass,
Class<?> outputKeyClass, Class<?> outputValueClass,
Configuration reducerConf) {
job.setReducerClass(ChainReducer.class);
job.setOutputKeyClass(outputKeyClass);
job.setOutputValueClass(outputValueClass);
Chain.setReducer(job, klass, inputKeyClass, inputValueClass,
outputKeyClass, outputValueClass, reducerConf);
} | 3.68 |
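A hedged sketch of the chain setup the javadoc describes; MyReducer and MyPostMapper are placeholder classes (not part of the snippet), and the per-element configurations follow the recommendation to disable defaults.

```java
Job job = Job.getInstance(new Configuration(), "chain-example");
ChainReducer.setReducer(job, MyReducer.class,
    Text.class, IntWritable.class,   // reducer input key/value
    Text.class, IntWritable.class,   // reducer output key/value
    new Configuration(false));       // per-reducer config without defaults
// an optional mapper can be chained after the reducer
ChainReducer.addMapper(job, MyPostMapper.class,
    Text.class, IntWritable.class,
    Text.class, Text.class,
    new Configuration(false));
```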
morf_AbstractSqlDialectTest_testSimpleDelete | /**
* Tests that a simple delete string is created correctly.
*/
@Test
public void testSimpleDelete() {
DeleteStatement stmt = new DeleteStatement(new TableReference(TEST_TABLE));
String expectedSql = "DELETE FROM " + tableName(TEST_TABLE);
assertEquals("Simple delete", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
querydsl_SimpleExpression_eqAll | /**
* Create a {@code this == all right} expression
*
* @param right
* @return this == all right
*/
public BooleanExpression eqAll(SubQueryExpression<? extends T> right) {
return eq(ExpressionUtils.all(right));
} | 3.68 |
hadoop_ActiveAuditManagerS3A_beforeUnmarshalling | /**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void beforeUnmarshalling(Context.BeforeUnmarshalling context,
ExecutionAttributes executionAttributes) {
span.beforeUnmarshalling(context, executionAttributes);
} | 3.68 |
hbase_MasterObserver_postGetRSGroupInfoOfTable | /**
* Called after getting region server group info of the passed tableName.
* @param ctx the environment to interact with the framework and master
* @param tableName name of the table to get RSGroupInfo for
*/
default void postGetRSGroupInfoOfTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName) throws IOException {
} | 3.68 |
pulsar_SimpleLoadManagerImpl_fromLoadReport | /*
 * Temporary method, remove it in the future; kept in place so this glue code can make load balancing work
*/
private PulsarResourceDescription fromLoadReport(LoadReport report) {
SystemResourceUsage sru = report.getSystemResourceUsage();
PulsarResourceDescription resourceDescription = new PulsarResourceDescription();
if (sru == null) {
return resourceDescription;
}
if (sru.bandwidthIn != null) {
resourceDescription.put("bandwidthIn", sru.bandwidthIn);
}
if (sru.bandwidthOut != null) {
resourceDescription.put("bandwidthOut", sru.bandwidthOut);
}
if (sru.memory != null) {
resourceDescription.put("memory", sru.memory);
}
if (sru.cpu != null) {
resourceDescription.put("cpu", sru.cpu);
}
return resourceDescription;
} | 3.68 |
graphhopper_Snap_isValid | /**
* @return true if a closest node was found
*/
public boolean isValid() {
return closestNode >= 0;
} | 3.68 |
flink_OptimizableHashSet_nextPowerOfTwo | /**
* Return the least power of two greater than or equal to the specified value.
*
* <p>Note that this function will return 1 when the argument is 0.
*
* @param x a long integer smaller than or equal to 2<sup>62</sup>.
* @return the least power of two greater than or equal to the specified value.
*/
public static long nextPowerOfTwo(long x) {
if (x == 0L) {
return 1L;
} else {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return (x | x >> 32) + 1L;
}
} | 3.68 |
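For intuition, a few sample results of the bit-twiddling above, shown as comments rather than a runnable test.

```java
// nextPowerOfTwo(0L)    -> 1L   (0 maps to 1 by convention)
// nextPowerOfTwo(1L)    -> 1L
// nextPowerOfTwo(5L)    -> 8L
// nextPowerOfTwo(1024L) -> 1024L
// nextPowerOfTwo(1025L) -> 2048L
```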
open-banking-gateway_Xs2aConsentInfo_isOauthEmbeddedPreStepDone | /**
 * Returns whether the OAuth embedded pre-step is already done.
*/
public boolean isOauthEmbeddedPreStepDone(Xs2aContext ctx) {
return ctx.isEmbeddedPreAuthDone();
} | 3.68 |
hbase_SlowLogQueueService_persistAll | /**
 * Adds all slowLog events to the system table. This is only for persisting slowLog events in
 * the system table.
*/
@Override
public void persistAll(Connection connection) {
if (!isOnlineLogProviderEnabled) {
return;
}
if (slowLogPersistentService != null) {
slowLogPersistentService.addAllLogsToSysTable(connection);
}
} | 3.68 |