name | code_snippet | score |
---|---|---|
hadoop_SinglePendingCommit_getUri | /** @return path URI of the destination. */
public String getUri() {
return uri;
} | 3.68 |
hadoop_BufferedIOStatisticsInputStream_hasCapability | /**
* If the inner stream supports {@link StreamCapabilities},
* forward the probe to it.
* Otherwise: return false.
*
* @param capability string to query the stream support for.
* @return true if a capability is known to be supported.
*/
@Override
public boolean hasCapability(final String capability) {
if (in instanceof StreamCapabilities) {
return ((StreamCapabilities) in).hasCapability(capability);
} else {
return false;
}
} | 3.68 |
flink_EnvironmentInformation_getMaxJvmHeapMemory | /**
* The maximum JVM heap size, in bytes.
*
* <p>This method uses the <i>-Xmx</i> value of the JVM, if set. If not set, it returns (as a
* heuristic) 1/4th of the physical memory size.
*
* @return The maximum JVM heap size, in bytes.
*/
public static long getMaxJvmHeapMemory() {
final long maxMemory = Runtime.getRuntime().maxMemory();
if (maxMemory != Long.MAX_VALUE) {
// we have the proper max memory
return maxMemory;
} else {
// max JVM heap size is not set - use the heuristic to use 1/4th of the physical memory
final long physicalMemory = Hardware.getSizeOfPhysicalMemory();
if (physicalMemory != -1) {
// got proper value for physical memory
return physicalMemory / 4;
} else {
throw new RuntimeException(
"Could not determine the amount of free memory.\n"
+ "Please set the maximum memory for the JVM, e.g. -Xmx512M for 512 megabytes.");
}
}
} | 3.68 |
framework_LegacyApplication_getWindow | /**
* <p>
* Gets a UI by name. Returns <code>null</code> if the application is not
* running or it does not contain a window corresponding to the name.
* </p>
*
* @param name
* the name of the requested window
* @return a UI corresponding to the name, or <code>null</code> to use the
* default window
*/
public LegacyWindow getWindow(String name) {
return legacyUINames.get(name);
} | 3.68 |
hmily_HmilyQuoteCharacter_getQuoteCharacter | /**
* Get quote character.
*
* @param value value to get the quote character for
* @return quote character of the value, or NONE if no delimiter matches
*/
public static HmilyQuoteCharacter getQuoteCharacter(final String value) {
if (Strings.isNullOrEmpty(value)) {
return NONE;
}
return Arrays.stream(values()).filter(each -> NONE != each && each.startDelimiter.charAt(0) == value.charAt(0)).findFirst().orElse(NONE);
} | 3.68 |
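A minimal usage sketch for the lookup above; only the NONE constant that appears in the snippet is assumed to exist, and the input values are illustrative:

```java
HmilyQuoteCharacter forEmpty  = HmilyQuoteCharacter.getQuoteCharacter("");     // empty or null input -> NONE
HmilyQuoteCharacter forQuoted = HmilyQuoteCharacter.getQuoteCharacter("`id`"); // matched by the first character of the value, NONE if nothing matches
boolean isQuoted = forQuoted != HmilyQuoteCharacter.NONE;
```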
hadoop_CommitContext_maybeResetIOStatisticsContext | /**
* Reset the IOStatistics context if statistics are being
* collected.
* Logs at info.
*/
public void maybeResetIOStatisticsContext() {
if (collectIOStatistics) {
LOG.info("Resetting IO statistics context {}",
ioStatisticsContext.getID());
ioStatisticsContext.reset();
}
} | 3.68 |
dubbo_ArrayUtils_of | /**
* Convert from variable arguments to array
*
* @param values variable arguments
* @param <T> The class
* @return array
* @since 2.7.9
*/
public static <T> T[] of(T... values) {
return values;
} | 3.68 |
morf_SqlDialect_viewDeploymentStatementsAsLiteral | /**
* Creates SQL script to deploy a database view.
*
* @param view The meta data for the view to deploy.
* @return The statements required to deploy the view joined into a script and prepared as literals.
*/
public AliasedField viewDeploymentStatementsAsLiteral(View view) {
return SqlUtils.clobLiteral(viewDeploymentStatementsAsScript(view));
} | 3.68 |
framework_EmptyTable_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Empty Table should not cause JS exception";
} | 3.68 |
flink_SqlUniqueSpec_symbol | /**
* Creates a parse-tree node representing an occurrence of this keyword at a particular position
* in the parsed text.
*/
public SqlLiteral symbol(SqlParserPos pos) {
return SqlLiteral.createSymbol(this, pos);
} | 3.68 |
dubbo_StringUtils_isNoneEmpty | /**
* <p>Checks that none of the given strings is empty or null.</p>
*
* <pre>
* StringUtils.isNoneEmpty(null) = false
* StringUtils.isNoneEmpty("") = false
* StringUtils.isNoneEmpty(" ") = true
* StringUtils.isNoneEmpty("abc") = true
* StringUtils.isNoneEmpty("abc", "def") = true
* StringUtils.isNoneEmpty("abc", null) = false
* StringUtils.isNoneEmpty("abc", "") = false
* StringUtils.isNoneEmpty("abc", " ") = true
* </pre>
*
* @param ss the strings to check
* @return {@code true} if all strings are not empty or null
*/
public static boolean isNoneEmpty(final String... ss) {
if (ArrayUtils.isEmpty(ss)) {
return false;
}
for (final String s : ss) {
if (isEmpty(s)) {
return false;
}
}
return true;
} | 3.68 |
flink_HiveParserCalcitePlanner_genGBLogicalPlan | // Generate GB plan.
private RelNode genGBLogicalPlan(HiveParserQB qb, RelNode srcRel) throws SemanticException {
RelNode gbRel = null;
HiveParserQBParseInfo qbp = qb.getParseInfo();
// 1. Gather GB Expressions (AST) (GB + Aggregations)
// NOTE: Multi Insert is not supported
String detsClauseName = qbp.getClauseNames().iterator().next();
HiveParserASTNode selExprList = qb.getParseInfo().getSelForClause(detsClauseName);
HiveParserSubQueryUtils.checkForTopLevelSubqueries(selExprList);
if (selExprList.getToken().getType() == HiveASTParser.TOK_SELECTDI
&& selExprList.getChildCount() == 1
&& selExprList.getChild(0).getChildCount() == 1) {
HiveParserASTNode node = (HiveParserASTNode) selExprList.getChild(0).getChild(0);
if (node.getToken().getType() == HiveASTParser.TOK_ALLCOLREF) {
srcRel = genSelectLogicalPlan(qb, srcRel, srcRel, null, null);
HiveParserRowResolver rr = relToRowResolver.get(srcRel);
qbp.setSelExprForClause(detsClauseName, HiveParserUtils.genSelectDIAST(rr));
}
}
// Select DISTINCT + windowing; GBy handled by genSelectForWindowing
if (selExprList.getToken().getType() == HiveASTParser.TOK_SELECTDI
&& !qb.getAllWindowingSpecs().isEmpty()) {
return null;
}
List<HiveParserASTNode> gbAstExprs = getGroupByForClause(qbp, detsClauseName);
HashMap<String, HiveParserASTNode> aggregationTrees =
qbp.getAggregationExprsForClause(detsClauseName);
boolean hasGrpByAstExprs = !gbAstExprs.isEmpty();
boolean hasAggregationTrees = aggregationTrees != null && !aggregationTrees.isEmpty();
final boolean cubeRollupGrpSetPresent =
!qbp.getDestRollups().isEmpty()
|| !qbp.getDestGroupingSets().isEmpty()
|| !qbp.getDestCubes().isEmpty();
// 2. Sanity check
if (semanticAnalyzer.getConf().getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)
&& qbp.getDistinctFuncExprsForClause(detsClauseName).size() > 1) {
throw new SemanticException(ErrorMsg.UNSUPPORTED_MULTIPLE_DISTINCTS.getMsg());
}
if (hasGrpByAstExprs || hasAggregationTrees) {
ArrayList<ExprNodeDesc> gbExprNodeDescs = new ArrayList<>();
ArrayList<String> outputColNames = new ArrayList<>();
// 3. Input, Output Row Resolvers
HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
HiveParserRowResolver outputRR = new HiveParserRowResolver();
outputRR.setIsExprResolver(true);
if (hasGrpByAstExprs) {
// 4. Construct GB Keys (ExprNode)
for (HiveParserASTNode gbAstExpr : gbAstExprs) {
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc =
semanticAnalyzer.genAllExprNodeDesc(gbAstExpr, inputRR);
ExprNodeDesc grpbyExprNDesc = astToExprNodeDesc.get(gbAstExpr);
if (grpbyExprNDesc == null) {
throw new SemanticException(
"Invalid Column Reference: " + gbAstExpr.dump());
}
addToGBExpr(
outputRR,
inputRR,
gbAstExpr,
grpbyExprNDesc,
gbExprNodeDescs,
outputColNames);
}
}
// 5. GroupingSets, Cube, Rollup
int numGroupCols = gbExprNodeDescs.size();
List<Integer> groupingSets = null;
if (cubeRollupGrpSetPresent) {
if (qbp.getDestRollups().contains(detsClauseName)) {
groupingSets = getGroupingSetsForRollup(gbAstExprs.size());
} else if (qbp.getDestCubes().contains(detsClauseName)) {
groupingSets = getGroupingSetsForCube(gbAstExprs.size());
} else if (qbp.getDestGroupingSets().contains(detsClauseName)) {
groupingSets = getGroupingSets(gbAstExprs, qbp, detsClauseName);
}
}
// 6. Construct aggregation function Info
ArrayList<AggInfo> aggInfos = new ArrayList<>();
if (hasAggregationTrees) {
for (HiveParserASTNode value : aggregationTrees.values()) {
// 6.1 Determine type of UDAF
// This is the GenericUDAF name
String aggName = unescapeIdentifier(value.getChild(0).getText());
boolean isDistinct = value.getType() == HiveASTParser.TOK_FUNCTIONDI;
boolean isAllColumns = value.getType() == HiveASTParser.TOK_FUNCTIONSTAR;
// 6.2 Convert UDAF Params to ExprNodeDesc
ArrayList<ExprNodeDesc> aggParameters = new ArrayList<>();
for (int i = 1; i < value.getChildCount(); i++) {
HiveParserASTNode paraExpr = (HiveParserASTNode) value.getChild(i);
ExprNodeDesc paraExprNode =
semanticAnalyzer.genExprNodeDesc(paraExpr, inputRR);
aggParameters.add(paraExprNode);
}
GenericUDAFEvaluator.Mode aggMode =
HiveParserUtils.groupByDescModeToUDAFMode(
GroupByDesc.Mode.COMPLETE, isDistinct);
GenericUDAFEvaluator genericUDAFEvaluator =
HiveParserUtils.getGenericUDAFEvaluator(
aggName,
aggParameters,
value,
isDistinct,
isAllColumns,
frameworkConfig.getOperatorTable());
assert (genericUDAFEvaluator != null);
HiveParserBaseSemanticAnalyzer.GenericUDAFInfo udaf =
HiveParserUtils.getGenericUDAFInfo(
genericUDAFEvaluator, aggMode, aggParameters);
String aggAlias = null;
if (value.getParent().getType() == HiveASTParser.TOK_SELEXPR
&& value.getParent().getChildCount() == 2) {
aggAlias =
unescapeIdentifier(
value.getParent().getChild(1).getText().toLowerCase());
}
AggInfo aggInfo =
new AggInfo(
aggParameters,
udaf.returnType,
aggName,
isDistinct,
isAllColumns,
aggAlias);
aggInfos.add(aggInfo);
String field =
aggAlias == null
? getColumnInternalName(numGroupCols + aggInfos.size() - 1)
: aggAlias;
outputColNames.add(field);
outputRR.putExpression(
value, new ColumnInfo(field, aggInfo.getReturnType(), "", false));
}
}
// 7. If GroupingSets, Cube, Rollup were used, we account grouping__id
// GROUPING__ID is also required by the GROUPING function, so let's always add it for
// grouping sets
if (groupingSets != null && !groupingSets.isEmpty()) {
String field = getColumnInternalName(numGroupCols + aggInfos.size());
outputColNames.add(field);
outputRR.put(
null,
VirtualColumn.GROUPINGID.getName(),
new ColumnInfo(
field,
// flink grouping_id's return type is bigint
TypeInfoFactory.longTypeInfo,
null,
true));
}
// 8. We create the group_by operator
gbRel = genGBRelNode(gbExprNodeDescs, aggInfos, groupingSets, srcRel);
relToHiveColNameCalcitePosMap.put(gbRel, buildHiveToCalciteColumnMap(outputRR));
relToRowResolver.put(gbRel, outputRR);
}
return gbRel;
} | 3.68 |
hudi_BaseHoodieWriteClient_deleteColumns | /**
* Delete columns from the table.
*
* @param colNames column names to be deleted. If deleting a column from a nested field, the full name must be specified
*/
public void deleteColumns(String... colNames) {
Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient();
InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyDeleteChange(colNames);
commitTableChange(newSchema, pair.getRight());
} | 3.68 |
framework_Escalator_getSpacersForRowAndAfter | /**
* Collects all spacers from one row index onwards.
*
* @param logicalRowIndex
* the spacer to include as the first collected spacer
* @return all spacers from {@code logicalRowIndex} and
* onwards, or an empty collection if no suitable spacers were found
*/
@SuppressWarnings("boxing")
public Collection<SpacerImpl> getSpacersForRowAndAfter(
int logicalRowIndex) {
return new ArrayList<SpacerImpl>(
rowIndexToSpacer.tailMap(logicalRowIndex, true).values());
} | 3.68 |
framework_MethodProperty_getSetters | /**
* Returns a list of all setters found in the beanType or its parent class
*
* @param beanType
* The type to check
* @param getters
* Set that will be filled with names of getters.
* @return A list of setter methods from the class and its parents
*/
private static List<JMethod> getSetters(JClassType beanType,
Set<String> getters) {
List<JMethod> setterMethods = new ArrayList<>();
while (beanType != null && !beanType.getQualifiedSourceName()
.equals(Object.class.getName())) {
for (JMethod method : beanType.getMethods()) {
// Process all setters that have corresponding fields
if (!method.isPublic() || method.isStatic()) {
// Not getter/setter, skip to next method
continue;
}
String methodName = method.getName();
if (methodName.startsWith("set")
&& method.getParameterTypes().length == 1) {
setterMethods.add(method);
} else if (method.getParameterTypes().length == 0
&& methodName.startsWith("is")
|| methodName.startsWith("get")) {
getters.add(methodName);
}
}
beanType = beanType.getSuperclass();
}
return setterMethods;
} | 3.68 |
flink_PhysicalFile_isOpen | /** @return whether this physical file is still open for writing. */
public boolean isOpen() {
return !closed && outputStream != null;
} | 3.68 |
pulsar_TimeEvictionPolicy_evict | /**
* {@inheritDoc}
*/
@Override
public EvictionPolicy.Action evict(Event<T> event) {
long now =
evictionContext == null ? System.currentTimeMillis() : evictionContext.getReferenceTime();
long diff = now - event.getTimestamp();
if (diff >= (windowLength + delta)) {
return EvictionPolicy.Action.EXPIRE;
} else if (diff < 0) { // do not process events beyond current ts
return Action.KEEP;
}
return Action.PROCESS;
} | 3.68 |
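A stand-alone sketch of the decision rule in evict() above, with hypothetical window settings, to make the three outcomes concrete:

```java
// Mirrors the snippet's logic; names and values are illustrative, not the Pulsar API.
static String decide(long now, long eventTs, long windowLength, long delta) {
    long diff = now - eventTs;
    if (diff >= windowLength + delta) {
        return "EXPIRE";  // event fell out of the time window
    } else if (diff < 0) {
        return "KEEP";    // event is ahead of the reference time, not processed yet
    }
    return "PROCESS";     // event is inside the window
}

// decide(50_000, 35_000, 10_000, 0) -> "EXPIRE"
// decide(50_000, 55_000, 10_000, 0) -> "KEEP"
// decide(50_000, 45_000, 10_000, 0) -> "PROCESS"
```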
hmily_DubboHmilyOrderApplication_main | /**
* main.
*
* @param args args
*/
public static void main(final String[] args) {
SpringApplication.run(DubboHmilyOrderApplication.class, args);
} | 3.68 |
hadoop_DynoInfraUtils_fetchNameNodeJMXValue | /**
* Fetch a value from the launched NameNode's JMX.
*
* @param nameNodeProperties The set of properties containing information
* about the NameNode.
* @param jmxBeanQuery The JMX bean query to execute; should return a
* JMX property matching {@code jmxProperty}.
* @param property The name of the JMX property whose value should be polled.
* @return The value associated with the property.
*/
static String fetchNameNodeJMXValue(Properties nameNodeProperties,
String jmxBeanQuery, String property) throws IOException {
URI nnWebUri = getNameNodeWebUri(nameNodeProperties);
URL queryURL;
try {
queryURL = new URL(nnWebUri.getScheme(), nnWebUri.getHost(),
nnWebUri.getPort(), "/jmx?qry=" + jmxBeanQuery);
} catch (MalformedURLException e) {
throw new IllegalArgumentException("Invalid JMX query: \"" + jmxBeanQuery
+ "\" against " + "NameNode URI: " + nnWebUri);
}
HttpURLConnection conn = (HttpURLConnection) queryURL.openConnection();
if (conn.getResponseCode() != 200) {
throw new IOException(
"Unable to retrieve JMX: " + conn.getResponseMessage());
}
InputStream in = conn.getInputStream();
JsonFactory fac = new JsonFactory();
JsonParser parser = fac.createParser(in);
if (parser.nextToken() != JsonToken.START_OBJECT
|| parser.nextToken() != JsonToken.FIELD_NAME
|| !parser.getCurrentName().equals("beans")
|| parser.nextToken() != JsonToken.START_ARRAY
|| parser.nextToken() != JsonToken.START_OBJECT) {
throw new IOException(
"Unexpected format of JMX JSON response for: " + jmxBeanQuery);
}
int objectDepth = 1;
String ret = null;
while (objectDepth > 0) {
JsonToken tok = parser.nextToken();
if (tok == JsonToken.START_OBJECT) {
objectDepth++;
} else if (tok == JsonToken.END_OBJECT) {
objectDepth--;
} else if (tok == JsonToken.FIELD_NAME) {
if (parser.getCurrentName().equals(property)) {
parser.nextToken();
ret = parser.getText();
break;
}
}
}
parser.close();
in.close();
conn.disconnect();
if (ret == null) {
throw new IOException(
"Property " + property + " not found within " + jmxBeanQuery);
} else {
return ret;
}
} | 3.68 |
framework_VCaption_isCaptionAsHtml | /**
* Checks whether captions are rendered as HTML.
* <p>
* Default is false
*
* @return true if the captions are rendered as HTML, false if rendered as
* plain text
*/
public boolean isCaptionAsHtml() {
return captionAsHtml;
} | 3.68 |
hadoop_AbstractMultipartUploader_checkPartHandles | /**
* Utility method to validate partHandles.
* @param partHandles handles
* @throws IllegalArgumentException if the parts are invalid
*/
protected void checkPartHandles(Map<Integer, PartHandle> partHandles) {
checkArgument(!partHandles.isEmpty(),
"Empty upload");
partHandles.keySet()
.stream()
.forEach(key ->
checkArgument(key > 0,
"Invalid part handle index %s", key));
} | 3.68 |
flink_FromElementsGeneratorFunction_setOutputType | // For backward compatibility: Supports legacy usage of
// StreamExecutionEnvironment#fromElements() which lacked type information and relied on the
// returns() method. See FLINK-21386 for details.
@Override
public void setOutputType(TypeInformation<OUT> outTypeInfo, ExecutionConfig executionConfig) {
Preconditions.checkState(
elements != null,
"The output type should've been specified before shipping the graph to the cluster");
checkIterable(elements, outTypeInfo.getTypeClass());
TypeSerializer<OUT> newSerializer = outTypeInfo.createSerializer(executionConfig);
if (Objects.equals(serializer, newSerializer)) {
return;
}
serializer = newSerializer;
try {
serializeElements(elements);
} catch (IOException e) {
throw new RuntimeException(e.getMessage(), e);
}
} | 3.68 |
hudi_MetadataMigrator_upgradeToLatest | /**
* Upgrade Metadata version to its latest.
*
* @param metadata Metadata
* @param metadataVersion Current version of metadata
* @return Metadata conforming to the latest version of this metadata
*/
public T upgradeToLatest(T metadata, int metadataVersion) {
if (metadataVersion == latestVersion) {
return metadata;
}
int newVersion = metadataVersion + 1;
while (newVersion <= latestVersion) {
VersionMigrator<T> upgrader = migrators.get(newVersion);
metadata = upgrader.upgradeFrom(metadata);
newVersion += 1;
}
return metadata;
} | 3.68 |
hbase_QuotaCache_getTableLimiter | /**
* Returns the limiter associated to the specified table.
* @param table the table to limit
* @return the limiter associated to the specified table
*/
public QuotaLimiter getTableLimiter(final TableName table) {
return getQuotaState(this.tableQuotaCache, table).getGlobalLimiter();
} | 3.68 |
hbase_SyncFuture_done | /**
* @param txid the transaction id at which this future 'completed'.
* @param t Can be null. Set if we are 'completing' on error (and this 't' is the error).
* @return True if we successfully marked this outstanding future as completed/done. Returns false
* if this future is already 'done' when this method is called.
*/
boolean done(final long txid, final Throwable t) {
doneLock.lock();
try {
if (doneTxid != NOT_DONE) {
return false;
}
this.throwable = t;
if (txid < this.txid) {
// Something badly wrong.
if (throwable == null) {
this.throwable =
new IllegalStateException("done txid=" + txid + ", my txid=" + this.txid);
}
}
// Mark done.
this.doneTxid = txid;
doneCondition.signalAll();
return true;
} finally {
doneLock.unlock();
}
} | 3.68 |
hbase_LogRollBackupSubprocedurePool_waitForOutstandingTasks | /**
* Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}
* @return <tt>true</tt> on success, <tt>false</tt> otherwise
* @throws ForeignException exception
*/
public boolean waitForOutstandingTasks() throws ForeignException {
LOG.debug("Waiting for backup procedure to finish.");
try {
for (Future<Void> f : futures) {
f.get();
}
return true;
} catch (InterruptedException e) {
if (aborted) {
throw new ForeignException("Interrupted and found to be aborted while waiting for tasks!",
e);
}
Thread.currentThread().interrupt();
} catch (ExecutionException e) {
if (e.getCause() instanceof ForeignException) {
throw (ForeignException) e.getCause();
}
throw new ForeignException(name, e.getCause());
} finally {
// close off remaining tasks
for (Future<Void> f : futures) {
if (!f.isDone()) {
f.cancel(true);
}
}
}
return false;
} | 3.68 |
hudi_HoodieDeltaWriteStat_setRecordsStats | // keep for serialization efficiency
public void setRecordsStats(Map<String, HoodieColumnRangeMetadata<Comparable>> stats) {
recordsStats = Option.of(stats);
} | 3.68 |
hudi_CompactionAdminClient_renameLogFile | /**
* Rename log files. This is done for un-scheduling a pending compaction operation. NOTE: Can only be used safely when
* no writer (ingestion/compaction) is running.
*
* @param metaClient Hoodie Table Meta-Client
* @param oldLogFile Old Log File
* @param newLogFile New Log File
*/
protected static void renameLogFile(HoodieTableMetaClient metaClient, HoodieLogFile oldLogFile,
HoodieLogFile newLogFile) throws IOException {
FileStatus[] statuses = metaClient.getFs().listStatus(oldLogFile.getPath());
ValidationUtils.checkArgument(statuses.length == 1, "Only one status must be present");
ValidationUtils.checkArgument(statuses[0].isFile(), "Source File must exist");
ValidationUtils.checkArgument(oldLogFile.getPath().getParent().equals(newLogFile.getPath().getParent()),
"Log file must only be moved within the parent directory");
metaClient.getFs().rename(oldLogFile.getPath(), newLogFile.getPath());
} | 3.68 |
hbase_MetricSampleQuantiles_allowableError | /**
* Specifies the allowable error for this rank, depending on which quantiles are being targeted.
* This is the f(r_i, n) function from the CKMS paper. It's basically how wide the range of this
* rank can be.
* @param rank the index in the list of samples
*/
private double allowableError(int rank) {
int size = samples.size();
double minError = size + 1;
for (MetricQuantile q : quantiles) {
double error;
if (rank <= q.quantile * size) {
error = (2.0 * q.error * (size - rank)) / (1.0 - q.quantile);
} else {
error = (2.0 * q.error * rank) / q.quantile;
}
if (error < minError) {
minError = error;
}
}
return minError;
} | 3.68 |
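A stand-alone sketch of the same CKMS error bound for a single targeted quantile (the original takes the minimum over all configured quantiles); the quantile target and sample count below are hypothetical:

```java
// Allowable error f(rank, n) for one target quantile q with error tolerance eps.
static double allowableError(int rank, int n, double q, double eps) {
    if (rank <= q * n) {
        return (2.0 * eps * (n - rank)) / (1.0 - q);  // below the target: bound widens towards low ranks
    }
    return (2.0 * eps * rank) / q;                    // above the target: bound widens towards high ranks
}

// Example with n = 1000, q = 0.99, eps = 0.001:
//   allowableError(900, 1000, 0.99, 0.001) = 2*0.001*100/0.01  = 20.0  (far from the target, loose bound)
//   allowableError(995, 1000, 0.99, 0.001) = 2*0.001*995/0.99 ~= 2.01  (near the target, tight bound)
```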
flink_SequenceGeneratorSource_incrementAndGet | /** Increments and returns the current sequence number for the given key. */
long incrementAndGet(int key) {
return ++statesPerKey[key - startKey];
} | 3.68 |
dubbo_RestRPCInvocationUtil_createPathMatcher | /**
* Create a path matcher from the request.
*
* @param request the request facade
* @return the path matcher built from the request path, version, group and HTTP method
*/
public static PathMatcher createPathMatcher(RequestFacade request) {
String path = request.getPath();
String version = request.getHeader(RestHeaderEnum.VERSION.getHeader());
String group = request.getHeader(RestHeaderEnum.GROUP.getHeader());
String method = request.getMethod();
return PathMatcher.getInvokeCreatePathMatcher(path, version, group, null, method);
} | 3.68 |
flink_KeyContextHandler_hasKeyContext1 | /**
* Whether the first input of {@link StreamOperator} has "KeyContext". If false, we can omit the
* call of {@link StreamOperator#setKeyContextElement1} for each record arrived on the first
* input.
*
* @return True if the first input has "KeyContext", false otherwise.
*/
default boolean hasKeyContext1() {
return true;
} | 3.68 |
morf_DummyXmlOutputStreamProvider_close | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider#close()
*/
@Override
public void close() {
// Nothing to do
} | 3.68 |
flink_GenericWriteAheadSink_saveHandleInState | /**
* Called when a checkpoint barrier arrives. It closes any open streams to the backend and marks
* them as pending for committing to the external, third-party storage system.
*
* @param checkpointId the id of the latest received checkpoint.
* @throws IOException in case something went wrong when handling the stream to the backend.
*/
private void saveHandleInState(final long checkpointId, final long timestamp) throws Exception {
// only add handle if a new OperatorState was created since the last snapshot
if (out != null) {
int subtaskIdx = getRuntimeContext().getIndexOfThisSubtask();
StreamStateHandle handle = out.closeAndGetHandle();
PendingCheckpoint pendingCheckpoint =
new PendingCheckpoint(checkpointId, subtaskIdx, timestamp, handle);
if (pendingCheckpoints.contains(pendingCheckpoint)) {
// we already have a checkpoint stored for that ID that may have been partially
// written,
// so we discard this "alternate version" and use the stored checkpoint
handle.discardState();
} else {
pendingCheckpoints.add(pendingCheckpoint);
}
out = null;
}
} | 3.68 |
rocketmq-connect_IdentifierRules_identifierDelimiter | /**
* Get the delimiter that is used to delineate segments within fully-qualified identifiers.
*
* @return the identifier delimiter; never null
*/
public String identifierDelimiter() {
return identifierDelimiter;
} | 3.68 |
streampipes_Formats_cborFormat | /**
* Defines the transport format CBOR used by a data stream at runtime.
*
* @return The {@link org.apache.streampipes.model.grounding.TransportFormat} of type CBOR.
*/
public static TransportFormat cborFormat() {
return new TransportFormat(MessageFormat.CBOR);
} | 3.68 |
framework_ScrollbarBundle_setOffsetSizeAndScrollSize | /**
* Sets the length of the scrollbar and the amount of pixels the scrollbar
* needs to be able to scroll through.
*
* @param offsetPx
* the length of the scrollbar in pixels
* @param scrollPx
* the number of pixels the scrollbar should be able to scroll
* through
*/
public final void setOffsetSizeAndScrollSize(final double offsetPx,
final double scrollPx) {
boolean newOffsetSizeIsGreaterThanScrollSize = offsetPx > scrollPx;
boolean offsetSizeBecomesGreaterThanScrollSize = showsScrollHandle()
&& newOffsetSizeIsGreaterThanScrollSize;
boolean needsMoreHandling = false;
if (offsetSizeBecomesGreaterThanScrollSize && getScrollPos() != 0) {
setScrollPos(0);
if (offsetPx != getOffsetSize()) {
internalSetOffsetSize(Math.max(0, offsetPx));
}
if (scrollPx != getScrollSize()) {
internalSetScrollSize(Math.max(0, scrollPx));
}
needsMoreHandling = true;
} else {
if (offsetPx != getOffsetSize()) {
internalSetOffsetSize(Math.max(0, offsetPx));
needsMoreHandling = true;
}
if (scrollPx != getScrollSize()) {
internalSetScrollSize(Math.max(0, scrollPx));
needsMoreHandling = true;
}
}
if (needsMoreHandling) {
recalculateMaxScrollPos();
forceScrollbar(showsScrollHandle());
fireVisibilityChangeIfNeeded();
}
} | 3.68 |
pulsar_TopKBundles_update | /**
* Update the topK bundles from the input bundleStats.
*
* @param bundleStats bundle stats.
* @param topk top k bundle stats to select.
*/
public void update(Map<String, NamespaceBundleStats> bundleStats, int topk) {
arr.clear();
try {
var isLoadBalancerSheddingBundlesWithPoliciesEnabled =
pulsar.getConfiguration().isLoadBalancerSheddingBundlesWithPoliciesEnabled();
for (var etr : bundleStats.entrySet()) {
String bundle = etr.getKey();
// TODO: do not filter system topic while shedding
if (NamespaceService.isSystemServiceNamespace(NamespaceBundle.getBundleNamespace(bundle))) {
continue;
}
if (!isLoadBalancerSheddingBundlesWithPoliciesEnabled && hasPolicies(bundle)) {
continue;
}
arr.add(etr);
}
var topKBundlesLoadData = loadData.getTopBundlesLoadData();
topKBundlesLoadData.clear();
if (arr.isEmpty()) {
return;
}
topk = Math.min(topk, arr.size());
partitionSort(arr, topk);
for (int i = topk - 1; i >= 0; i--) {
var etr = arr.get(i);
topKBundlesLoadData.add(
new TopBundlesLoadData.BundleLoadData(etr.getKey(), (NamespaceBundleStats) etr.getValue()));
}
} finally {
arr.clear();
}
} | 3.68 |
hudi_BootstrapUtils_getAllLeafFoldersWithFiles | /**
* Returns leaf folders with files under a path.
* @param baseFileFormat Hoodie base file format
* @param fs File System
* @param basePathStr Base path under which to look for files
* @param context HoodieEngineContext
* @return list of partition paths with files under them.
* @throws IOException
*/
public static List<Pair<String, List<HoodieFileStatus>>> getAllLeafFoldersWithFiles(HoodieFileFormat baseFileFormat,
FileSystem fs, String basePathStr, HoodieEngineContext context) throws IOException {
final Path basePath = new Path(basePathStr);
final String baseFileExtension = baseFileFormat.getFileExtension();
final Map<Integer, List<String>> levelToPartitions = new HashMap<>();
final Map<String, List<HoodieFileStatus>> partitionToFiles = new HashMap<>();
PathFilter filePathFilter = getFilePathFilter(baseFileExtension);
PathFilter metaPathFilter = getExcludeMetaPathFilter();
FileStatus[] topLevelStatuses = fs.listStatus(basePath);
List<String> subDirectories = new ArrayList<>();
List<Pair<HoodieFileStatus, Pair<Integer, String>>> result = new ArrayList<>();
for (FileStatus topLevelStatus: topLevelStatuses) {
if (topLevelStatus.isFile() && filePathFilter.accept(topLevelStatus.getPath())) {
String relativePath = FSUtils.getRelativePartitionPath(basePath, topLevelStatus.getPath().getParent());
Integer level = (int) relativePath.chars().filter(ch -> ch == '/').count();
HoodieFileStatus hoodieFileStatus = FileStatusUtils.fromFileStatus(topLevelStatus);
result.add(Pair.of(hoodieFileStatus, Pair.of(level, relativePath)));
} else if (topLevelStatus.isDirectory() && metaPathFilter.accept(topLevelStatus.getPath())) {
subDirectories.add(topLevelStatus.getPath().toString());
}
}
if (subDirectories.size() > 0) {
result.addAll(context.flatMap(subDirectories, directory -> {
PathFilter pathFilter = getFilePathFilter(baseFileExtension);
Path path = new Path(directory);
FileSystem fileSystem = path.getFileSystem(new Configuration());
RemoteIterator<LocatedFileStatus> itr = fileSystem.listFiles(path, true);
List<Pair<HoodieFileStatus, Pair<Integer, String>>> res = new ArrayList<>();
while (itr.hasNext()) {
FileStatus status = itr.next();
if (pathFilter.accept(status.getPath())) {
String relativePath = FSUtils.getRelativePartitionPath(new Path(basePathStr), status.getPath().getParent());
Integer level = (int) relativePath.chars().filter(ch -> ch == '/').count();
HoodieFileStatus hoodieFileStatus = FileStatusUtils.fromFileStatus(status);
res.add(Pair.of(hoodieFileStatus, Pair.of(level, relativePath)));
}
}
return res.stream();
}, subDirectories.size()));
}
result.forEach(val -> {
String relativePath = val.getRight().getRight();
List<HoodieFileStatus> statusList = partitionToFiles.get(relativePath);
if (null == statusList) {
Integer level = val.getRight().getLeft();
List<String> dirs = levelToPartitions.get(level);
if (null == dirs) {
dirs = new ArrayList<>();
levelToPartitions.put(level, dirs);
}
dirs.add(relativePath);
statusList = new ArrayList<>();
partitionToFiles.put(relativePath, statusList);
}
statusList.add(val.getLeft());
});
OptionalInt maxLevelOpt = levelToPartitions.keySet().stream().mapToInt(x -> x).max();
int maxLevel = maxLevelOpt.orElse(-1);
return maxLevel >= 0 ? levelToPartitions.get(maxLevel).stream()
.map(d -> Pair.of(d, partitionToFiles.get(d))).collect(Collectors.toList()) : new ArrayList<>();
} | 3.68 |
dubbo_AbstractDynamicConfiguration_getDefaultGroup | /**
* @return the default group
* @since 2.7.8
*/
@Override
public String getDefaultGroup() {
return getGroup();
} | 3.68 |
flink_MemoryLogger_getGarbageCollectorStatsAsString | /**
* Gets the garbage collection statistics from the JVM.
*
* @param gcMXBeans The collection of garbage collector beans.
* @return A string denoting the number of times and total elapsed time in garbage collection.
*/
public static String getGarbageCollectorStatsAsString(List<GarbageCollectorMXBean> gcMXBeans) {
StringBuilder bld = new StringBuilder("Garbage collector stats: ");
for (GarbageCollectorMXBean bean : gcMXBeans) {
bld.append('[')
.append(bean.getName())
.append(", GC TIME (ms): ")
.append(bean.getCollectionTime());
bld.append(", GC COUNT: ").append(bean.getCollectionCount()).append(']');
bld.append(", ");
}
if (!gcMXBeans.isEmpty()) {
bld.setLength(bld.length() - 2);
}
return bld.toString();
} | 3.68 |
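A usage sketch for the static helper above, feeding it the JVM's own collector beans via the standard java.lang.management API (assuming MemoryLogger is on the classpath):

```java
// Output looks like "Garbage collector stats: [<collector name>, GC TIME (ms): 12, GC COUNT: 3], [...]"
String gcStats = MemoryLogger.getGarbageCollectorStatsAsString(
        java.lang.management.ManagementFactory.getGarbageCollectorMXBeans());
System.out.println(gcStats);
```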
hbase_AnnotationReadingPriorityFunction_getAnnotatedPriority | /**
* See if the method has an annotation.
* @return Return the priority from the annotation. If there isn't an annotation, this returns
* something below zero.
*/
protected int getAnnotatedPriority(RequestHeader header) {
String methodName = header.getMethodName();
Integer priorityByAnnotation = annotatedQos.get(methodName);
if (priorityByAnnotation != null) {
return normalizePriority(priorityByAnnotation);
}
return -1;
} | 3.68 |
Activiti_BaseEntityEventListener_onDelete | /**
* Called when an entity delete event is received.
*/
protected void onDelete(ActivitiEvent event) {
// Default implementation is a NO-OP
} | 3.68 |
starts_AnnotationVisitor_visitArray | /**
* Visits an array value of the annotation. Note that arrays of primitive
* types (such as byte, boolean, short, char, int, long, float or double)
* can be passed as value to {@link #visit visit}. This is what
* {@link ClassReader} does.
*
* @param name
* the value name.
* @return a visitor to visit the actual array value elements, or
* <code>null</code> if this visitor is not interested in visiting these
* values. The 'name' parameters passed to the methods of this
* visitor are ignored. <i>All the array values must be visited
* before calling other methods on this annotation visitor</i>.
*/
public AnnotationVisitor visitArray(String name) {
if (av != null) {
return av.visitArray(name);
}
return null;
} | 3.68 |
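A short sketch of how a caller typically drives the returned visitor, following the contract in the javadoc above; the attribute name and element values are illustrative:

```java
AnnotationVisitor arrayVisitor = av.visitArray("value");
if (arrayVisitor != null) {          // null means this visitor is not interested in the array
    arrayVisitor.visit(null, 1);     // names passed for array elements are ignored
    arrayVisitor.visit(null, 2);
    arrayVisitor.visitEnd();         // finish the array before calling further methods on 'av'
}
```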
framework_TestBenchElementRightClick_fillTable | // fill the table with some random data
private void fillTable(Table table) {
initProperties(table);
for (int i = 0; i < ROWS; i++) {
String[] line = new String[COLUMNS];
for (int j = 0; j < COLUMNS; j++) {
line[j] = "col=" + j + " row=" + i;
}
table.addItem(line, null);
}
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_generateScopeModelAssignment | /**
* @return the generated scope model assignment code for the extension type
*/
private String generateScopeModelAssignment() {
return String.format(CODE_SCOPE_MODEL_ASSIGNMENT, type.getName());
} | 3.68 |
hbase_AbstractHBaseSaslRpcClient_dispose | /** Release resources used by wrapped saslClient */
public void dispose() {
SaslUtil.safeDispose(saslClient);
} | 3.68 |
hbase_CoprocessorHost_getExternalClassLoaders | /**
* Retrieves the set of classloaders used to instantiate Coprocessor classes defined in external
* jar files.
* @return A set of ClassLoader instances
*/
Set<ClassLoader> getExternalClassLoaders() {
Set<ClassLoader> externalClassLoaders = new HashSet<>();
final ClassLoader systemClassLoader = this.getClass().getClassLoader();
for (E env : coprocEnvironments) {
ClassLoader cl = env.getInstance().getClass().getClassLoader();
if (cl != systemClassLoader) {
// do not include system classloader
externalClassLoaders.add(cl);
}
}
return externalClassLoaders;
} | 3.68 |
rocketmq-connect_PositionManagementService_configure | /**
* Configure class with the given key-value pairs
*
* @param config can be DistributedConfig or StandaloneConfig
*/
default void configure(WorkerConfig config) {
} | 3.68 |
hbase_Result_createCompleteResult | /**
* Forms a single result from the partial results in the partialResults list. This method is
* useful for reconstructing partial results on the client side.
* @param partialResults list of partial results
* @return The complete result that is formed by combining all of the partial results together
* @throws IOException A complete result cannot be formed because the results in the partial list
* come from different rows
*/
public static Result createCompleteResult(Iterable<Result> partialResults) throws IOException {
if (partialResults == null) {
return Result.create(Collections.emptyList(), null, false);
}
List<Cell> cells = new ArrayList<>();
boolean stale = false;
byte[] prevRow = null;
byte[] currentRow = null;
for (Iterator<Result> iter = partialResults.iterator(); iter.hasNext();) {
Result r = iter.next();
currentRow = r.getRow();
if (prevRow != null && !Bytes.equals(prevRow, currentRow)) {
throw new IOException("Cannot form complete result. Rows of partial results do not match."
+ " Partial Results: " + partialResults);
}
// Ensure that all Results except the last one are marked as partials. The last result
// may not be marked as a partial because Results are only marked as partials when
// the scan on the server side must be stopped due to reaching the maxResultSize.
// Visualizing it makes it easier to understand:
// maxResultSize: 2 cells
// (-x-) represents cell number x in a row
// Example: row1: -1- -2- -3- -4- -5- (5 cells total)
// How row1 will be returned by the server as partial Results:
// Result1: -1- -2- (2 cells, size limit reached, mark as partial)
// Result2: -3- -4- (2 cells, size limit reached, mark as partial)
// Result3: -5- (1 cell, size limit NOT reached, NOT marked as partial)
if (iter.hasNext() && !r.mayHaveMoreCellsInRow()) {
throw new IOException("Cannot form complete result. Result is missing partial flag. "
+ "Partial Results: " + partialResults);
}
prevRow = currentRow;
stale = stale || r.isStale();
for (Cell c : r.rawCells()) {
cells.add(c);
}
}
return Result.create(cells, null, stale);
} | 3.68 |
flink_ContinuousFileMonitoringFunction_listEligibleFiles | /**
* Returns the paths of the files not yet processed.
*
* @param fileSystem The filesystem where the monitored directory resides.
*/
private Map<Path, FileStatus> listEligibleFiles(FileSystem fileSystem, Path path) {
final FileStatus[] statuses;
try {
statuses = fileSystem.listStatus(path);
} catch (IOException e) {
// we may run into an IOException if files are moved while listing their status
// delay the check for eligible files in this case
return Collections.emptyMap();
}
if (statuses == null) {
LOG.warn("Path does not exist: {}", path);
return Collections.emptyMap();
} else {
Map<Path, FileStatus> files = new HashMap<>();
// handle the new files
for (FileStatus status : statuses) {
if (!status.isDir()) {
Path filePath = status.getPath();
long modificationTime = status.getModificationTime();
if (!shouldIgnore(filePath, modificationTime)) {
files.put(filePath, status);
}
} else if (format.getNestedFileEnumeration() && format.acceptFile(status)) {
files.putAll(listEligibleFiles(fileSystem, status.getPath()));
}
}
return files;
}
} | 3.68 |
hudi_ParquetUtils_filterRowKeys | /**
* Read the rowKey list matching the given filter, from the given parquet file. If the filter is empty, then this will
* return all the rowkeys and corresponding positions.
*
* @param filePath The parquet file path.
* @param configuration configuration to build fs object
* @param filter record keys filter
* @return Set of pairs of row key and position matching the given filter
*/
@Override
public Set<Pair<String, Long>> filterRowKeys(Configuration configuration, Path filePath, Set<String> filter) {
return filterParquetRowKeys(configuration, filePath, filter, HoodieAvroUtils.getRecordKeySchema());
} | 3.68 |
flink_FlinkUserCodeClassLoader_loadClassWithoutExceptionHandling | /**
* Same as {@link #loadClass(String, boolean)} but without exception handling.
*
* <p>Extending concrete class loaders should implement this instead of {@link
* #loadClass(String, boolean)}.
*/
protected Class<?> loadClassWithoutExceptionHandling(String name, boolean resolve)
throws ClassNotFoundException {
return super.loadClass(name, resolve);
} | 3.68 |
hudi_HoodieHeartbeatClient_stopHeartbeatTimer | /**
* Stops the timer of the given heartbeat.
*
* @param heartbeat The heartbeat to stop.
*/
private void stopHeartbeatTimer(Heartbeat heartbeat) {
LOG.info("Stopping heartbeat for instant " + heartbeat.getInstantTime());
heartbeat.getTimer().cancel();
heartbeat.setHeartbeatStopped(true);
LOG.info("Stopped heartbeat for instant " + heartbeat.getInstantTime());
} | 3.68 |
flink_AbstractHeapPriorityQueue_iterator | /**
* Returns an iterator over the elements in this queue. The iterator does not return the
* elements in any particular order.
*
* @return an iterator over the elements in this queue.
*/
@Nonnull
@Override
public CloseableIterator<T> iterator() {
return new HeapIterator();
} | 3.68 |
flink_Tuple9_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
*/
public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
} | 3.68 |
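A brief usage sketch of the setter above, with illustrative field types and values:

```java
Tuple9<Integer, String, Long, Double, Boolean, Integer, String, Long, Double> t = new Tuple9<>();
// Overwrites all nine fields in one call instead of assigning f0..f8 individually.
t.setFields(1, "a", 2L, 3.0, true, 4, "b", 5L, 6.0);
```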
flink_ConfigurationUtils_convertValue | /**
* Tries to convert the raw value into the provided type.
*
* @param rawValue rawValue to convert into the provided type clazz
* @param clazz clazz specifying the target type
* @param <T> type of the result
* @return the converted value if rawValue is of type clazz
* @throws IllegalArgumentException if the rawValue cannot be converted in the specified target
* type clazz
*/
@SuppressWarnings("unchecked")
public static <T> T convertValue(Object rawValue, Class<?> clazz) {
if (Integer.class.equals(clazz)) {
return (T) convertToInt(rawValue);
} else if (Long.class.equals(clazz)) {
return (T) convertToLong(rawValue);
} else if (Boolean.class.equals(clazz)) {
return (T) convertToBoolean(rawValue);
} else if (Float.class.equals(clazz)) {
return (T) convertToFloat(rawValue);
} else if (Double.class.equals(clazz)) {
return (T) convertToDouble(rawValue);
} else if (String.class.equals(clazz)) {
return (T) convertToString(rawValue);
} else if (clazz.isEnum()) {
return (T) convertToEnum(rawValue, (Class<? extends Enum<?>>) clazz);
} else if (clazz == Duration.class) {
return (T) convertToDuration(rawValue);
} else if (clazz == MemorySize.class) {
return (T) convertToMemorySize(rawValue);
} else if (clazz == Map.class) {
return (T) convertToProperties(rawValue);
}
throw new IllegalArgumentException("Unsupported type: " + clazz);
} | 3.68 |
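A usage sketch for the conversion helper above; the raw values here are plain strings, which the per-type converters are assumed to accept (e.g. as read from a config map):

```java
Integer parallelism = ConfigurationUtils.convertValue("4", Integer.class);
Boolean enabled     = ConfigurationUtils.convertValue("true", Boolean.class);
java.time.Duration timeout =
        ConfigurationUtils.convertValue("30 s", java.time.Duration.class);
```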
hbase_MasterObserver_postSetTableQuota | /**
* Called after the quota for the table is stored.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
* @param quotas the resulting quota for the table
*/
default void postSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final GlobalQuotaSettings quotas) throws IOException {
} | 3.68 |
flink_JoinOperator_projectTuple1 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0> ProjectJoin<I1, I2, Tuple1<T0>> projectTuple1() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple1<T0>> tType = new TupleTypeInfo<Tuple1<T0>>(fTypes);
return new ProjectJoin<I1, I2, Tuple1<T0>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
hbase_CoprocessorRpcUtils_getServiceName | /**
* Returns the name to use for coprocessor service calls. For core HBase services (in the hbase.pb
* protobuf package), this returns the unqualified name in order to provide backward compatibility
* across the package name change. For all other services, the fully-qualified service name is
* used.
*/
public static String getServiceName(Descriptors.ServiceDescriptor service) {
if (service.getFullName().startsWith(hbaseServicePackage)) {
return service.getName();
}
return service.getFullName();
} | 3.68 |
flink_EnvironmentSettings_fromConfiguration | /**
* Creates an instance of {@link EnvironmentSettings} from configuration.
*
* @deprecated use {@link Builder#withConfiguration(Configuration)} instead.
*/
@Deprecated
public static EnvironmentSettings fromConfiguration(ReadableConfig configuration) {
return new EnvironmentSettings(
(Configuration) configuration, Thread.currentThread().getContextClassLoader());
} | 3.68 |
pulsar_ManagedCursorMetrics_aggregate | /**
* Aggregation by namespace, ledger, cursor.
*
* @return List<Metrics>
*/
private List<Metrics> aggregate() {
metricsCollection.clear();
for (Map.Entry<String, ManagedLedgerImpl> e : getManagedLedgers().entrySet()) {
String ledgerName = e.getKey();
ManagedLedgerImpl ledger = e.getValue();
String namespace = parseNamespaceFromLedgerName(ledgerName);
ManagedCursorContainer cursorContainer = ledger.getCursors();
Iterator<ManagedCursor> cursorIterator = cursorContainer.iterator();
while (cursorIterator.hasNext()) {
ManagedCursorImpl cursor = (ManagedCursorImpl) cursorIterator.next();
ManagedCursorMXBean cStats = cursor.getStats();
dimensionMap.clear();
dimensionMap.put("namespace", namespace);
dimensionMap.put("ledger_name", ledgerName);
dimensionMap.put("cursor_name", cursor.getName());
Metrics metrics = createMetrics(dimensionMap);
metrics.put("brk_ml_cursor_nonContiguousDeletedMessagesRange",
(long) cursor.getTotalNonContiguousDeletedMessagesRange());
metrics.put("brk_ml_cursor_persistLedgerSucceed", cStats.getPersistLedgerSucceed());
metrics.put("brk_ml_cursor_persistLedgerErrors", cStats.getPersistLedgerErrors());
metrics.put("brk_ml_cursor_persistZookeeperSucceed", cStats.getPersistZookeeperSucceed());
metrics.put("brk_ml_cursor_persistZookeeperErrors", cStats.getPersistZookeeperErrors());
metrics.put("brk_ml_cursor_writeLedgerSize", cStats.getWriteCursorLedgerSize());
metrics.put("brk_ml_cursor_writeLedgerLogicalSize", cStats.getWriteCursorLedgerLogicalSize());
metrics.put("brk_ml_cursor_readLedgerSize", cStats.getReadCursorLedgerSize());
metricsCollection.add(metrics);
}
}
return metricsCollection;
} | 3.68 |
framework_StringToBooleanConverter_convertToModel | /*
* (non-Javadoc)
*
* @see
* com.vaadin.data.util.converter.Converter#convertToModel(java.lang.Object,
* java.lang.Class, java.util.Locale)
*/
@Override
public Boolean convertToModel(String value,
Class<? extends Boolean> targetType, Locale locale)
throws ConversionException {
if (value == null || value.isEmpty()) {
return null;
}
// Remove leading and trailing white space
value = value.trim();
if (getTrueString().equals(value)) {
return true;
} else if (getFalseString().equals(value)) {
return false;
} else {
throw new ConversionException("Cannot convert " + value + " to "
+ getModelType().getName());
}
} | 3.68 |
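A usage sketch of the converter above, assuming the default true/false strings are "true" and "false" (not shown in the snippet):

```java
StringToBooleanConverter converter = new StringToBooleanConverter();
Boolean yes  = converter.convertToModel(" true ", Boolean.class, Locale.US); // trimmed, then matched against getTrueString()
Boolean none = converter.convertToModel("", Boolean.class, Locale.US);       // null or empty input converts to null
// Any other value throws a ConversionException.
```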
morf_GraphBasedUpgradeNode_getModifies | /**
* @return all the tables which are modified by this upgrade node
*/
public Set<String> getModifies() {
return modifies;
} | 3.68 |
hudi_HashFunction_clear | /**
* Clears <i>this</i> hash function. A NOOP
*/
public void clear() {
} | 3.68 |
hibernate-validator_ConstraintHelper_getDefaultValidatorDescriptors | /**
* Returns the default validators for the given constraint type.
*
* @param annotationType The constraint annotation type.
*
* @return A list with the default validators as retrieved from
* {@link Constraint#validatedBy()} or the list of validators for
* built-in constraints.
*/
@SuppressWarnings("unchecked")
private <A extends Annotation> List<ConstraintValidatorDescriptor<A>> getDefaultValidatorDescriptors(Class<A> annotationType) {
//safe cause all CV for a given annotation A are CV<A, ?>
final List<ConstraintValidatorDescriptor<A>> builtInValidators = (List<ConstraintValidatorDescriptor<A>>) enabledBuiltinConstraints
.get( annotationType );
if ( builtInValidators != null ) {
return builtInValidators;
}
Class<? extends ConstraintValidator<A, ?>>[] validatedBy = (Class<? extends ConstraintValidator<A, ?>>[]) annotationType
.getAnnotation( Constraint.class )
.validatedBy();
return Stream.of( validatedBy )
.map( c -> ConstraintValidatorDescriptor.forClass( c, annotationType ) )
.collect( Collectors.collectingAndThen( Collectors.toList(), CollectionHelper::toImmutableList ) );
} | 3.68 |
flink_BridgingSqlAggFunction_of | /** Creates an instance of an aggregate function during translation. */
public static BridgingSqlAggFunction of(
FlinkContext context,
FlinkTypeFactory typeFactory,
ContextResolvedFunction resolvedFunction) {
final DataTypeFactory dataTypeFactory = context.getCatalogManager().getDataTypeFactory();
final TypeInference typeInference =
resolvedFunction.getDefinition().getTypeInference(dataTypeFactory);
return of(
dataTypeFactory,
typeFactory,
SqlKind.OTHER_FUNCTION,
resolvedFunction,
typeInference);
} | 3.68 |
hadoop_HttpFSExceptionProvider_log | /**
* Logs the HTTP status code and exception in HttpFSServer's log.
*
* @param status HTTP status code.
* @param throwable exception thrown.
*/
@Override
protected void log(Response.Status status, Throwable throwable) {
String method = MDC.get("method");
String path = MDC.get("path");
String message = getOneLineMessage(throwable);
AUDIT_LOG.warn("FAILED [{}:{}] response [{}] {}", new Object[]{method, path, status, message});
LOG.warn("[{}:{}] response [{}] {}", method, path, status, message, throwable);
} | 3.68 |
hadoop_IOStatisticsLogging_mapToSortedString | /**
* Given a map, produce a string with all the values, sorted.
* Needs to create a treemap and insert all the entries.
* @param sb string buffer to append to
* @param type type (for output)
* @param map map to evaluate
* @param <E> type of values of the map
*/
private static <E> void mapToSortedString(StringBuilder sb,
final String type,
final Map<String, E> map,
final Predicate<E> isEmpty) {
mapToString(sb, type, sortedMap(map, isEmpty), "\n");
} | 3.68 |
flink_FileStateHandle_getStateSize | /**
* Returns the file size in bytes.
*
* @return The file size in bytes.
*/
@Override
public long getStateSize() {
return stateSize;
} | 3.68 |
flink_DataSet_iterateDelta | /**
* Initiates a delta iteration. A delta iteration is similar to a regular iteration (as started
* by {@link #iterate(int)}, but maintains state across the individual iteration steps. The
* Solution set, which represents the current state at the beginning of each iteration can be
* obtained via {@link org.apache.flink.api.java.operators.DeltaIteration#getSolutionSet()}. It
* can be accessed by joining (or CoGrouping) with it. The DataSet that represents the
* workset of an iteration can be obtained via {@link
* org.apache.flink.api.java.operators.DeltaIteration#getWorkset()}. The solution set is updated
* by producing a delta for it, which is merged into the solution set at the end of each
* iteration step.
*
* <p>The delta iteration must be closed by calling {@link
* org.apache.flink.api.java.operators.DeltaIteration#closeWith(DataSet, DataSet)}. The two
* parameters are the delta for the solution set and the new workset (the data set that will be
* fed back). The return value of the {@code closeWith(DataSet, DataSet)} method is the
* resulting data set after the iteration has terminated. Delta iterations terminate when the
* feed back data set (the workset) is empty. In addition, a maximum number of steps is given as
* a fall back termination guard.
*
* <p>Elements in the solution set are uniquely identified by a key. When merging the solution
* set delta, contained elements with the same key are replaced.
*
* <p><b>NOTE:</b> Delta iterations currently support only tuple valued data types. This
* restriction will be removed in the future. The key is specified by the tuple position.
*
* <p>A code example for a delta iteration is as follows
*
* <pre>{@code
* DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
* initialState.iterateDelta(initialFeedbackSet, 100, 0);
*
* DataSet<Tuple2<Long, Long>> delta = iteration.groupBy(0).aggregate(Aggregations.AVG, 1)
* .join(iteration.getSolutionSet()).where(0).equalTo(0)
* .flatMap(new ProjectAndFilter());
*
* DataSet<Tuple2<Long, Long>> feedBack = delta.join(someOtherSet).where(...).equalTo(...).with(...);
*
* // close the delta iteration (delta and new workset are identical)
* DataSet<Tuple2<Long, Long>> result = iteration.closeWith(delta, feedBack);
* }</pre>
*
* @param workset The initial version of the data set that is fed back to the next iteration
* step (the workset).
* @param maxIterations The maximum number of iteration steps, as a fall back safeguard.
* @param keyPositions The position of the tuple fields that is used as the key of the solution
* set.
* @return The DeltaIteration that marks the start of a delta iteration.
* @see org.apache.flink.api.java.operators.DeltaIteration
*/
public <R> DeltaIteration<T, R> iterateDelta(
DataSet<R> workset, int maxIterations, int... keyPositions) {
Preconditions.checkNotNull(workset);
Preconditions.checkNotNull(keyPositions);
Keys.ExpressionKeys<T> keys = new Keys.ExpressionKeys<>(keyPositions, getType());
return new DeltaIteration<>(
getExecutionEnvironment(), getType(), this, workset, keys, maxIterations);
} | 3.68 |
flink_TieredStorageConfiguration_getAccumulatorExclusiveBuffers | /**
* Get exclusive buffer number of accumulator.
*
* <p>The buffer number is used to compare with the subpartition number to determine the type of
* {@link BufferAccumulator}.
*
* <p>If the exclusive buffer number is larger than (subpartitionNum + 1), the accumulator will
* use {@link HashBufferAccumulator}. If the exclusive buffer number is equal to or smaller than
* (subpartitionNum + 1), the accumulator will use {@link SortBufferAccumulator}
*
* @return the buffer number.
*/
public int getAccumulatorExclusiveBuffers() {
return accumulatorExclusiveBuffers;
} | 3.68 |
hbase_ZKProcedureMemberRpcs_sendMemberCompleted | /**
* This acts as the ack for a completed procedure
*/
@Override
public void sendMemberCompleted(Subprocedure sub, byte[] data) throws IOException {
String procName = sub.getName();
LOG.debug(
"Marking procedure '" + procName + "' completed for member '" + memberName + "' in zk");
String joinPath =
ZNodePaths.joinZNode(zkController.getReachedBarrierNode(procName), memberName);
// ProtobufUtil.prependPBMagic does not take care of null
if (data == null) {
data = new byte[0];
}
try {
ZKUtil.createAndFailSilent(zkController.getWatcher(), joinPath,
ProtobufUtil.prependPBMagic(data));
} catch (KeeperException e) {
member.controllerConnectionFailure(
"Failed to post zk node:" + joinPath + " to join procedure barrier.", e, procName);
}
} | 3.68 |
flink_RichSqlInsert_getTableHints | /** Returns the table hints as list of {@code SqlNode} for current insert node. */
public SqlNodeList getTableHints() {
return this.tableHints;
} | 3.68 |
flink_JobID_fromByteArray | /**
* Creates a new JobID from the given byte sequence. The byte sequence must be exactly 16 bytes
* long. The first eight bytes make up the lower part of the ID, while the next 8 bytes make up
* the upper part of the ID.
*
* @param bytes The byte sequence.
* @return A new JobID corresponding to the ID encoded in the bytes.
*/
public static JobID fromByteArray(byte[] bytes) {
return new JobID(bytes);
} | 3.68 |
flink_GSCommitRecoverable_getComponentBlobIds | /**
* Returns the list of component blob ids, which have to be resolved from the temporary bucket
* name, prefix, and component ids. Resolving them this way vs. storing the blob ids directly
* allows us to move in-progress blobs by changing options to point to new in-progress
* locations.
*
* @param options The GS file system options
* @return The list of component blob ids
*/
List<GSBlobIdentifier> getComponentBlobIds(GSFileSystemOptions options) {
String temporaryBucketName = BlobUtils.getTemporaryBucketName(finalBlobIdentifier, options);
List<GSBlobIdentifier> componentBlobIdentifiers =
componentObjectIds.stream()
.map(
temporaryObjectId ->
BlobUtils.getTemporaryObjectName(
finalBlobIdentifier, temporaryObjectId))
.map(
temporaryObjectName ->
new GSBlobIdentifier(
temporaryBucketName, temporaryObjectName))
.collect(Collectors.toList());
LOGGER.trace(
"Resolved component blob identifiers for blob {}: {}",
finalBlobIdentifier,
componentBlobIdentifiers);
return componentBlobIdentifiers;
} | 3.68 |
hadoop_AbfsOutputStream_getActiveBlock | /**
* Synchronized accessor to the active block.
*
* @return the active block; null if there isn't one.
*/
private synchronized DataBlocks.DataBlock getActiveBlock() {
return activeBlock;
} | 3.68 |
framework_ThemeResource_hashCode | /**
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return resourceID.hashCode();
} | 3.68 |
hbase_HBaseTestingUtility_assertRegionOnlyOnServer | /**
* Check to make sure the region is open on the specified region server, but not on any other one.
*/
public void assertRegionOnlyOnServer(final RegionInfo hri, final ServerName server,
final long timeout) throws IOException, InterruptedException {
long timeoutTime = EnvironmentEdgeManager.currentTime() + timeout;
while (true) {
List<RegionInfo> regions = getAdmin().getRegions(server);
if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
List<JVMClusterUtil.RegionServerThread> rsThreads =
getHBaseCluster().getLiveRegionServerThreads();
for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
HRegionServer rs = rsThread.getRegionServer();
if (server.equals(rs.getServerName())) {
continue;
}
Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
for (HRegion r : hrs) {
if (r.getRegionInfo().getRegionId() == hri.getRegionId()) {
throw new AssertionError("Region should not be double assigned");
}
}
}
return; // good, we are happy
}
long now = EnvironmentEdgeManager.currentTime();
if (now > timeoutTime) break;
Thread.sleep(10);
}
throw new AssertionError(
"Could not find region " + hri.getRegionNameAsString() + " on server " + server);
} | 3.68 |
flink_FlinkAssertions_assertThatFuture | /**
* Create assertion for {@link java.util.concurrent.CompletionStage}.
*
* @param actual the actual value.
* @param <T> the type of the value contained in the {@link
* java.util.concurrent.CompletionStage}.
* @return the created assertion object.
*/
public static <T> FlinkCompletableFutureAssert<T> assertThatFuture(CompletionStage<T> actual) {
return new FlinkCompletableFutureAssert<>(actual);
} | 3.68 |
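A short test sketch for the assertion above; the eventuallySucceeds() chaining is an assumption about FlinkCompletableFutureAssert's fluent API and is shown only to illustrate typical use.

// assumes a static import of FlinkAssertions.assertThatFuture and java.util.concurrent.CompletableFuture
CompletableFuture<String> future = CompletableFuture.completedFuture("done");
assertThatFuture(future).eventuallySucceeds().isEqualTo("done");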
hbase_StorageClusterStatusModel_setMaxHeapSizeMB | /**
* @param maxHeapSizeMB the maximum heap size, in MB
*/
public void setMaxHeapSizeMB(int maxHeapSizeMB) {
this.maxHeapSizeMB = maxHeapSizeMB;
} | 3.68 |
flink_MiniCluster_createMetricRegistry | /**
* Factory method to create the metric registry for the mini cluster.
*
* @param config The configuration of the mini cluster
* @param maximumMessageSizeInBytes the maximum message size
*/
protected MetricRegistryImpl createMetricRegistry(
Configuration config, long maximumMessageSizeInBytes) {
return new MetricRegistryImpl(
MetricRegistryConfiguration.fromConfiguration(config, maximumMessageSizeInBytes),
ReporterSetup.fromConfiguration(
config, miniClusterConfiguration.getPluginManager()));
} | 3.68 |
flink_MetricStore_getSubtaskMetricStore | /**
* Returns the {@link ComponentMetricStore} for the given job/task ID and subtask index.
*
* @param jobID job ID
* @param taskID task ID
* @param subtaskIndex subtask index
* @return SubtaskMetricStore for the given IDs and index, or null if no store for the given
* arguments exists
*/
public synchronized ComponentMetricStore getSubtaskMetricStore(
String jobID, String taskID, int subtaskIndex) {
JobMetricStore job = jobID == null ? null : jobs.get(jobID);
if (job == null) {
return null;
}
TaskMetricStore task = job.getTaskMetricStore(taskID);
if (task == null) {
return null;
}
return SubtaskMetricStore.unmodifiable(task.getSubtaskMetricStore(subtaskIndex));
} | 3.68 |
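A hypothetical lookup built on the accessor above; the job/vertex identifiers and the metric name are placeholders, and the getMetric(name, default) accessor on ComponentMetricStore is assumed.

ComponentMetricStore subtask = metricStore.getSubtaskMetricStore("<jobId>", "<vertexId>", 0);
if (subtask != null) {
    String numRecordsIn = subtask.getMetric("numRecordsIn", "0"); // "0" as default when the metric is absent
}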
hbase_FilterBase_isFamilyEssential | /**
* By default, we require all of the scan's column families to be present. Our subclasses may be more
* precise. {@inheritDoc}
*/
@Override
public boolean isFamilyEssential(byte[] name) throws IOException {
return true;
} | 3.68 |
hadoop_AbfsClientThrottlingIntercept_getWriteThrottler | /**
* Returns the analyzer for write operations.
* @return AbfsClientThrottlingAnalyzer for write.
*/
AbfsClientThrottlingAnalyzer getWriteThrottler() {
return writeThrottler;
} | 3.68 |
morf_XmlDataSetConsumer_outputTableMetaData | /**
* Serialise the meta data for a table.
*
* @param table The meta data to serialise.
* @param contentHandler Content handler to receive the meta data xml.
* @throws SAXException Propagates from SAX API calls.
*/
private void outputTableMetaData(Table table, ContentHandler contentHandler) throws SAXException {
AttributesImpl tableAttributes = new AttributesImpl();
tableAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.NAME_ATTRIBUTE, XmlDataSetNode.NAME_ATTRIBUTE,
XmlDataSetNode.STRING_TYPE, table.getName());
contentHandler.startElement(XmlDataSetNode.URI, XmlDataSetNode.METADATA_NODE, XmlDataSetNode.METADATA_NODE, tableAttributes);
for (Column column : table.columns()) {
emptyElement(contentHandler, XmlDataSetNode.COLUMN_NODE, buildColumnAttributes(column));
}
// we need to sort the indexes by name to ensure consistency, since indexes don't have an explicit "sequence" in databases.
List<Index> indexes = new ArrayList<>(table.indexes());
Collections.sort(indexes, new Comparator<Index>() {
@Override
public int compare(Index o1, Index o2) {
return o1.getName().compareTo(o2.getName());
}
});
for (Index index : indexes) {
emptyElement(contentHandler, XmlDataSetNode.INDEX_NODE, buildIndexAttributes(index));
}
contentHandler.endElement(XmlDataSetNode.URI, XmlDataSetNode.METADATA_NODE, XmlDataSetNode.METADATA_NODE);
} | 3.68 |
framework_Tree_expandItem | /**
* Expands an item.
*
* @param itemId
* the item id.
* @param sendChildTree
* flag to indicate if client needs subtree or not (may be
* cached)
* @return True if the expand operation succeeded
*/
private boolean expandItem(Object itemId, boolean sendChildTree) {
// Succeeds if the node is already expanded
if (isExpanded(itemId)) {
return true;
}
// Nodes that can not have children are not expandable
if (!areChildrenAllowed(itemId)) {
return false;
}
// Expands
expanded.add(itemId);
expandedItemId = itemId;
if (initialPaint) {
markAsDirty();
} else if (sendChildTree) {
requestPartialRepaint();
}
fireExpandEvent(itemId);
return true;
} | 3.68 |
cron-utils_ConstantsMapper_weekDayMapping | /**
* Performs weekday mapping between two weekday definitions.
*
* @param source - source weekday definition
* @param target - target weekday definition
* @param weekday - weekday value in the source definition's range
* @return int - mapped value
*/
public static int weekDayMapping(final WeekDay source, final WeekDay target, final int weekday) {
return source.mapTo(weekday, target);
} | 3.68 |
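A small mapping sketch; the QUARTZ_WEEK_DAY and JODATIME_WEEK_DAY constants and the Quartz numbering (1 = Sunday, so Monday = 2) are assumptions about the surrounding cron-utils API.

int jodaMonday = ConstantsMapper.weekDayMapping(
        ConstantsMapper.QUARTZ_WEEK_DAY,   // assumed predefined source definition
        ConstantsMapper.JODATIME_WEEK_DAY, // assumed predefined target definition
        2);                                // Monday in the Quartz numbering; expected to map to 1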
framework_VAbstractSplitPanel_convertToPercentage | /**
* Converts the given split position string (in pixels or percentage) to a
* float percentage value.
*
* @param pos the split position, e.g. "200px" or "50%"
* @return the position as a percentage of the panel's offset size
*/
private float convertToPercentage(String pos) {
if (pos.endsWith("px")) {
float pixelPosition = Float
.parseFloat(pos.substring(0, pos.length() - 2));
int offsetLength = orientation == Orientation.HORIZONTAL
? getOffsetWidth()
: getOffsetHeight();
// Take splitter size into account at the edge
if (pixelPosition + getSplitterSize() >= offsetLength) {
return 100;
}
return pixelPosition / offsetLength * 100;
} else {
assert pos.endsWith("%");
return Float.parseFloat(pos.substring(0, pos.length() - 1));
}
} | 3.68 |
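A worked illustration of the arithmetic above, with panel dimensions assumed for the example.

// Horizontal panel, getOffsetWidth() == 400, getSplitterSize() == 10 (assumed values):
// pos = "100px" -> 100 + 10 < 400, so the result is 100f / 400 * 100 = 25.0 (percent)
// pos = "395px" -> 395 + 10 >= 400, so the position is clamped to 100 (percent)
// pos = "33%"   -> parsed directly as 33.0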
hadoop_StageConfig_checkOpen | /**
* Verify that the config is not yet frozen.
*/
private void checkOpen() {
Preconditions.checkState(!frozen,
"StageConfig is now read-only");
} | 3.68 |
framework_TextFileProperty_isReadOnly | /*
* (non-Javadoc)
*
* @see com.vaadin.data.Property#isReadOnly()
*/
@Override
public boolean isReadOnly() {
return file == null || super.isReadOnly() || !file.canWrite();
} | 3.68 |
querydsl_AntMetaDataExporter_addCustomType | /**
* Adds a custom type to the Ant task configuration.
*/
public void addCustomType(CustomType customType) {
customTypes.add(customType);
} | 3.68 |
framework_WebBrowser_isLinux | /**
* Tests whether the user is using Linux.
*
* @return true if the user is using Linux, false if the user is not using
* Linux or if no information on the browser is present
*/
public boolean isLinux() {
return browserDetails.isLinux();
} | 3.68 |
hadoop_DiskBalancerWorkItem_getStartTime | /**
* Gets the recorded start time of execution.
* @return startTime
*/
public long getStartTime() {
return startTime;
} | 3.68 |
flink_HsFullSpillingStrategy_onMemoryUsageChanged | // When the amount of memory used exceeds the threshold, decide action based on global
// information. Otherwise, no need to take action.
@Override
public Optional<Decision> onMemoryUsageChanged(
int numTotalRequestedBuffers, int currentPoolSize) {
return numTotalRequestedBuffers < currentPoolSize * releaseThreshold
? Optional.of(Decision.NO_ACTION)
: Optional.empty();
} | 3.68 |
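A worked illustration of the threshold check, with a releaseThreshold value assumed for the example.

// Assuming releaseThreshold == 0.8 and currentPoolSize == 100:
// numTotalRequestedBuffers == 79 -> 79 < 80, so Optional.of(Decision.NO_ACTION) is returned
// numTotalRequestedBuffers == 85 -> 85 >= 80, so Optional.empty() is returned and the global decision applies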
hbase_ImmutableBytesWritable_toArray | /**
* Convert a list of byte arrays into an array of byte arrays
* @param array List of byte [].
* @return Array of byte [].
*/
public static byte[][] toArray(final List<byte[]> array) {
// List#toArray doesn't work on lists of byte [].
byte[][] results = new byte[array.size()][];
for (int i = 0; i < array.size(); i++) {
results[i] = array.get(i);
}
return results;
} | 3.68 |
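A brief usage sketch; Bytes.toBytes and Arrays.asList are used only to build illustrative input.

// org.apache.hadoop.hbase.util.Bytes, java.util.Arrays
List<byte[]> rows = Arrays.asList(Bytes.toBytes("row-1"), Bytes.toBytes("row-2"));
byte[][] asArray = ImmutableBytesWritable.toArray(rows); // asArray.length == 2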
hmily_ExtensionLoaderFactory_loadAll | /**
* Loads all extension implementations of the given service.
*
* @param <T> the type parameter
* @param service the service
* @return the list
*/
public static <T> List<T> loadAll(final Class<T> service) {
return ExtensionLoader.getExtensionLoader(service).loadAll(findClassLoader());
} | 3.68 |
framework_DownloadStream_getParameterNames | /**
* Gets the names of the parameters.
*
* @return Iterator of names or null if no parameters are set.
*/
public Iterator<String> getParameterNames() {
if (params != null) {
return params.keySet().iterator();
}
return null;
} | 3.68 |
graphhopper_Entity_getTimeField | /**
* Fetch the given column of the current row, and interpret it as a time in the format HH:MM:SS.
* @return the time value in seconds since midnight
*/
protected int getTimeField(String column, boolean required) throws IOException {
String str = getFieldCheckRequired(column, required);
int val = INT_MISSING;
if (str != null) {
String[] fields = str.split(":");
if (fields.length != 3) {
feed.errors.add(new TimeParseError(tableName, row, column));
} else {
try {
int hours = Integer.parseInt(fields[0]);
int minutes = Integer.parseInt(fields[1]);
int seconds = Integer.parseInt(fields[2]);
checkRangeInclusive(0, 72, hours); // GTFS hours can go past midnight. Some trains run for 3 days.
checkRangeInclusive(0, 59, minutes);
checkRangeInclusive(0, 59, seconds);
val = (hours * 60 * 60) + minutes * 60 + seconds;
} catch (NumberFormatException nfe) {
feed.errors.add(new TimeParseError(tableName, row, column));
}
}
}
return val;
} | 3.68 |
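A worked example of the parse, using an input value assumed for illustration.

// For str = "25:30:00": hours = 25 (allowed, GTFS times may run past midnight), minutes = 30, seconds = 0,
// so val = 25 * 3600 + 30 * 60 + 0 = 91800 seconds after midnight.
// A malformed value such as "7:45" (only two fields) records a TimeParseError and leaves val = INT_MISSING.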
flink_CompileUtils_compile | /**
* Compiles a generated code to a Class.
*
* @param cl the ClassLoader used to load the class
* @param name the class name
* @param code the generated code
* @param <T> the class type
* @return the compiled class
*/
@SuppressWarnings("unchecked")
public static <T> Class<T> compile(ClassLoader cl, String name, String code) {
try {
// The class name is part of the "code" and makes the string unique,
// to prevent class leaks we don't cache the class loader directly
// but only its hash code
final ClassKey classKey = new ClassKey(cl.hashCode(), code);
return (Class<T>) COMPILED_CLASS_CACHE.get(classKey, () -> doCompile(cl, name, code));
} catch (Exception e) {
throw new FlinkRuntimeException(e.getMessage(), e);
}
} | 3.68 |
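A hypothetical usage sketch; the generated class source and name are invented for illustration, and the reflective instantiation assumes the caller handles the checked exceptions.

String source = "public class GeneratedGreeter { public String greet() { return \"hi\"; } }";
Class<Object> clazz = CompileUtils.compile(
        Thread.currentThread().getContextClassLoader(), "GeneratedGreeter", source);
Object greeter = clazz.getDeclaredConstructor().newInstance(); // throws ReflectiveOperationException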