name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
hadoop_Utils_compatibleWith | /**
* Test compatibility.
*
* @param other
* The Version object to test compatibility with.
* @return true if both versions have the same major version number; false
* otherwise.
*/
public boolean compatibleWith(Version other) {
return major == other.major;
} | 3.68 |
hmily_GsonUtils_fromList | /**
 * Deserializes a JSON array string into a list of the given element type.
 *
 * @param <T>   the element type
 * @param json  the JSON array string
 * @param clazz the element class
 * @return the deserialized list
*/
public <T> List<T> fromList(final String json, final Class<T> clazz) {
return GSON.fromJson(json, TypeToken.getParameterized(List.class, clazz).getType());
} | 3.68 |
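
A minimal usage sketch for the deserializer above, assuming a plain `new Gson()` instance stands in for hmily's shared `GSON` field (the class name in the sketch is illustrative):

```java
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import java.util.List;

public class FromListDemo {
    // Stand-in for hmily's shared GSON instance (assumption of this sketch).
    private static final Gson GSON = new Gson();

    // Same pattern as the snippet: parameterize List with the element class at runtime.
    static <T> List<T> fromList(String json, Class<T> clazz) {
        return GSON.fromJson(json, TypeToken.getParameterized(List.class, clazz).getType());
    }

    public static void main(String[] args) {
        List<String> names = fromList("[\"alice\",\"bob\"]", String.class);
        System.out.println(names); // [alice, bob]
    }
}
```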
hadoop_IrqHandler_bind | /**
* Bind to the interrupt handler.
 * @throws IllegalArgumentException if the handler could not be set
*/
public void bind() {
Preconditions.checkState(signal == null, "Handler already bound");
try {
signal = new Signal(name);
Signal.handle(signal, this);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(
"Could not set handler for signal \"" + name + "\"."
+ "This can happen if the JVM has the -Xrs set.",
e);
}
} | 3.68 |
rocketmq-connect_JdbcDriverInfo_jdbcVersionAtLeast | /**
* Determine if the JDBC driver supports at least the specified major and minor version of the
* JDBC specifications. This can be used to determine whether or not to call JDBC methods.
*
* @param jdbcMajorVersion the required major version of the JDBC specification
* @param jdbcMinorVersion the required minor version of the JDBC specification
* @return true if the driver supports at least the specified version of the JDBC specification,
* or false if the driver supports an older version of the JDBC specification
*/
public boolean jdbcVersionAtLeast(
int jdbcMajorVersion,
int jdbcMinorVersion
) {
if (this.jdbcMajorVersion() > jdbcMajorVersion) {
return true;
}
if (jdbcMajorVersion == jdbcMajorVersion() && jdbcMinorVersion() >= jdbcMinorVersion) {
return true;
}
return false;
} | 3.68 |
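
The version comparison above can be exercised in isolation. A minimal sketch, assuming the driver-reported JDBC versions live in two plain int fields; the class name and constructor here are illustrative, not rocketmq-connect's actual `JdbcDriverInfo` API:

```java
// Illustrative stand-in for JdbcDriverInfo; only the version-comparison logic is mirrored.
public class JdbcVersionCheck {
    private final int jdbcMajorVersion;
    private final int jdbcMinorVersion;

    public JdbcVersionCheck(int jdbcMajorVersion, int jdbcMinorVersion) {
        this.jdbcMajorVersion = jdbcMajorVersion;
        this.jdbcMinorVersion = jdbcMinorVersion;
    }

    public boolean jdbcVersionAtLeast(int requiredMajor, int requiredMinor) {
        // Same two-step check as the snippet: a newer major version wins outright,
        // an equal major version falls back to comparing minor versions.
        if (jdbcMajorVersion > requiredMajor) {
            return true;
        }
        return jdbcMajorVersion == requiredMajor && jdbcMinorVersion >= requiredMinor;
    }

    public static void main(String[] args) {
        JdbcVersionCheck driver = new JdbcVersionCheck(4, 2);
        System.out.println(driver.jdbcVersionAtLeast(4, 0)); // true
        System.out.println(driver.jdbcVersionAtLeast(4, 3)); // false
        System.out.println(driver.jdbcVersionAtLeast(3, 9)); // true
    }
}
```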
morf_RenameTable_isTemporary | /**
* @see org.alfasoftware.morf.metadata.Table#isTemporary()
*/
@Override
public boolean isTemporary() {
return baseTable.isTemporary();
} | 3.68 |
hudi_HoodieHeartbeatClient_isHeartbeatStarted | /**
 * Whether the given heartbeat is started and not yet stopped.
 *
 * @param heartbeat The heartbeat to check.
 * @return Whether the heartbeat is started.
*/
private boolean isHeartbeatStarted(Heartbeat heartbeat) {
return heartbeat != null && heartbeat.isHeartbeatStarted() && !heartbeat.isHeartbeatStopped();
} | 3.68 |
flink_StreamingRuntimeContext_getTaskManagerRuntimeInfo | /**
* Returns the task manager runtime info of the task manager running this stream task.
*
* @return The task manager runtime info.
*/
public TaskManagerRuntimeInfo getTaskManagerRuntimeInfo() {
return taskEnvironment.getTaskManagerInfo();
} | 3.68 |
flink_Schema_resolve | /** Resolves the given {@link Schema} to a validated {@link ResolvedSchema}. */
public ResolvedSchema resolve(SchemaResolver resolver) {
return resolver.resolve(this);
} | 3.68 |
hbase_RegionInfo_getStartKey | /**
 * Gets the start key from the specified region name.
 * @param regionName the region name to extract the start key from
 * @return Start key.
*/
static byte[] getStartKey(final byte[] regionName) throws IOException {
return parseRegionName(regionName)[1];
} | 3.68 |
hbase_TableRegionModel_toString | /*
* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getName());
sb.append(" [\n id=");
sb.append(id);
sb.append("\n startKey='");
sb.append(Bytes.toString(startKey));
sb.append("'\n endKey='");
sb.append(Bytes.toString(endKey));
if (location != null) {
sb.append("'\n location='");
sb.append(location);
}
sb.append("'\n]\n");
return sb.toString();
} | 3.68 |
rocketmq-connect_StringConverter_fromConnectData | /**
* Convert a rocketmq Connect data object to a native object for serialization.
*
* @param topic the topic associated with the data
* @param schema the schema for the value
* @param value the value to convert
* @return the serialized value
*/
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
try {
return serializer.serialize(topic, value == null ? null : value.toString());
} catch (Exception e) {
throw new ConnectException("Failed to serialize to a string: ", e);
}
} | 3.68 |
graphhopper_VectorTile_getValuesOrBuilder | /**
* <pre>
* Dictionary encoding for values
* </pre>
*
* <code>repeated .vector_tile.Tile.Value values = 4;</code>
*/
public vector_tile.VectorTile.Tile.ValueOrBuilder getValuesOrBuilder(
int index) {
      if (valuesBuilder_ == null) {
        return values_.get(index);
      } else {
        return valuesBuilder_.getMessageOrBuilder(index);
      }
} | 3.68 |
hbase_MobFileCache_closeFile | /**
* Closes a mob file.
* @param file The mob file that needs to be closed.
*/
public void closeFile(MobFile file) {
IdLock.Entry lockEntry = null;
try {
if (!isCacheEnabled) {
file.close();
} else {
lockEntry = keyLock.getLockEntry(hashFileName(file.getFileName()));
file.close();
}
} catch (IOException e) {
LOG.error("MobFileCache, Exception happen during close " + file.getFileName(), e);
} finally {
if (lockEntry != null) {
keyLock.releaseLockEntry(lockEntry);
}
}
} | 3.68 |
framework_AbstractSelect_setNewItemHandler | /**
 * TODO refine doc. Sets the new item handler, which is called when the
 * user adds a new item in {@code newItemAllowed} mode.
*
* @param newItemHandler
* The new item handler
*/
public void setNewItemHandler(NewItemHandler newItemHandler) {
this.newItemHandler = newItemHandler;
} | 3.68 |
flink_DefaultCheckpointPlanCalculator_checkTasksStarted | /**
 * Checks if all tasks to trigger are already in RUNNING state. This method should be
 * called from the JobMaster main thread executor.
*
* @throws CheckpointException if some tasks to trigger have not turned into RUNNING yet.
*/
private void checkTasksStarted(List<Execution> toTrigger) throws CheckpointException {
for (Execution execution : toTrigger) {
if (execution.getState() != ExecutionState.RUNNING) {
throw new CheckpointException(
String.format(
"Checkpoint triggering task %s of job %s is not being executed at the moment. "
+ "Aborting checkpoint.",
execution.getVertex().getTaskNameWithSubtaskIndex(), jobId),
CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING);
}
}
} | 3.68 |
hbase_CheckAndMutate_getAction | /** Returns the action done if check succeeds */
public Row getAction() {
return action;
} | 3.68 |
framework_VComboBox_selectNextPage | /*
* Show the next page.
*/
private void selectNextPage() {
if (hasNextPage()) {
dataReceivedHandler.setNavigationCallback(
() -> suggestionPopup.selectFirstItem());
filterOptions(currentPage + 1, lastFilter);
}
} | 3.68 |
flink_SourceOperatorStreamTask_maybeResumeProcessing | /** Resumes processing if it was blocked before or else is a no-op. */
private void maybeResumeProcessing() {
assert (mailboxProcessor.isMailboxThread());
if (triggeredCheckpoints.isEmpty()) {
waitForRPC.complete(null);
}
} | 3.68 |
morf_H2_extractJdbcUrl | /**
* We don't need to support extracting connection details from H2. It's only
* used for in-memory databases currently.
*
* @see org.alfasoftware.morf.jdbc.DatabaseType#extractJdbcUrl(java.lang.String)
*/
@Override
public Optional<JdbcUrlElements> extractJdbcUrl(String url) {
return Optional.empty();
} | 3.68 |
querydsl_ExpressionUtils_neConst | /**
* Create a {@code left != constant} expression
*
* @param <D> type of expression
* @param left lhs of expression
* @param constant rhs of expression
* @return left != constant
*/
public static <D> Predicate neConst(Expression<D> left, D constant) {
return ne(left, ConstantImpl.create(constant));
} | 3.68 |
hbase_OrderedFloat32_encodeFloat | /**
* Write instance {@code val} into buffer {@code buff}.
* @param dst the {@link PositionedByteRange} to write to
* @param val the value to write to {@code dst}
* @return the number of bytes written
*/
public int encodeFloat(PositionedByteRange dst, float val) {
return OrderedBytes.encodeFloat32(dst, val, order);
} | 3.68 |
hadoop_AbstractS3ACommitter_precommitCheckPendingFiles | /**
* Run a precommit check that all files are loadable.
 * This check avoids the situation where the inability to read
 * a file only surfaces partway through the job commit and so
 * leaves the destination tainted.
* @param commitContext commit context
* @param pending the pending operations
* @throws IOException any failure
*/
protected void precommitCheckPendingFiles(
final CommitContext commitContext,
final ActiveCommit pending) throws IOException {
FileSystem sourceFS = pending.getSourceFS();
try (DurationInfo ignored =
new DurationInfo(LOG, "Preflight Load of pending files")) {
TaskPool.foreach(pending.getSourceFiles())
.stopOnFailure()
.suppressExceptions(false)
.executeWith(commitContext.getOuterSubmitter())
.run(status -> PersistentCommitData.load(sourceFS, status,
commitContext.getPendingSetSerializer()));
}
} | 3.68 |
hbase_TBoundedThreadPoolServer_run | /**
* Loops on processing a client forever
*/
@Override
public void run() {
TProcessor processor = null;
TTransport inputTransport = null;
TTransport outputTransport = null;
TProtocol inputProtocol = null;
TProtocol outputProtocol = null;
try {
processor = processorFactory_.getProcessor(client);
inputTransport = inputTransportFactory_.getTransport(client);
outputTransport = outputTransportFactory_.getTransport(client);
inputProtocol = inputProtocolFactory_.getProtocol(inputTransport);
outputProtocol = outputProtocolFactory_.getProtocol(outputTransport);
// we check stopped_ first to make sure we're not supposed to be shutting
// down. this is necessary for graceful shutdown.
while (true) {
if (stopped) {
break;
}
processor.process(inputProtocol, outputProtocol);
}
} catch (TTransportException ttx) {
// Assume the client died and continue silently
} catch (TException tx) {
LOG.error("Thrift error occurred during processing of message.", tx);
} catch (Exception x) {
LOG.error("Error occurred during processing of message.", x);
}
if (inputTransport != null) {
inputTransport.close();
}
if (outputTransport != null) {
outputTransport.close();
}
} | 3.68 |
framework_Escalator_findRowContainer | /**
* Returns the {@link RowContainer} which contains the element.
*
* @param element
* the element to check for
* @return the container the element is in or <code>null</code> if element
* is not present in any container.
*/
public RowContainer findRowContainer(Element element) {
if (getHeader().getElement() != element
&& getHeader().getElement().isOrHasChild(element)) {
return getHeader();
} else if (getBody().getElement() != element
&& getBody().getElement().isOrHasChild(element)) {
return getBody();
} else if (getFooter().getElement() != element
&& getFooter().getElement().isOrHasChild(element)) {
return getFooter();
}
return null;
} | 3.68 |
cron-utils_SecondsDescriptor_createAndDescription | /**
 * Creates a human-readable description for an And element.
*
* @param builder - StringBuilder instance to which description will be appended
* @param expressions - field expressions
* @return same StringBuilder instance as parameter
*/
@VisibleForTesting
StringBuilder createAndDescription(final StringBuilder builder, final List<FieldExpression> expressions) {
if ((expressions.size() - 2) >= 0) {
for (int j = 0; j < expressions.size() - 2; j++) {
builder.append(String.format(" %s, ", describe(expressions.get(j), true)));
}
builder.append(String.format(" %s ", describe(expressions.get(expressions.size() - 2), true)));
}
builder.append(String.format(" %s ", bundle.getString("and")));
builder.append(describe(expressions.get(expressions.size() - 1), true));
return builder;
} | 3.68 |
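
The joining pattern above (comma-separate all but the last two elements, then append a localized "and" before the final one) can be traced on plain strings. A sketch with a hard-coded "and" standing in for the resource-bundle lookup and plain strings in place of field expressions:

```java
import java.util.List;

// Plain-string version of the joining pattern; output keeps the same spacing as the snippet.
public class AndDescriptionDemo {
    static String joinWithAnd(List<String> parts) {
        StringBuilder builder = new StringBuilder();
        if (parts.size() - 2 >= 0) {
            for (int j = 0; j < parts.size() - 2; j++) {
                builder.append(String.format(" %s, ", parts.get(j)));
            }
            builder.append(String.format(" %s ", parts.get(parts.size() - 2)));
        }
        builder.append(String.format(" %s ", "and"));
        builder.append(parts.get(parts.size() - 1));
        return builder.toString();
    }

    public static void main(String[] args) {
        System.out.println(joinWithAnd(List.of("1", "15", "30"))); // " 1,  15  and 30"
    }
}
```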
hbase_MetricsConnection_getMetaCacheNumClearServer | /** metaCacheNumClearServer metric */
public Counter getMetaCacheNumClearServer() {
return metaCacheNumClearServer;
} | 3.68 |
hadoop_ExecutingStoreOperation_apply | /**
* Apply calls {@link #execute()}.
* @return the result.
* @throws IOException IO problem
*/
@Override
public final T apply() throws IOException {
return execute();
} | 3.68 |
flink_InPlaceMutableHashTable_compactOrThrow | /**
* If there is wasted space (due to updated records not fitting in their old places), then do a
* compaction. Else, throw EOFException to indicate that memory ran out.
*
* @throws IOException
*/
private void compactOrThrow() throws IOException {
if (holes > (double) recordArea.getTotalSize() * 0.05) {
rebuild();
} else {
throw new EOFException(
"InPlaceMutableHashTable memory ran out. " + getMemoryConsumptionString());
}
} | 3.68 |
framework_DownloadStream_getParameter | /**
 * Gets a parameter for the download stream. Parameters are optional information
* about the downloadable stream and their meaning depends on the used
* adapter. For example in WebAdapter they are interpreted as HTTP response
* headers.
*
* @param name
 *            the name of the parameter to get.
* @return Value of the parameter or null if the parameter does not exist.
*/
public String getParameter(String name) {
if (params != null) {
return params.get(name);
}
return null;
} | 3.68 |
hbase_HRegionFileSystem_cleanupDaughterRegion | /**
* Remove daughter region
* @param regionInfo daughter {@link RegionInfo}
*/
void cleanupDaughterRegion(final RegionInfo regionInfo) throws IOException {
Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
throw new IOException("Failed delete of " + regionDir);
}
} | 3.68 |
morf_DataValueLookupBuilderImpl_setAtIndex | /**
* Writes the given value at the specified index. Assumes that the array
* has sufficient size.
*/
private void setAtIndex(int index, Object value) {
data[index] = value;
} | 3.68 |
flink_TypeInferenceExtractor_forTableFunction | /** Extracts a type inference from a {@link TableFunction}. */
public static TypeInference forTableFunction(
DataTypeFactory typeFactory, Class<? extends TableFunction<?>> function) {
final FunctionMappingExtractor mappingExtractor =
new FunctionMappingExtractor(
typeFactory,
function,
UserDefinedFunctionHelper.TABLE_EVAL,
createParameterSignatureExtraction(0),
null,
createGenericResultExtraction(TableFunction.class, 0, true),
createParameterVerification());
return extractTypeInference(mappingExtractor);
} | 3.68 |
flink_SkipListUtils_putKeyLen | /**
* Puts the length of key to the key space.
*
* @param memorySegment memory segment for key space.
* @param offset offset of key space in the memory segment.
* @param keyLen length of key.
*/
public static void putKeyLen(MemorySegment memorySegment, int offset, int keyLen) {
memorySegment.putInt(offset + KEY_LEN_OFFSET, keyLen);
} | 3.68 |
dubbo_ClassUtils_getAllInterfaces | /**
* Get all interfaces from the specified type
*
* @param type the specified type
* @param interfaceFilters the filters for interfaces
* @return non-null read-only {@link Set}
* @since 2.7.6
*/
public static Set<Class<?>> getAllInterfaces(Class<?> type, Predicate<Class<?>>... interfaceFilters) {
if (type == null || type.isPrimitive()) {
return emptySet();
}
Set<Class<?>> allInterfaces = new LinkedHashSet<>();
Set<Class<?>> resolved = new LinkedHashSet<>();
Queue<Class<?>> waitResolve = new LinkedList<>();
resolved.add(type);
Class<?> clazz = type;
while (clazz != null) {
Class<?>[] interfaces = clazz.getInterfaces();
if (isNotEmpty(interfaces)) {
// add current interfaces
Arrays.stream(interfaces).filter(resolved::add).forEach(cls -> {
allInterfaces.add(cls);
waitResolve.add(cls);
});
}
// add all super classes to waitResolve
getAllSuperClasses(clazz).stream().filter(resolved::add).forEach(waitResolve::add);
clazz = waitResolve.poll();
}
return filterAll(allInterfaces, interfaceFilters);
} | 3.68 |
flink_StreamOperatorFactory_isInputTypeConfigurable | /** If the stream operator need to be configured with the data type they will operate on. */
default boolean isInputTypeConfigurable() {
return false;
} | 3.68 |
framework_VAccordion_hide | /**
* Hides the stack item content but does not close the stack item.
*
* @deprecated This method is not called by the framework code anymore.
*/
@Deprecated
public void hide() {
content.getStyle().setVisibility(Visibility.HIDDEN);
} | 3.68 |
hadoop_NamenodeStatusReport_setHAServiceState | /**
* Set the HA service state.
*
* @param state The HA service state to set.
*/
public void setHAServiceState(HAServiceState state) {
this.status = state;
this.haStateValid = true;
} | 3.68 |
flink_PrintStyle_tableauWithDataInferredColumnWidths | /**
* Like {@link #tableauWithDataInferredColumnWidths(ResolvedSchema, RowDataToStringConverter,
* int, boolean, boolean)}, but using default values.
*
* <p><b>NOTE:</b> please make sure the data to print is small enough to be stored in java heap
* memory.
*/
static TableauStyle tableauWithDataInferredColumnWidths(
ResolvedSchema schema, RowDataToStringConverter converter) {
return PrintStyle.tableauWithDataInferredColumnWidths(
schema, converter, DEFAULT_MAX_COLUMN_WIDTH, false, false);
} | 3.68 |
hbase_StateMachineProcedure_isRollbackSupported | /**
* Used by the default implementation of abort() to know if the current state can be aborted and
* rollback can be triggered.
*/
protected boolean isRollbackSupported(final TState state) {
return false;
} | 3.68 |
hadoop_ResourceUsageMetrics_setHeapUsage | /**
* Set the total heap usage.
*/
public void setHeapUsage(long usage) {
heapUsage = usage;
} | 3.68 |
hudi_AbstractHoodieLogRecordReader_scanInternal | /**
* @param keySpecOpt specifies target set of keys to be scanned
 * @param skipProcessingBlocks controls whether (delta) blocks actually have to be processed
*/
protected final void scanInternal(Option<KeySpec> keySpecOpt, boolean skipProcessingBlocks) {
synchronized (this) {
if (enableOptimizedLogBlocksScan) {
scanInternalV2(keySpecOpt, skipProcessingBlocks);
} else {
scanInternalV1(keySpecOpt);
}
}
} | 3.68 |
framework_VCalendar_updateMonthGrid | /**
* Updates the events in the Month view.
*
* @param daysCount
* How many days there are
 * @param days
 *            The days to render
 * @param today
 *            Today's date
*/
@SuppressWarnings("deprecation")
public void updateMonthGrid(int daysCount, List<CalendarDay> days,
Date today) {
int columns = getLastDayNumber() - getFirstDayNumber() + 1;
rows = (int) Math.ceil(daysCount / (double) 7);
monthGrid = new MonthGrid(this, rows, columns);
monthGrid.setEnabled(!isDisabledOrReadOnly());
weekToolbar.removeAllRows();
int pos = 0;
boolean monthNameDrawn = true;
boolean firstDayFound = false;
boolean lastDayFound = false;
for (CalendarDay day : days) {
String date = day.getDate();
Date d = dateformat_date.parse(date);
int dayOfWeek = day.getDayOfWeek();
int week = day.getWeek();
int dayOfMonth = d.getDate();
// reset at start of each month
if (dayOfMonth == 1) {
monthNameDrawn = false;
if (firstDayFound) {
lastDayFound = true;
}
firstDayFound = true;
}
if (dayOfWeek < getFirstDayNumber()
|| dayOfWeek > getLastDayNumber()) {
continue;
}
int y = (pos / columns);
int x = pos - (y * columns);
if (x == 0 && daysCount > 7) {
// Add week to weekToolbar for navigation
weekToolbar.addWeek(week, day.getYearOfWeek());
}
final SimpleDayCell cell = new SimpleDayCell(this, y, x);
cell.setMonthGrid(monthGrid);
cell.setDate(d);
cell.addDomHandler(new ContextMenuHandler() {
@Override
public void onContextMenu(ContextMenuEvent event) {
if (mouseEventListener != null) {
event.preventDefault();
event.stopPropagation();
mouseEventListener.contextMenu(event, cell);
}
}
}, ContextMenuEvent.getType());
if (!firstDayFound) {
cell.addStyleDependentName("prev-month");
} else if (lastDayFound) {
cell.addStyleDependentName("next-month");
}
if (dayOfMonth >= 1 && !monthNameDrawn) {
cell.setMonthNameVisible(true);
monthNameDrawn = true;
}
if (today.getDate() == dayOfMonth && today.getYear() == d.getYear()
&& today.getMonth() == d.getMonth()) {
cell.setToday(true);
}
monthGrid.setWidget(y, x, cell);
pos++;
}
} | 3.68 |
hbase_HRegion_createHRegion | /**
* Create a region under the given table directory.
*/
public static HRegion createHRegion(Configuration conf, RegionInfo regionInfo, FileSystem fs,
Path tableDir, TableDescriptor tableDesc) throws IOException {
LOG.info("Creating {}, tableDescriptor={}, under table dir {}", regionInfo, tableDesc,
tableDir);
HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo);
HRegion region = HRegion.newHRegion(tableDir, null, fs, conf, regionInfo, tableDesc, null);
return region;
} | 3.68 |
hadoop_BufferData_throwIfStateIncorrect | /**
* Helper that asserts the current state is one of the expected values.
*
* @param states the collection of allowed states.
*
* @throws IllegalArgumentException if states is null.
*/
public void throwIfStateIncorrect(State... states) {
Validate.checkNotNull(states, "states");
if (this.stateEqualsOneOf(states)) {
return;
}
List<String> statesStr = new ArrayList<String>();
for (State s : states) {
statesStr.add(s.toString());
}
String message = String.format(
"Expected buffer state to be '%s' but found: %s",
String.join(" or ", statesStr), this);
throw new IllegalStateException(message);
} | 3.68 |
flink_HiveParserSemanticAnalyzer_processTable | /**
 * Goes through the tabref tree and finds the alias for the table. Once found, it records the
 * table name -> alias association in aliasToTabs. It also makes an association from the alias to
* the table AST in parse info.
*/
private String processTable(HiveParserQB qb, HiveParserASTNode tabref)
throws SemanticException {
// For each table reference get the table name
// and the alias (if alias is not present, the table name
// is used as an alias)
int[] indexes = findTabRefIdxs(tabref);
int aliasIndex = indexes[0];
int propsIndex = indexes[1];
int tsampleIndex = indexes[2];
int ssampleIndex = indexes[3];
HiveParserASTNode tableTree = (HiveParserASTNode) (tabref.getChild(0));
String qualifiedTableName =
getUnescapedName(
tableTree,
catalogRegistry.getCurrentCatalog(),
catalogRegistry.getCurrentDatabase())
.toLowerCase();
String originTableName = getUnescapedOriginTableName(tableTree);
String alias = findSimpleTableName(tabref, aliasIndex);
if (propsIndex >= 0) {
Tree propsAST = tabref.getChild(propsIndex);
Map<String, String> props =
HiveParserDDLSemanticAnalyzer.getProps(
(HiveParserASTNode) propsAST.getChild(0));
// We get the information from Calcite.
if ("TRUE".equals(props.get("insideView"))) {
qb.getAliasInsideView().add(alias.toLowerCase());
}
qb.setTabProps(alias, props);
}
// If the alias is already there then we have a conflict
if (qb.exists(alias)) {
throw new SemanticException(
HiveParserErrorMsg.getMsg(
ErrorMsg.AMBIGUOUS_TABLE_ALIAS, tabref.getChild(aliasIndex)));
}
if (tsampleIndex >= 0) {
HiveParserASTNode sampleClause = (HiveParserASTNode) tabref.getChild(tsampleIndex);
ArrayList<HiveParserASTNode> sampleCols = new ArrayList<>();
if (sampleClause.getChildCount() > 2) {
for (int i = 2; i < sampleClause.getChildCount(); i++) {
sampleCols.add((HiveParserASTNode) sampleClause.getChild(i));
}
}
// TODO: For now only support sampling on up to two columns
// Need to change it to list of columns
if (sampleCols.size() > 2) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
(HiveParserASTNode) tabref.getChild(0),
ErrorMsg.SAMPLE_RESTRICTION.getMsg()));
}
qb.getParseInfo().setTabSample(alias);
if (unparseTranslator.isEnabled()) {
for (HiveParserASTNode sampleCol : sampleCols) {
unparseTranslator.addIdentifierTranslation(
(HiveParserASTNode) sampleCol.getChild(0));
}
}
} else if (ssampleIndex >= 0) {
HiveParserASTNode sampleClause = (HiveParserASTNode) tabref.getChild(ssampleIndex);
Tree type = sampleClause.getChild(0);
Tree numerator = sampleClause.getChild(1);
String value = unescapeIdentifier(numerator.getText());
SplitSample sample;
if (type.getType() == HiveASTParser.TOK_PERCENT) {
double percent = Double.parseDouble(value);
if (percent < 0 || percent > 100) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
(HiveParserASTNode) numerator,
"Sampling percentage should be between 0 and 100"));
}
int seedNum = conf.getIntVar(ConfVars.HIVESAMPLERANDOMNUM);
sample = new SplitSample(percent, seedNum);
} else if (type.getType() == HiveASTParser.TOK_ROWCOUNT) {
sample = new SplitSample(Integer.parseInt(value));
} else {
assert type.getType() == HiveASTParser.TOK_LENGTH;
long length = Integer.parseInt(value.substring(0, value.length() - 1));
char last = value.charAt(value.length() - 1);
if (last == 'k' || last == 'K') {
length <<= 10;
} else if (last == 'm' || last == 'M') {
length <<= 20;
} else if (last == 'g' || last == 'G') {
length <<= 30;
}
int seedNum = conf.getIntVar(ConfVars.HIVESAMPLERANDOMNUM);
sample = new SplitSample(length, seedNum);
}
String aliasId = getAliasId(alias, qb);
nameToSplitSample.put(aliasId, sample);
}
// Insert this map into the stats
qb.setTabAlias(alias, originTableName, qualifiedTableName);
if (qb.isInsideView()) {
qb.getAliasInsideView().add(alias.toLowerCase());
}
qb.addAlias(alias);
qb.getParseInfo().setSrcForAlias(alias, tableTree);
// if alias to CTE contains the table name, we do not do the translation because
// cte is actually a subquery.
if (!this.aliasToCTEs.containsKey(qualifiedTableName)) {
unparseTranslator.addTableNameTranslation(
tableTree,
catalogRegistry.getCurrentCatalog(),
catalogRegistry.getCurrentDatabase());
if (aliasIndex != 0) {
unparseTranslator.addIdentifierTranslation(
(HiveParserASTNode) tabref.getChild(aliasIndex));
}
}
return alias;
} | 3.68 |
morf_SchemaChangeSequence_getChanges | /**
* @return the changes
*/
public List<SchemaChange> getChanges() {
return changes;
} | 3.68 |
graphhopper_VectorTile_getFeaturesOrBuilder | /**
* <pre>
* The actual features in this tile.
* </pre>
*
* <code>repeated .vector_tile.Tile.Feature features = 2;</code>
*/
public vector_tile.VectorTile.Tile.FeatureOrBuilder getFeaturesOrBuilder(
int index) {
      if (featuresBuilder_ == null) {
        return features_.get(index);
      } else {
        return featuresBuilder_.getMessageOrBuilder(index);
      }
} | 3.68 |
morf_SqlUtils_width | /**
* Specifies the width and scale of the parameter and
* returns the constructed parameter.
*
* @param width The width
* @param scale The scale
* @return the {@link SqlParameter}.
*/
public SqlParameter width(int width, int scale) {
return new SqlParameter(column(getMetadata().getName(), getMetadata().getType(), width, scale));
} | 3.68 |
pulsar_LinuxInfoUtils_getTotalCpuLimit | /**
* Get total cpu limit.
* @param isCGroupsEnabled Whether CGroup is enabled
* @return Total cpu limit
*/
public static double getTotalCpuLimit(boolean isCGroupsEnabled) {
if (isCGroupsEnabled) {
try {
long quota;
long period;
if (metrics != null && getCpuQuotaMethod != null && getCpuPeriodMethod != null) {
quota = (long) getCpuQuotaMethod.invoke(metrics);
period = (long) getCpuPeriodMethod.invoke(metrics);
} else {
quota = readLongFromFile(Paths.get(CGROUPS_CPU_LIMIT_QUOTA_PATH));
period = readLongFromFile(Paths.get(CGROUPS_CPU_LIMIT_PERIOD_PATH));
}
if (quota > 0) {
return 100.0 * quota / period;
}
} catch (Exception e) {
log.warn("[LinuxInfo] Failed to read CPU quotas from cgroup", e);
// Fallback to availableProcessors
}
}
        // Fallback to the number of processors reported by the JVM
return 100 * Runtime.getRuntime().availableProcessors();
} | 3.68 |
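
Only the quota/period arithmetic determines the returned percentage. A standalone sketch of that calculation, with the cgroup reads and reflection replaced by plain parameters (class and method names are illustrative):

```java
// A quota of 150000us per 100000us period corresponds to 150% of one CPU;
// a non-positive quota falls back to 100% per available processor.
public class CpuLimitDemo {
    static double cpuLimitPercent(long quotaUs, long periodUs, int availableProcessors) {
        if (quotaUs > 0) {
            return 100.0 * quotaUs / periodUs;
        }
        return 100.0 * availableProcessors;
    }

    public static void main(String[] args) {
        System.out.println(cpuLimitPercent(150_000, 100_000, 8)); // 150.0
        System.out.println(cpuLimitPercent(-1, 100_000, 8));      // 800.0
    }
}
```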
flink_PbCodegenUtils_pbDefaultValueCode | /**
* Get protobuf default value from {@link FieldDescriptor}.
*
* @return The java code phrase which represents default value calculation.
*/
public static String pbDefaultValueCode(
FieldDescriptor fieldDescriptor, PbFormatContext pbFormatContext)
throws PbCodegenException {
String nullLiteral = pbFormatContext.getPbFormatConfig().getWriteNullStringLiterals();
switch (fieldDescriptor.getJavaType()) {
case MESSAGE:
return PbFormatUtils.getFullJavaName(fieldDescriptor.getMessageType())
+ ".getDefaultInstance()";
case INT:
return "0";
case LONG:
return "0L";
case STRING:
return "\"" + nullLiteral + "\"";
case ENUM:
return PbFormatUtils.getFullJavaName(fieldDescriptor.getEnumType())
+ ".values()[0]";
case FLOAT:
return "0.0f";
case DOUBLE:
return "0.0d";
case BYTE_STRING:
return "ByteString.EMPTY";
case BOOLEAN:
return "false";
default:
throw new PbCodegenException(
"do not support field type: " + fieldDescriptor.getJavaType());
}
} | 3.68 |
framework_CssLayoutConnector_getState | /*
* (non-Javadoc)
*
* @see com.vaadin.client.ui.AbstractLayoutConnector#getState()
*/
@Override
public CssLayoutState getState() {
return (CssLayoutState) super.getState();
} | 3.68 |
flink_InputTypeStrategies_commonType | /**
* An {@link InputTypeStrategy} that expects {@code count} arguments that have a common type.
*/
public static InputTypeStrategy commonType(int count) {
return new CommonInputTypeStrategy(ConstantArgumentCount.of(count));
} | 3.68 |
framework_Embedded_setArchive | /**
* This attribute may be used to specify a space-separated list of URIs for
* archives containing resources relevant to the object, which may include
* the resources specified by the classid and data attributes. Preloading
* archives will generally result in reduced load times for objects.
* Archives specified as relative URIs should be interpreted relative to the
* codebase attribute.
*
* @param archive
* Space-separated list of URIs with resources relevant to the
* object
*/
public void setArchive(String archive) {
String oldArchive = getArchive();
if (archive != oldArchive
|| (archive != null && !archive.equals(oldArchive))) {
getState().archive = archive;
}
} | 3.68 |
hudi_ArrayColumnReader_fetchNextValue | /**
 * Reads a single value from the parquet page and puts it into lastValue. Returns a boolean
 * indicating whether there are more values to read (true).
*
 * @param category the logical type of the value to read
* @return boolean
* @throws IOException
*/
private boolean fetchNextValue(LogicalType category) throws IOException {
int left = readPageIfNeed();
if (left > 0) {
// get the values of repetition and definitionLevel
readRepetitionAndDefinitionLevels();
// read the data if it isn't null
if (definitionLevel == maxDefLevel) {
if (isCurrentPageDictionaryEncoded) {
lastValue = dataColumn.readValueDictionaryId();
} else {
lastValue = readPrimitiveTypedRow(category);
}
} else {
lastValue = null;
}
return true;
} else {
eof = true;
return false;
}
} | 3.68 |
hadoop_ReservedContainerCandidatesSelector_getPreemptionCandidatesOnNode | /**
 * Try to check if we can preempt resources for the reserved container on the given node
 * @param node the node to check
 * @param queueToPreemptableResourceByPartition a map of
 *          <queueName, <partition, preemptable-resource>>
 * @param readOnly if true, preemptable resources are not modified while
 *          selecting candidates
* @return NodeForPreemption if it's possible to preempt containers on the node
* to satisfy reserved resource
*/
private NodeForPreemption getPreemptionCandidatesOnNode(
FiCaSchedulerNode node,
Map<String, Map<String, Resource>> queueToPreemptableResourceByPartition,
Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
Resource totalPreemptionAllowed, boolean readOnly) {
RMContainer reservedContainer = node.getReservedContainer();
Resource available = Resources.clone(node.getUnallocatedResource());
Resource totalSelected = Resources.createResource(0);
List<RMContainer> sortedRunningContainers =
node.getCopiedListOfRunningContainers();
List<RMContainer> selectedContainers = new ArrayList<>();
Map<ContainerId, RMContainer> killableContainers =
node.getKillableContainers();
    // Sort running containers by launch time; we prefer to preempt recently
    // launched containers
Collections.sort(sortedRunningContainers, new Comparator<RMContainer>() {
@Override public int compare(RMContainer o1, RMContainer o2) {
return -1 * o1.getContainerId().compareTo(o2.getContainerId());
}
});
// First check: can we preempt containers to allocate the
// reservedContainer?
boolean canAllocateReservedContainer = false;
// At least, we can get available + killable resources from this node
Resource cur = Resources.add(available, node.getTotalKillableResources());
String partition = node.getPartition();
    // Avoid preempting any container if required <= available + killable
if (Resources.fitsIn(rc, reservedContainer.getReservedResource(), cur)) {
return null;
}
// Extra cost of am container preemption
float amPreemptionCost = 0f;
for (RMContainer c : sortedRunningContainers) {
String containerQueueName = c.getQueueName();
// Skip container if it is already marked killable
if (killableContainers.containsKey(c.getContainerId())) {
continue;
}
      // An alternative approach is to add a "penalty cost" if an AM container is
      // selected. Here, for safety, avoid preempting AM containers in all cases
if (c.isAMContainer()) {
LOG.debug("Skip selecting AM container on host={} AM container={}",
node.getNodeID(), c.getContainerId());
continue;
}
// Can we preempt container c?
// Check if we have quota to preempt this container
boolean canPreempt = tryToPreemptFromQueue(containerQueueName, partition,
queueToPreemptableResourceByPartition, c.getAllocatedResource(),
totalPreemptionAllowed, readOnly);
// If we can, add to selected container, and change resource accordingly.
if (canPreempt) {
if (!CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(c,
selectedCandidates)) {
if (!readOnly) {
selectedContainers.add(c);
}
Resources.addTo(totalSelected, c.getAllocatedResource());
}
Resources.addTo(cur, c.getAllocatedResource());
if (Resources.fitsIn(rc,
reservedContainer.getReservedResource(), cur)) {
canAllocateReservedContainer = true;
break;
}
}
}
if (!canAllocateReservedContainer) {
if (!readOnly) {
// Revert queue preemption quotas
for (RMContainer c : selectedContainers) {
Resource res = getPreemptableResource(c.getQueueName(), partition,
queueToPreemptableResourceByPartition);
if (null == res) {
// This shouldn't happen in normal cases, one possible cause is
// container moved to different queue while executing preemption logic.
// Ignore such failures.
continue;
}
Resources.addTo(res, c.getAllocatedResource());
}
}
return null;
}
float ratio = Resources.ratio(rc, totalSelected,
reservedContainer.getReservedResource());
// Compute preemption score
NodeForPreemption nfp = new NodeForPreemption(ratio + amPreemptionCost,
node, selectedContainers);
return nfp;
} | 3.68 |
hbase_ChaosAgent_initChaosAgent | /**
 * Sets global params, initiates the connection with ZooKeeper, and then does registration.
* @param conf initial configuration to use
* @param quorum ZK Quorum
* @param agentName AgentName to use
*/
private void initChaosAgent(Configuration conf, String quorum, String agentName) {
this.conf = conf;
this.quorum = quorum;
this.agentName = agentName;
this.retryCounterFactory = new RetryCounterFactory(new RetryCounter.RetryConfig()
.setMaxAttempts(
conf.getInt(ChaosConstants.RETRY_ATTEMPTS_KEY, ChaosConstants.DEFAULT_RETRY_ATTEMPTS))
.setSleepInterval(conf.getLong(ChaosConstants.RETRY_SLEEP_INTERVAL_KEY,
ChaosConstants.DEFAULT_RETRY_SLEEP_INTERVAL)));
try {
this.createZKConnection(null);
this.register();
} catch (IOException e) {
LOG.error("Error Creating Connection: " + e);
}
} | 3.68 |
hbase_HFile_longToInt | // Utility methods.
/*
* @param l Long to convert to an int.
* @return <code>l</code> cast as an int.
*/
static int longToInt(final long l) {
// Expecting the size() of a block not exceeding 4GB. Assuming the
// size() will wrap to negative integer if it exceeds 2GB (From tfile).
return (int) (l & 0x00000000ffffffffL);
} | 3.68 |
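
The wrap-around behaviour mentioned in the comment is easy to check directly; a small sketch:

```java
// Same mask-and-cast as the snippet: the low 32 bits are kept, and values
// above Integer.MAX_VALUE wrap around to negative ints.
public class LongToIntDemo {
    static int longToInt(long l) {
        return (int) (l & 0x00000000ffffffffL);
    }

    public static void main(String[] args) {
        System.out.println(longToInt(42L));            // 42
        System.out.println(longToInt(3_000_000_000L)); // -1294967296 (wrapped past 2GB)
    }
}
```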
flink_SharedStateRegistry_registerReference | /**
* Shortcut for {@link #registerReference(SharedStateRegistryKey, StreamStateHandle, long,
* boolean)} with preventDiscardingCreatedCheckpoint = false.
*/
default StreamStateHandle registerReference(
SharedStateRegistryKey registrationKey, StreamStateHandle state, long checkpointID) {
return registerReference(registrationKey, state, checkpointID, false);
} | 3.68 |
hadoop_TwoColumnLayout_preHead | /**
* Do what needs to be done before the header is rendered. This usually
* involves setting page variables for Javascript and CSS rendering.
* @param html the html to use to render.
*/
protected void preHead(Page.HTML<__> html) {
} | 3.68 |
dubbo_ServiceModel_getServiceMetadata | /**
* @return serviceMetadata
*/
public ServiceMetadata getServiceMetadata() {
return serviceMetadata;
} | 3.68 |
morf_AbstractSqlDialectTest_testDeleteUsingAliasedTable | /**
* Tests a delete referring to an aliased table.
*/
@Test
public void testDeleteUsingAliasedTable() {
DeleteStatement deleteStmt = new DeleteStatement(new TableReference("myDeleteTable").as("stageName"));
String expectedSql = "DELETE FROM " + tableName("myDeleteTable") + " stageName";
assertEquals("Delete with alias", expectedSql, testDialect.convertStatementToSQL(deleteStmt));
} | 3.68 |
flink_SegmentsUtil_bitSet | /**
 * Sets a bit in the given segments.
*
* @param segments target segments.
* @param baseOffset bits base offset.
* @param index bit index from base offset.
*/
public static void bitSet(MemorySegment[] segments, int baseOffset, int index) {
if (segments.length == 1) {
int offset = baseOffset + byteIndex(index);
MemorySegment segment = segments[0];
byte current = segment.get(offset);
current |= (1 << (index & BIT_BYTE_INDEX_MASK));
segment.put(offset, current);
} else {
bitSetMultiSegments(segments, baseOffset, index);
}
} | 3.68 |
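
The same bit arithmetic on a plain byte array, assuming `byteIndex(i)` is `i >>> 3` and `BIT_BYTE_INDEX_MASK` is 7 (both are assumptions of this sketch, not quoted from the Flink class):

```java
// Plain-array sketch of the single-segment branch above.
public class BitSetDemo {
    private static final int BIT_BYTE_INDEX_MASK = 7; // assumed value

    static void bitSet(byte[] bytes, int baseOffset, int index) {
        int offset = baseOffset + (index >>> 3);       // assumed byteIndex(index)
        byte current = bytes[offset];
        current |= (byte) (1 << (index & BIT_BYTE_INDEX_MASK));
        bytes[offset] = current;
    }

    public static void main(String[] args) {
        byte[] buf = new byte[2];
        bitSet(buf, 0, 10); // bit 10 lands in byte 1, bit position 2
        System.out.println(Integer.toBinaryString(buf[1] & 0xFF)); // 100
    }
}
```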
hbase_TableIntegrityErrorHandlerImpl_getTableInfo | /**
* {@inheritDoc}
*/
@Override
public HbckTableInfo getTableInfo() {
return ti;
} | 3.68 |
hbase_MasterFileSystem_checkStagingDir | /**
* Check permissions for bulk load staging directory. This directory has special hidden
* permissions. Create it if necessary.
*/
private void checkStagingDir() throws IOException {
Path p = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
try {
if (!this.fs.exists(p)) {
if (!this.fs.mkdirs(p, HiddenDirPerms)) {
throw new IOException("Failed to create staging directory " + p.toString());
}
}
this.fs.setPermission(p, HiddenDirPerms);
} catch (IOException e) {
LOG.error("Failed to create or set permission on staging directory " + p.toString());
throw new IOException(
"Failed to create or set permission on staging directory " + p.toString(), e);
}
} | 3.68 |
flink_RequestFailure_getRequestId | /**
* Returns the request ID responding to.
*
* @return Request ID responding to
*/
public long getRequestId() {
return requestId;
} | 3.68 |
hadoop_FileSystemReleaseFilter_init | /**
* Initializes the filter.
* <p>
* This implementation is a NOP.
*
* @param filterConfig filter configuration.
*
* @throws ServletException thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig filterConfig) throws ServletException {
} | 3.68 |
hudi_AbstractTableFileSystemView_listPartitions | /**
* @param partitionPathList A list of pairs of the relative and absolute paths of the partitions.
* @return all the files from the partitions.
* @throws IOException upon error.
*/
protected Map<Pair<String, Path>, FileStatus[]> listPartitions(
List<Pair<String, Path>> partitionPathList) throws IOException {
Map<Pair<String, Path>, FileStatus[]> fileStatusMap = new HashMap<>();
for (Pair<String, Path> partitionPair : partitionPathList) {
Path absolutePartitionPath = partitionPair.getRight();
try {
fileStatusMap.put(partitionPair, metaClient.getFs().listStatus(absolutePartitionPath));
} catch (IOException e) {
// Create the path if it does not exist already
if (!metaClient.getFs().exists(absolutePartitionPath)) {
metaClient.getFs().mkdirs(absolutePartitionPath);
fileStatusMap.put(partitionPair, new FileStatus[0]);
} else {
// in case the partition path was created by another caller
fileStatusMap.put(partitionPair, metaClient.getFs().listStatus(absolutePartitionPath));
}
}
}
return fileStatusMap;
} | 3.68 |
flink_Module_getFunctionDefinition | /**
* Get an optional of {@link FunctionDefinition} by a given name.
*
* <p>It includes hidden functions even though not listed in {@link #listFunctions()}.
*
* @param name name of the {@link FunctionDefinition}.
* @return an optional function definition
*/
default Optional<FunctionDefinition> getFunctionDefinition(String name) {
return Optional.empty();
} | 3.68 |
framework_AbstractDateField_reconstructDateFromFields | /**
* Construct a date object from the individual field values received from
* the client.
*
* @param resolutions
* map of time unit (resolution) name and value, the key is the
* resolution name e.g. "HOUR", "MINUTE", the value can be
* {@code null}
* @param oldDate
* used as a fallback to get needed values if they are not
* defined in the specified {@code resolutions}
*
* @return the date object built from the specified resolutions
* @since 8.2
*/
protected T reconstructDateFromFields(Map<String, Integer> resolutions,
T oldDate) {
Map<R, Integer> calendarFields = new HashMap<>();
for (R resolution : getResolutionsHigherOrEqualTo(getResolution())) {
// Only handle what the client is allowed to send. The same
// resolutions that are painted
String resolutionName = resolution.name();
Integer newValue = resolutions.get(resolutionName);
if (newValue == null) {
newValue = getDatePart(oldDate, resolution);
}
calendarFields.put(resolution, newValue);
}
return buildDate(calendarFields);
} | 3.68 |
shardingsphere-elasticjob_JobScheduleController_resumeJob | /**
* Resume job.
*/
public synchronized void resumeJob() {
try {
if (!scheduler.isShutdown()) {
scheduler.resumeAll();
}
} catch (final SchedulerException ex) {
throw new JobSystemException(ex);
}
} | 3.68 |
flink_SpillChannelManager_registerChannelToBeRemovedAtShutdown | /**
* Adds a channel to the list of channels that are to be removed at shutdown.
*
* @param channel The channel id.
*/
synchronized void registerChannelToBeRemovedAtShutdown(FileIOChannel.ID channel) {
channelsToDeleteAtShutdown.add(channel);
} | 3.68 |
flink_CsvReaderFormat_forPojo | /**
* Builds a new {@code CsvReaderFormat} for reading CSV files mapped to the provided POJO class
 * definition. The produced reader uses default mapper and schema settings; use {@code forSchema}
 * if you need customizations.
*
* @param pojoType The type class of the POJO.
* @param <T> The type of the returned elements.
*/
public static <T> CsvReaderFormat<T> forPojo(Class<T> pojoType) {
return forSchema(
JacksonMapperFactory::createCsvMapper,
mapper -> mapper.schemaFor(pojoType).withoutQuoteChar(),
TypeInformation.of(pojoType));
} | 3.68 |
querydsl_LiteralExpression_stringValue | /**
* Create a cast to String expression
*
* @see java.lang.Object#toString()
* @return cast expression
*/
public StringExpression stringValue() {
if (stringCast == null) {
stringCast = Expressions.stringOperation(Ops.STRING_CAST, mixin);
}
return stringCast;
} | 3.68 |
hbase_CreateNamespaceProcedure_prepareCreate | /**
 * Checks performed before any real action of creating the namespace.
* @param env MasterProcedureEnv
*/
private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
if (getTableNamespaceManager(env).doesNamespaceExist(nsDescriptor.getName())) {
setFailure("master-create-namespace",
new NamespaceExistException("Namespace " + nsDescriptor.getName() + " already exists"));
return false;
}
getTableNamespaceManager(env).validateTableAndRegionCount(nsDescriptor);
checkNamespaceRSGroup(env, nsDescriptor);
return true;
} | 3.68 |
hudi_InLineFSUtils_startOffset | /**
 * Returns the start offset within the base file for the block identified by the given InlineFS path.
*
* input: "inlinefs://file1/s3a/?start_offset=20&length=40".
* output: 20
*/
public static long startOffset(Path inlineFSPath) {
assertInlineFSPath(inlineFSPath);
String[] slices = inlineFSPath.toString().split("[?&=]");
return Long.parseLong(slices[slices.length - 3]);
} | 3.68 |
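
The split can be traced on the example path from the Javadoc above; a quick sketch:

```java
// Splitting on '?', '&' and '=' leaves the start_offset value third from the end.
public class StartOffsetDemo {
    public static void main(String[] args) {
        String path = "inlinefs://file1/s3a/?start_offset=20&length=40";
        String[] slices = path.split("[?&=]");
        // slices: [inlinefs://file1/s3a/, start_offset, 20, length, 40]
        long startOffset = Long.parseLong(slices[slices.length - 3]);
        System.out.println(startOffset); // 20
    }
}
```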
hbase_HRegion_isAllFamilies | /** Returns True if passed Set is all families in the region. */
private boolean isAllFamilies(Collection<HStore> families) {
return families == null || this.stores.size() == families.size();
} | 3.68 |
hudi_ImmutablePair_getRight | /**
* {@inheritDoc}
*/
@Override
public R getRight() {
return right;
} | 3.68 |
hbase_HashTable_main | /**
* Main entry point.
*/
public static void main(String[] args) throws Exception {
int ret = ToolRunner.run(new HashTable(HBaseConfiguration.create()), args);
System.exit(ret);
} | 3.68 |
pulsar_ConsumerConfiguration_setCryptoKeyReader | /**
* Sets a {@link CryptoKeyReader}.
*
* @param cryptoKeyReader
* CryptoKeyReader object
*/
public ConsumerConfiguration setCryptoKeyReader(CryptoKeyReader cryptoKeyReader) {
Objects.requireNonNull(cryptoKeyReader);
conf.setCryptoKeyReader(cryptoKeyReader);
return this;
} | 3.68 |
flink_BatchTask_cancelChainedTasks | /**
* Cancels all tasks via their {@link ChainedDriver#cancelTask()} method. Any occurring
* exception and error is suppressed, such that the canceling method of every task is invoked in
* all cases.
*
* @param tasks The tasks to be canceled.
*/
public static void cancelChainedTasks(List<ChainedDriver<?, ?>> tasks) {
for (ChainedDriver<?, ?> task : tasks) {
try {
task.cancelTask();
} catch (Throwable t) {
// do nothing
}
}
} | 3.68 |
dubbo_CollectionUtils_first | /**
* Take the first element from the specified collection
*
* @param values the collection object
* @param <T> the type of element of collection
* @return if found, return the first one, or <code>null</code>
* @since 2.7.6
*/
public static <T> T first(Collection<T> values) {
if (isEmpty(values)) {
return null;
}
if (values instanceof List) {
List<T> list = (List<T>) values;
return list.get(0);
} else {
return values.iterator().next();
}
} | 3.68 |
querydsl_MetaDataExporter_setBeanSuffix | /**
* Override the bean suffix for the classes (default: "")
*
* @param beanSuffix bean suffix for bean-types (default: "")
*/
public void setBeanSuffix(String beanSuffix) {
module.bind(SQLCodegenModule.BEAN_SUFFIX, beanSuffix);
} | 3.68 |
dubbo_PropertySourcesUtils_normalizePrefix | /**
* Normalize the prefix
*
* @param prefix the prefix
* @return the prefix
*/
public static String normalizePrefix(String prefix) {
return prefix.endsWith(".") ? prefix : prefix + ".";
} | 3.68 |
dubbo_FrameworkModel_destroyAll | /**
* Destroy all framework model instances, shutdown dubbo engine completely.
*/
public static void destroyAll() {
synchronized (globalLock) {
for (FrameworkModel frameworkModel : new ArrayList<>(allInstances)) {
frameworkModel.destroy();
}
}
} | 3.68 |
hbase_BucketAllocator_totalCount | /**
* Combined {@link #freeCount()} + {@link #usedCount()}
*/
public long totalCount() {
return totalCount;
} | 3.68 |
flink_SourceOperatorStreamTask_cleanupCheckpoint | /** Remove temporary data about a canceled checkpoint. */
private void cleanupCheckpoint(long checkpointId) {
assert (mailboxProcessor.isMailboxThread());
triggeredCheckpoints.remove(checkpointId);
untriggeredCheckpoints.remove(checkpointId);
maybeResumeProcessing();
} | 3.68 |
morf_NamedParameterPreparedStatement_setString | /**
* Sets the value of a named string parameter.
*
* @param parameter the parameter metadata.
* @param value the parameter value.
* @return this, for method chaining
* @exception SQLException if an error occurs when setting the parameter
*/
public NamedParameterPreparedStatement setString(SqlParameter parameter, final String value) throws SQLException {
forEachOccurrenceOfParameter(parameter, new Operation() {
@Override
public void apply(int parameterIndex) throws SQLException {
// TODO: dialect nullability is deprecated, and should be ousted asap
if (sql.dialect != null && sql.dialect.usesNVARCHARforStrings()) {
statement.setNString(parameterIndex, value);
}
else {
statement.setString(parameterIndex, value);
}
}
});
return this;
} | 3.68 |
graphhopper_VectorTile_getFeaturesBuilderList | /**
* <pre>
* The actual features in this tile.
* </pre>
*
* <code>repeated .vector_tile.Tile.Feature features = 2;</code>
*/
public java.util.List<vector_tile.VectorTile.Tile.Feature.Builder>
getFeaturesBuilderList() {
return getFeaturesFieldBuilder().getBuilderList();
} | 3.68 |
hbase_ThriftUtilities_resultFromHBase | /**
* Creates a {@link TResult} (Thrift) from a {@link Result} (HBase).
* @param in the <code>Result</code> to convert
* @return converted result, returns an empty result if the input is <code>null</code>
*/
public static TResult resultFromHBase(Result in) {
Cell[] raw = in.rawCells();
TResult out = new TResult();
byte[] row = in.getRow();
if (row != null) {
out.setRow(in.getRow());
}
List<TColumnValue> columnValues = new ArrayList<>(raw.length);
for (Cell kv : raw) {
TColumnValue col = new TColumnValue();
col.setFamily(CellUtil.cloneFamily(kv));
col.setQualifier(CellUtil.cloneQualifier(kv));
col.setTimestamp(kv.getTimestamp());
col.setValue(CellUtil.cloneValue(kv));
col.setType(kv.getType().getCode());
if (kv.getTagsLength() > 0) {
col.setTags(PrivateCellUtil.cloneTags(kv));
}
columnValues.add(col);
}
out.setColumnValues(columnValues);
out.setStale(in.isStale());
out.setPartial(in.mayHaveMoreCellsInRow());
return out;
} | 3.68 |
flink_Tuple13_setFields | /**
* Sets new values to all fields of the tuple.
*
* @param f0 The value for field 0
* @param f1 The value for field 1
* @param f2 The value for field 2
* @param f3 The value for field 3
* @param f4 The value for field 4
* @param f5 The value for field 5
* @param f6 The value for field 6
* @param f7 The value for field 7
* @param f8 The value for field 8
* @param f9 The value for field 9
* @param f10 The value for field 10
* @param f11 The value for field 11
* @param f12 The value for field 12
*/
public void setFields(
T0 f0,
T1 f1,
T2 f2,
T3 f3,
T4 f4,
T5 f5,
T6 f6,
T7 f7,
T8 f8,
T9 f9,
T10 f10,
T11 f11,
T12 f12) {
this.f0 = f0;
this.f1 = f1;
this.f2 = f2;
this.f3 = f3;
this.f4 = f4;
this.f5 = f5;
this.f6 = f6;
this.f7 = f7;
this.f8 = f8;
this.f9 = f9;
this.f10 = f10;
this.f11 = f11;
this.f12 = f12;
} | 3.68 |
hbase_RSGroupAdminServiceImpl_fillTables | // for backward compatibility
private RSGroupInfo fillTables(RSGroupInfo rsGroupInfo) throws IOException {
return RSGroupUtil.fillTables(rsGroupInfo, master.getTableDescriptors().getAll().values());
} | 3.68 |
flink_DeltaIterationBase_setNextWorkset | /**
* Sets the contract of the step function that represents the next workset. This contract is
* considered one of the two sinks of the step function (the other one being the solution set
* delta).
*
* @param result The contract representing the next workset.
*/
public void setNextWorkset(Operator<WT> result) {
this.nextWorkset = result;
} | 3.68 |
querydsl_DateTimeExpression_minute | /**
* Create a minutes expression (range 0-59)
*
* @return minute
*/
public NumberExpression<Integer> minute() {
if (minutes == null) {
minutes = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.MINUTE, mixin);
}
return minutes;
} | 3.68 |
graphhopper_KVStorage_cutString | /**
* This method limits the specified String value to the length currently accepted for values in the KVStorage.
*/
public static String cutString(String value) {
byte[] bytes = value.getBytes(Helper.UTF_CS);
// See #2609 and test why we use a value < 255
return bytes.length > 250 ? new String(bytes, 0, 250, Helper.UTF_CS) : value;
} | 3.68 |
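
A minimal sketch exercising the byte-length cut above, with `StandardCharsets.UTF_8` standing in for GraphHopper's `Helper.UTF_CS`; note the limit is 250 UTF-8 bytes, not 250 characters, so multi-byte characters reduce the resulting string length:

```java
import java.nio.charset.StandardCharsets;

// Same cut as the snippet: limit the UTF-8 encoding of the value to 250 bytes.
public class CutStringDemo {
    static String cutString(String value) {
        byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
        return bytes.length > 250 ? new String(bytes, 0, 250, StandardCharsets.UTF_8) : value;
    }

    public static void main(String[] args) {
        String ascii = "x".repeat(300);
        System.out.println(cutString(ascii).length());    // 250
        String twoByte = "é".repeat(300);                  // 600 UTF-8 bytes
        System.out.println(cutString(twoByte).length());  // 125 chars (250 bytes)
    }
}
```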
hudi_HoodieBackedTableMetadataWriter_processAndCommit | /**
* Processes commit metadata from data table and commits to metadata table.
*
* @param instantTime instant time of interest.
* @param convertMetadataFunction converter function to convert the respective metadata to List of HoodieRecords to be written to metadata table.
*/
private void processAndCommit(String instantTime, ConvertMetadataFunction convertMetadataFunction) {
Set<String> partitionsToUpdate = getMetadataPartitionsToUpdate();
if (initialized && metadata != null) {
// convert metadata and filter only the entries whose partition path are in partitionsToUpdate
Map<MetadataPartitionType, HoodieData<HoodieRecord>> partitionRecordsMap = convertMetadataFunction.convertMetadata().entrySet().stream()
.filter(entry -> partitionsToUpdate.contains(entry.getKey().getPartitionPath())).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
commit(instantTime, partitionRecordsMap);
}
} | 3.68 |
framework_VCalendar_setListener | /**
* Set the listener that listen to mouse events.
*
* @param mouseEventListener
* The listener to use
*/
public void setListener(MouseEventListener mouseEventListener) {
this.mouseEventListener = mouseEventListener;
} | 3.68 |
hbase_PrivateCellUtil_isDeleteType | /** Returns True if this cell is a {@link KeyValue.Type#Delete} type. */
public static boolean isDeleteType(Cell cell) {
return cell.getTypeByte() == KeyValue.Type.Delete.getCode();
} | 3.68 |
hadoop_OBSFileSystem_getBoundedCopyThreadPool | /**
* Return bounded thread pool for copy.
*
* @return the bounded thread pool for copy
*/
ThreadPoolExecutor getBoundedCopyThreadPool() {
return boundedCopyThreadPool;
} | 3.68 |
hadoop_AbstractS3ACommitter_getJobAttemptPath | /**
* Compute the path where the output of a given job attempt will be placed.
* @param context the context of the job. This is used to get the
* application attempt ID.
* @return the path to store job attempt data.
*/
public Path getJobAttemptPath(JobContext context) {
return getJobAttemptPath(getAppAttemptId(context));
} | 3.68 |
hbase_SnapshotManager_cleanupCompletedRestoreInMap | /**
* Remove the procedures that are marked as finished
*/
private synchronized void cleanupCompletedRestoreInMap() {
ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
Iterator<Map.Entry<TableName, Long>> it = restoreTableToProcIdMap.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<TableName, Long> entry = it.next();
Long procId = entry.getValue();
if (procExec.isRunning() && procExec.isFinished(procId)) {
it.remove();
}
}
} | 3.68 |