name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
streampipes_DensityRulesClassifier_getInstance | /**
* Returns the singleton instance of DensityRulesClassifier.
*/
public static DensityRulesClassifier getInstance() {
return INSTANCE;
} | 3.68 |
hbase_RegionCoprocessorHost_preIncrement | /**
* Supports Coprocessor 'bypass'.
* @param increment increment object
* @param edit The WALEdit object.
* @return result to return to client if default operation should be bypassed, null otherwise
* @throws IOException if an error occurred on the coprocessor
*/
public Result preIncrement(final Increment increment, final WALEdit edit) throws IOException {
boolean bypassable = true;
Result defaultResult = null;
if (coprocEnvironments.isEmpty()) {
return defaultResult;
}
return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Result>(
regionObserverGetter, defaultResult, bypassable) {
@Override
public Result call(RegionObserver observer) throws IOException {
return observer.preIncrement(this, increment, edit);
}
});
} | 3.68 |
flink_CompileUtils_cleanUp | /** Triggers internal garbage collection of expired cache entries. */
public static void cleanUp() {
COMPILED_CLASS_CACHE.cleanUp();
COMPILED_EXPRESSION_CACHE.cleanUp();
} | 3.68 |
AreaShop_GithubUpdateCheck_hasFailed | /**
* Check if the update check failed.
* @return true if the update check failed (an error message has been logged)
*/
public boolean hasFailed() {
return error;
} | 3.68 |
hbase_WALPrettyPrinter_beginPersistentOutput | /**
* Enables output as a single, persistent list. At present, only relevant in the case of JSON
* output.
*/
public void beginPersistentOutput() {
if (persistentOutput) {
return;
}
persistentOutput = true;
firstTxn = true;
if (outputJSON) {
out.print("[");
}
} | 3.68 |
flink_StateTtlConfig_setTtl | /**
* Sets the ttl time.
*
* @param ttl The ttl time.
*/
@Nonnull
public Builder setTtl(@Nonnull Time ttl) {
this.ttl = ttl;
return this;
} | 3.68 |
hadoop_RegistryDNSServer_processServiceRecord | /**
* Process the service record, parsing the information and creating the
* required DNS records.
* @param path the service record path.
* @param record the record.
* @param command the registry command to execute.
* @throws IOException
*/
private void processServiceRecord(String path, ServiceRecord record,
ManagementCommand command)
throws IOException {
command.exec(path, record);
} | 3.68 |
hbase_BackupAdminImpl_checkIfValidForMerge | /**
* Verifies that backup images are valid for merge.
* <ul>
* <li>All backups MUST be in the same destination
* <li>No FULL backups are allowed - only INCREMENTAL
* <li>All backups must be in COMPLETE state
* <li>No holes in backup list are allowed
* </ul>
* <p>
* @param backupIds list of backup ids
* @param table backup system table
* @throws IOException if the backup image is not valid for merge
*/
private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table)
throws IOException {
String backupRoot = null;
final Set<TableName> allTables = new HashSet<>();
final Set<String> allBackups = new HashSet<>();
long minTime = Long.MAX_VALUE, maxTime = Long.MIN_VALUE;
for (String backupId : backupIds) {
BackupInfo bInfo = table.readBackupInfo(backupId);
if (bInfo == null) {
String msg = "Backup session " + backupId + " not found";
throw new IOException(msg);
}
if (backupRoot == null) {
backupRoot = bInfo.getBackupRootDir();
} else if (!bInfo.getBackupRootDir().equals(backupRoot)) {
throw new IOException("Found different backup destinations in a list of a backup sessions "
+ "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir());
}
if (bInfo.getType() == BackupType.FULL) {
throw new IOException("FULL backup image can not be merged for: \n" + bInfo);
}
if (bInfo.getState() != BackupState.COMPLETE) {
throw new IOException("Backup image " + backupId
+ " can not be merged becuase of its state: " + bInfo.getState());
}
allBackups.add(backupId);
allTables.addAll(bInfo.getTableNames());
long time = bInfo.getStartTs();
if (time < minTime) {
minTime = time;
}
if (time > maxTime) {
maxTime = time;
}
}
final long startRangeTime = minTime;
final long endRangeTime = maxTime;
final String backupDest = backupRoot;
// Check we have no 'holes' in backup id list
// Filter 1 : backupRoot
// Filter 2 : time range filter
// Filter 3 : table filter
BackupInfo.Filter destinationFilter = info -> info.getBackupRootDir().equals(backupDest);
BackupInfo.Filter timeRangeFilter = info -> {
long time = info.getStartTs();
return time >= startRangeTime && time <= endRangeTime;
};
BackupInfo.Filter tableFilter = info -> {
List<TableName> tables = info.getTableNames();
return !Collections.disjoint(allTables, tables);
};
BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL;
BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE;
List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter, timeRangeFilter,
tableFilter, typeFilter, stateFilter);
if (allInfos.size() != allBackups.size()) {
// Yes we have at least one hole in backup image sequence
List<String> missingIds = new ArrayList<>();
for (BackupInfo info : allInfos) {
if (allBackups.contains(info.getBackupId())) {
continue;
}
missingIds.add(info.getBackupId());
}
String errMsg =
"Sequence of backup ids has 'holes'. The following backup images must be added:"
+ org.apache.hadoop.util.StringUtils.join(",", missingIds);
throw new IOException(errMsg);
}
} | 3.68 |
shardingsphere-elasticjob_JobAPIFactory_createJobStatisticsAPI | /**
* Create job statistics API.
*
* @param connectString registry center connect string
* @param namespace registry center namespace
* @param digest registry center digest
* @return job statistics API
*/
public static JobStatisticsAPI createJobStatisticsAPI(final String connectString, final String namespace, final String digest) {
return new JobStatisticsAPIImpl(RegistryCenterFactory.createCoordinatorRegistryCenter(connectString, namespace, digest));
} | 3.68 |
flink_FlinkContainersSettings_getJarPaths | /**
* Gets jar paths.
*
* @return The jar paths.
*/
public Collection<String> getJarPaths() {
return jarPaths;
} | 3.68 |
flink_RemoteStreamEnvironment_getHost | /**
* Gets the hostname of the master (JobManager), where the program will be executed.
*
* @return The hostname of the master
*/
public String getHost() {
return configuration.getString(JobManagerOptions.ADDRESS);
} | 3.68 |
hbase_Queue_add | // ======================================================================
// Functions to handle procedure queue
// ======================================================================
public void add(Procedure<?> proc, boolean addToFront) {
if (addToFront) {
runnables.addFirst(proc);
} else {
runnables.addLast(proc);
}
} | 3.68 |
hudi_MarkerUtils_makerToPartitionAndFileID | /**
* Get fileID from full marker path, for example:
* 20210623/0/20210825/932a86d9-5c1d-44c7-ac99-cb88b8ef8478-0_85-15-1390_20220620181735781.parquet.marker.MERGE
* ==> get 20210623/0/20210825/932a86d9-5c1d-44c7-ac99-cb88b8ef8478-0
* @param marker the full marker path
* @return the partition path and file ID prefix extracted from the marker path
*/
public static String makerToPartitionAndFileID(String marker) {
String[] ele = marker.split("_");
return ele[0];
} | 3.68 |
flink_DeltaIteration_isSolutionSetUnManaged | /**
* Gets whether the solution set is in managed or unmanaged memory.
*
* @return True, if the solution set is in unmanaged memory (object heap), false if in managed
* memory.
* @see #setSolutionSetUnManaged(boolean)
*/
public boolean isSolutionSetUnManaged() {
return solutionSetUnManaged;
} | 3.68 |
hbase_ForeignException_toStackTrace | /**
* Unwinds a serialized list of {@link StackTraceElementMessage}s into an array of
* {@link StackTraceElement}s.
* @param traceList list that was serialized
* @return the deserialized stack trace, or an empty array if it couldn't be unwound (e.g. wasn't
* set on the sender).
*/
private static StackTraceElement[] toStackTrace(List<StackTraceElementMessage> traceList) {
if (traceList == null || traceList.isEmpty()) {
return new StackTraceElement[0]; // empty array
}
StackTraceElement[] trace = new StackTraceElement[traceList.size()];
for (int i = 0; i < traceList.size(); i++) {
StackTraceElementMessage elem = traceList.get(i);
trace[i] = new StackTraceElement(elem.getDeclaringClass(), elem.getMethodName(),
elem.getFileName(), elem.getLineNumber());
}
return trace;
} | 3.68 |
hadoop_AbstractS3ACommitter_getWorkPath | /**
* This is the critical method for {@code FileOutputFormat}; it declares
* the path for work.
* @return the working path.
*/
@Override
public final Path getWorkPath() {
return workPath;
} | 3.68 |
framework_Escalator_createCellElement | /**
* Create and setup an empty cell element.
*
* @param width
* the width of the cell, in pixels
*
* @return a set-up empty cell element
*/
public TableCellElement createCellElement(final double width) {
final TableCellElement cellElem = TableCellElement
.as(DOM.createElement(getCellElementTagName()));
final double height = getDefaultRowHeight();
assert height >= 0 : "defaultRowHeight was negative. There's a setter leak somewhere.";
cellElem.getStyle().setHeight(height, Unit.PX);
if (width >= 0) {
cellElem.getStyle().setWidth(width, Unit.PX);
}
cellElem.addClassName(getStylePrimaryName() + "-cell");
return cellElem;
} | 3.68 |
hudi_TimelineUtils_getDroppedPartitions | /**
* Returns partitions that have been deleted or marked for deletion in the given timeline.
* Does not include internal operations such as clean in the timeline.
*/
public static List<String> getDroppedPartitions(HoodieTimeline timeline) {
HoodieTimeline replaceCommitTimeline = timeline.getWriteTimeline().filterCompletedInstants().getCompletedReplaceTimeline();
return replaceCommitTimeline.getInstantsAsStream().flatMap(instant -> {
try {
HoodieReplaceCommitMetadata commitMetadata = HoodieReplaceCommitMetadata.fromBytes(
replaceCommitTimeline.getInstantDetails(instant).get(), HoodieReplaceCommitMetadata.class);
if (WriteOperationType.DELETE_PARTITION.equals(commitMetadata.getOperationType())) {
Map<String, List<String>> partitionToReplaceFileIds = commitMetadata.getPartitionToReplaceFileIds();
return partitionToReplaceFileIds.keySet().stream();
} else {
return Stream.empty();
}
} catch (IOException e) {
throw new HoodieIOException("Failed to get partitions modified at " + instant, e);
}
}).distinct().filter(partition -> !partition.isEmpty()).collect(Collectors.toList());
} | 3.68 |
pulsar_BrokerMonitor_printLoadReport | // Print the load report in a tabular form for a broker running SimpleLoadManagerImpl.
private synchronized void printLoadReport(final String broker, final LoadReport loadReport) {
loadData.put(broker, loadReport);
// Initialize the constant rows.
final Object[][] rows = new Object[10][];
rows[0] = COUNT_ROW;
rows[2] = RAW_SYSTEM_ROW;
rows[4] = ALLOC_SYSTEM_ROW;
rows[6] = RAW_MESSAGE_ROW;
rows[8] = ALLOC_MESSAGE_ROW;
// First column is a label, so start at the second column at index 1.
// Client count row.
rows[1] = new Object[COUNT_ROW.length];
initRow(rows[1], loadReport.getNumTopics(), loadReport.getNumBundles(), loadReport.getNumProducers(),
loadReport.getNumConsumers(), loadReport.getBundleGains().size(),
loadReport.getBundleLosses().size());
// Raw system row.
final SystemResourceUsage systemResourceUsage = loadReport.getSystemResourceUsage();
final ResourceUsage cpu = systemResourceUsage.getCpu();
final ResourceUsage memory = systemResourceUsage.getMemory();
final ResourceUsage directMemory = systemResourceUsage.getDirectMemory();
final ResourceUsage bandwidthIn = systemResourceUsage.getBandwidthIn();
final ResourceUsage bandwidthOut = systemResourceUsage.getBandwidthOut();
final double maxUsage = Math.max(
Math.max(Math.max(cpu.percentUsage(), memory.percentUsage()),
Math.max(directMemory.percentUsage(), bandwidthIn.percentUsage())),
bandwidthOut.percentUsage());
rows[3] = new Object[RAW_SYSTEM_ROW.length];
initRow(rows[3], cpu.percentUsage(), memory.percentUsage(), directMemory.percentUsage(),
bandwidthIn.percentUsage(), bandwidthOut.percentUsage(), maxUsage);
// Allocated system row.
rows[5] = new Object[ALLOC_SYSTEM_ROW.length];
final double allocatedCpuUsage = percentUsage(loadReport.getAllocatedCPU(), cpu.limit);
final double allocatedMemoryUsage = percentUsage(loadReport.getAllocatedMemory(), memory.limit);
final double allocatedBandwidthInUsage = percentUsage(loadReport.getAllocatedBandwidthIn(),
bandwidthIn.limit);
final double allocatedBandwidthOutUsage = percentUsage(loadReport.getAllocatedBandwidthOut(),
bandwidthOut.limit);
final double maxAllocatedUsage = Math.max(
Math.max(Math.max(allocatedCpuUsage, allocatedMemoryUsage), allocatedBandwidthInUsage),
allocatedBandwidthOutUsage);
initRow(rows[5], allocatedCpuUsage, allocatedMemoryUsage, null, allocatedBandwidthInUsage,
allocatedBandwidthOutUsage, maxAllocatedUsage);
// Raw message row.
rows[7] = new Object[RAW_MESSAGE_ROW.length];
initMessageRow(rows[7], loadReport.getMsgRateIn(), loadReport.getMsgRateOut(), bandwidthIn.usage,
bandwidthOut.usage);
// Allocated message row.
rows[9] = new Object[ALLOC_MESSAGE_ROW.length];
initMessageRow(rows[9], loadReport.getAllocatedMsgRateIn(), loadReport.getAllocatedMsgRateOut(),
loadReport.getAllocatedBandwidthIn(), loadReport.getAllocatedBandwidthOut());
final String table = localTableMaker.make(rows);
log.info("\nLoad Report for {}:\n{}\n", broker, table);
} | 3.68 |
flink_BulkIterationBase_setBroadcastVariable | /**
* The BulkIteration meta operator cannot have broadcast inputs. This method always throws an
* exception.
*
* @param name Ignored.
* @param root Ignored.
*/
public void setBroadcastVariable(String name, Operator<?> root) {
throw new UnsupportedOperationException(
"The BulkIteration meta operator cannot have broadcast inputs.");
} | 3.68 |
hbase_MasterRpcServices_unassigns | /**
* A 'raw' version of unassign that does bulk and can skirt Master state checks if override is
* set; i.e. unassigns can be forced during Master startup or if RegionState is unclean. Used by
* HBCK2.
*/
@Override
public MasterProtos.UnassignsResponse unassigns(RpcController controller,
MasterProtos.UnassignsRequest request) throws ServiceException {
checkMasterProcedureExecutor();
final ProcedureExecutor<MasterProcedureEnv> pe = server.getMasterProcedureExecutor();
final AssignmentManager am = server.getAssignmentManager();
MasterProtos.UnassignsResponse.Builder responseBuilder =
MasterProtos.UnassignsResponse.newBuilder();
final boolean override = request.getOverride();
LOG.info("{} unassigns, override={}", server.getClientIdAuditPrefix(), override);
for (HBaseProtos.RegionSpecifier rs : request.getRegionList()) {
final RegionInfo info = getRegionInfo(rs);
if (info == null) {
LOG.info("Unknown region {}", rs);
continue;
}
responseBuilder.addPid(Optional.ofNullable(am.createOneUnassignProcedure(info, override))
.map(pe::submitProcedure).orElse(Procedure.NO_PROC_ID));
}
return responseBuilder.build();
} | 3.68 |
framework_GenericFontIcon_equals | /*
* (non-Javadoc)
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof GenericFontIcon)) {
return false;
}
GenericFontIcon other = (GenericFontIcon) obj;
if (codePoint != other.codePoint) {
return false;
}
if (fontFamily == null) {
if (other.fontFamily != null) {
return false;
}
} else if (!fontFamily.equals(other.fontFamily)) {
return false;
}
return true;
} | 3.68 |
flink_TableConfigUtils_isOperatorDisabled | /**
* Returns whether the given operator type is disabled.
*
* @param tableConfig TableConfig object
* @param operatorType operator type to check
* @return true if the given operator is disabled.
*/
public static boolean isOperatorDisabled(TableConfig tableConfig, OperatorType operatorType) {
String value = tableConfig.get(TABLE_EXEC_DISABLED_OPERATORS);
if (value == null) {
return false;
}
String[] operators = value.split(",");
Set<OperatorType> operatorSets = new HashSet<>();
for (String operator : operators) {
operator = operator.trim();
if (operator.isEmpty()) {
continue;
}
if (operator.equals("HashJoin")) {
operatorSets.add(OperatorType.BroadcastHashJoin);
operatorSets.add(OperatorType.ShuffleHashJoin);
} else {
operatorSets.add(OperatorType.valueOf(operator));
}
}
return operatorSets.contains(operatorType);
} | 3.68 |
framework_AbstractJavaScriptComponent_callFunction | /**
* Invoke a named function that the connector JavaScript has added to the
* JavaScript connector wrapper object. The arguments can be any boxed
* primitive type, String, {@link JsonValue} or arrays of any other
* supported type. Complex types (e.g. List, Set, Map, Connector or any
* JavaBean type) must be explicitly serialized to a {@link JsonValue}
* before sending. This can be done either with
* {@link JsonCodec#encode(Object, JsonValue, java.lang.reflect.Type, com.vaadin.ui.ConnectorTracker)}
* or using the factory methods in {@link Json}.
*
* @param name
* the name of the function
* @param arguments
* function arguments
*/
protected void callFunction(String name, Object... arguments) {
callbackHelper.invokeCallback(name, arguments);
} | 3.68 |
hmily_DelegationThreadPoolExecutor_onInitialRejection | /**
* On initial rejection.
*
* @param runnable the runnable
*/
private void onInitialRejection(final Runnable runnable) {
LOGGER.info("DelegationThreadPoolExecutor:thread {} rejection", runnable);
} | 3.68 |
flink_HiveTableUtil_createHiveColumns | /** Create Hive columns from Flink ResolvedSchema. */
public static List<FieldSchema> createHiveColumns(ResolvedSchema schema) {
String[] fieldNames = schema.getColumnNames().toArray(new String[0]);
DataType[] fieldTypes = schema.getColumnDataTypes().toArray(new DataType[0]);
List<FieldSchema> columns = new ArrayList<>(fieldNames.length);
for (int i = 0; i < fieldNames.length; i++) {
columns.add(
new FieldSchema(
fieldNames[i],
HiveTypeUtil.toHiveTypeInfo(fieldTypes[i], true).getTypeName(),
null));
}
return columns;
} | 3.68 |
hbase_TableMapReduceUtil_buildDependencyClasspath | /**
* Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. Also
* exposed to shell scripts via `bin/hbase mapredcp`.
*/
public static String buildDependencyClasspath(Configuration conf) {
if (conf == null) {
throw new IllegalArgumentException("Must provide a configuration object.");
}
Set<String> paths = new HashSet<>(conf.getStringCollection("tmpjars"));
if (paths.isEmpty()) {
throw new IllegalArgumentException("Configuration contains no tmpjars.");
}
StringBuilder sb = new StringBuilder();
for (String s : paths) {
// entries can take the form 'file:/path/to/file.jar'.
int idx = s.indexOf(":");
if (idx != -1) s = s.substring(idx + 1);
if (sb.length() > 0) sb.append(File.pathSeparator);
sb.append(s);
}
return sb.toString();
} | 3.68 |
hadoop_S3ClientFactory_getPathUri | /**
* Get the full s3 path.
* Added in HADOOP-18330.
* @return path URI
*/
public URI getPathUri() {
return pathUri;
} | 3.68 |
hudi_AvroSchemaUtils_createNullableSchema | /**
* Creates schema following Avro's typical nullable schema definition: {@code Union(Schema.Type.NULL, <NonNullType>)},
* wrapping around provided target non-null type
*/
public static Schema createNullableSchema(Schema.Type avroType) {
return createNullableSchema(Schema.create(avroType));
} | 3.68 |
morf_OracleDialect_getSqlForDateToYyyymmdd | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDateToYyyymmdd(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForDateToYyyymmdd(Function function) {
return String.format("TO_NUMBER(TO_CHAR(%s, 'yyyymmdd'))",getSqlFrom(function.getArguments().get(0)));
} | 3.68 |
hudi_MarkerUtils_deleteMarkerTypeFile | /**
* Deletes `MARKERS.type` file.
*
* @param fileSystem file system to use.
* @param markerDir marker directory.
*/
public static void deleteMarkerTypeFile(FileSystem fileSystem, String markerDir) {
Path markerTypeFilePath = new Path(markerDir, MARKER_TYPE_FILENAME);
try {
fileSystem.delete(markerTypeFilePath, false);
} catch (IOException e) {
throw new HoodieIOException("Cannot delete marker type file " + markerTypeFilePath.toString()
+ "; " + e.getMessage(), e);
}
} | 3.68 |
framework_GridConnector_setRenderer | /**
* Sets a new renderer for this column object.
*
* @param rendererConnector
* a renderer connector object
*/
public void setRenderer(
AbstractGridRendererConnector<Object> rendererConnector) {
setRenderer(rendererConnector.getRenderer());
this.rendererConnector = rendererConnector;
} | 3.68 |
hadoop_AbstractOperationAuditor_init | /**
* Sets the IOStats and then calls init().
* @param opts options to initialize with.
*/
@Override
public void init(final OperationAuditorOptions opts) {
this.options = opts;
this.iostatistics = opts.getIoStatisticsStore();
init(opts.getConfiguration());
} | 3.68 |
hadoop_IOStatisticsBinding_publishAsStorageStatistics | /**
* Publish the IOStatistics as a set of storage statistics.
* This is dynamic.
* @param name storage statistics name.
* @param scheme FS scheme; may be null.
* @param source IOStatistics source.
* @return a dynamic storage statistics object.
*/
public static StorageStatistics publishAsStorageStatistics(
String name, String scheme, IOStatistics source) {
return new StorageStatisticsFromIOStatistics(name, scheme, source);
} | 3.68 |
flink_CatalogManager_listTables | /**
* Returns an array of names of all tables (tables and views, both temporary and permanent)
* registered in the namespace of the given catalog and database.
*
* @return names of all registered tables
*/
public Set<String> listTables(String catalogName, String databaseName) {
Catalog catalog = getCatalogOrThrowException(catalogName);
if (catalog == null) {
throw new ValidationException(String.format("Catalog %s does not exist", catalogName));
}
try {
return Stream.concat(
catalog.listTables(databaseName).stream(),
listTemporaryTablesInternal(catalogName, databaseName)
.map(e -> e.getKey().getObjectName()))
.collect(Collectors.toSet());
} catch (DatabaseNotExistException e) {
throw new ValidationException(
String.format("Database %s does not exist", databaseName), e);
}
} | 3.68 |
hbase_FileLink_exists | /** Returns true if the file pointed to by the link exists */
public boolean exists(final FileSystem fs) throws IOException {
for (int i = 0; i < locations.length; ++i) {
if (fs.exists(locations[i])) {
return true;
}
}
return false;
} | 3.68 |
framework_ContainerOrderedWrapper_isLastId | /*
* Tests if the given item is the last item in the container. Don't add a
* JavaDoc comment here; we use the default documentation from the implemented
* interface.
*/
@Override
public boolean isLastId(Object itemId) {
if (ordered) {
return ((Container.Ordered) container).isLastId(itemId);
}
return last != null && last.equals(itemId);
} | 3.68 |
hbase_StoreFileReader_getRefCount | /**
* Return the ref count associated with the reader whenever a scanner associated with the reader
* is opened.
*/
int getRefCount() {
return storeFileInfo.getRefCount();
} | 3.68 |
flink_ChangelogKeyedStateBackend_notifyCheckpointComplete | // -------------------- CheckpointListener --------------------------------
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
if (lastCheckpointId == checkpointId) {
// Notify the writer so that it can re-use the previous uploads. Do NOT notify it about
// a range status change if it is not relevant anymore. Otherwise, it could CONFIRM a
// newer upload instead of the previous one. This newer upload could then be re-used
// while in fact JM has discarded its results.
// This might change if the log ownership changes (the method won't likely be needed).
stateChangelogWriter.confirm(lastUploadedFrom, lastUploadedTo, checkpointId);
}
Long materializationID = materializationIdByCheckpointId.remove(checkpointId);
if (materializationID != null) {
if (materializationID > lastConfirmedMaterializationId) {
keyedStateBackend.notifyCheckpointComplete(materializationID);
lastConfirmedMaterializationId = materializationID;
}
}
materializationIdByCheckpointId.headMap(checkpointId, true).clear();
} | 3.68 |
zxing_FinderPatternFinder_crossCheckHorizontal | /**
* <p>Like {@link #crossCheckVertical(int, int, int, int)}, and in fact is basically identical,
* except it reads horizontally instead of vertically. This is used to cross-check a
* vertical cross check and locate the real center of the alignment pattern.</p>
*/
private float crossCheckHorizontal(int startJ, int centerI, int maxCount,
int originalStateCountTotal) {
BitMatrix image = this.image;
int maxJ = image.getWidth();
int[] stateCount = getCrossCheckStateCount();
int j = startJ;
while (j >= 0 && image.get(j, centerI)) {
stateCount[2]++;
j--;
}
if (j < 0) {
return Float.NaN;
}
while (j >= 0 && !image.get(j, centerI) && stateCount[1] <= maxCount) {
stateCount[1]++;
j--;
}
if (j < 0 || stateCount[1] > maxCount) {
return Float.NaN;
}
while (j >= 0 && image.get(j, centerI) && stateCount[0] <= maxCount) {
stateCount[0]++;
j--;
}
if (stateCount[0] > maxCount) {
return Float.NaN;
}
j = startJ + 1;
while (j < maxJ && image.get(j, centerI)) {
stateCount[2]++;
j++;
}
if (j == maxJ) {
return Float.NaN;
}
while (j < maxJ && !image.get(j, centerI) && stateCount[3] < maxCount) {
stateCount[3]++;
j++;
}
if (j == maxJ || stateCount[3] >= maxCount) {
return Float.NaN;
}
while (j < maxJ && image.get(j, centerI) && stateCount[4] < maxCount) {
stateCount[4]++;
j++;
}
if (stateCount[4] >= maxCount) {
return Float.NaN;
}
// If we found a finder-pattern-like section, but its size is significantly different than
// the original, assume it's a false positive
int stateCountTotal = stateCount[0] + stateCount[1] + stateCount[2] + stateCount[3] +
stateCount[4];
if (5 * Math.abs(stateCountTotal - originalStateCountTotal) >= originalStateCountTotal) {
return Float.NaN;
}
return foundPatternCross(stateCount) ? centerFromEnd(stateCount, j) : Float.NaN;
} | 3.68 |
hadoop_InstrumentedWriteLock_startLockTiming | /**
* Starts timing for the instrumented write lock.
*/
@Override
protected void startLockTiming() {
if (readWriteLock.getWriteHoldCount() == 1) {
writeLockHeldTimeStamp = getTimer().monotonicNow();
}
} | 3.68 |
flink_MathUtils_longToIntWithBitMixing | /**
* Pseudo-randomly maps a long (64-bit) to an integer (32-bit) using some bit-mixing for better
* distribution.
*
* @param in the long (64-bit) input.
* @return the bit-mixed int (32-bit) output
*/
public static int longToIntWithBitMixing(long in) {
in = (in ^ (in >>> 30)) * 0xbf58476d1ce4e5b9L;
in = (in ^ (in >>> 27)) * 0x94d049bb133111ebL;
in = in ^ (in >>> 31);
return (int) in;
} | 3.68 |
flink_DataStream_sinkTo | /**
* Adds the given {@link Sink} to this DataStream. Only streams with sinks added will be
* executed once the {@link StreamExecutionEnvironment#execute()} method is called.
*
* <p>This method is intended to be used only to recover a snapshot where no uids have been set
* before taking the snapshot.
*
* @param customSinkOperatorUidHashes operator hashes to support state binding
* @param sink The user defined sink.
* @return The closed DataStream.
*/
@PublicEvolving
public DataStreamSink<T> sinkTo(
Sink<T> sink, CustomSinkOperatorUidHashes customSinkOperatorUidHashes) {
// read the output type of the input Transform to coax out errors about MissingTypeInfo
transformation.getOutputType();
return DataStreamSink.forSink(this, sink, customSinkOperatorUidHashes);
} | 3.68 |
hudi_CleanPlanner_isFileSliceNeededForPendingCompaction | /**
* Determine if file slice needed to be preserved for pending compaction.
*
* @param fileSlice File Slice
* @return true if file slice needs to be preserved, false otherwise.
*/
private boolean isFileSliceNeededForPendingCompaction(FileSlice fileSlice) {
CompactionOperation op = fgIdToPendingCompactionOperations.get(fileSlice.getFileGroupId());
if (null != op) {
// If file slice's instant time is newer or same as that of operation, do not clean
return HoodieTimeline.compareTimestamps(fileSlice.getBaseInstantTime(), HoodieTimeline.GREATER_THAN_OR_EQUALS, op.getBaseInstantTime()
);
}
return false;
} | 3.68 |
hadoop_FederationMembershipStateStoreInputValidator_validate | /**
* Quick validation on the input to check some obvious fail conditions (fail
* fast). Check if the provided {@link GetSubClusterInfoRequest} for querying
* subcluster's information is valid or not.
*
* @param request the {@link GetSubClusterInfoRequest} to validate against
* @throws FederationStateStoreInvalidInputException if the request is invalid
*/
public static void validate(GetSubClusterInfoRequest request)
throws FederationStateStoreInvalidInputException {
// check if the request is present
if (request == null) {
String message = "Missing GetSubClusterInfo Request."
+ " Please try again by specifying a Get SubCluster information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// validate subcluster id
checkSubClusterId(request.getSubClusterId());
} | 3.68 |
hbase_DictionaryCache_loadFromResource | // Visible for testing
public static byte[] loadFromResource(final Configuration conf, final String s, final int maxSize)
throws IOException {
if (!s.startsWith(RESOURCE_SCHEME)) {
throw new IOException("Path does not start with " + RESOURCE_SCHEME);
}
final String path = s.substring(RESOURCE_SCHEME.length(), s.length());
LOG.info("Loading resource {}", path);
final InputStream in = DictionaryCache.class.getClassLoader().getResourceAsStream(path);
if (in == null) {
throw new FileNotFoundException("Resource " + path + " not found");
}
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
try {
final byte[] buffer = new byte[8192];
int n, len = 0;
do {
n = in.read(buffer);
if (n > 0) {
len += n;
if (len > maxSize) {
throw new IOException("Dictionary " + s + " is too large, limit=" + maxSize);
}
baos.write(buffer, 0, n);
}
} while (n > 0);
} finally {
in.close();
}
return baos.toByteArray();
} | 3.68 |
framework_HierarchyRenderer_setInnerRenderer | /**
* Sets the renderer to be wrapped. This is the original renderer before
* hierarchy is applied.
*
* @param innerRenderer
* Renderer to be wrapped.
*/
@SuppressWarnings("rawtypes")
public void setInnerRenderer(Renderer innerRenderer) {
this.innerRenderer = innerRenderer;
} | 3.68 |
hbase_DisableTableProcedure_setTableStateToDisabled | /**
* Mark table state to Disabled
* @param env MasterProcedureEnv
*/
protected static void setTableStateToDisabled(final MasterProcedureEnv env,
final TableName tableName) throws IOException {
// Flip the table to disabled
env.getMasterServices().getTableStateManager().setTableState(tableName,
TableState.State.DISABLED);
LOG.info("Set {} to state={}", tableName, TableState.State.DISABLED);
} | 3.68 |
hadoop_Chunk_writeChunk | /**
* Write out a chunk.
*
* @param chunk
* The chunk buffer.
* @param offset
* Offset to chunk buffer for the beginning of chunk.
* @param len
* Length of data to write from the chunk buffer.
* @param last
* Is this the last call to flushBuffer?
*/
private void writeChunk(byte[] chunk, int offset, int len, boolean last)
throws IOException {
if (last) { // always write out the length for the last chunk.
Utils.writeVInt(out, len);
if (len > 0) {
out.write(chunk, offset, len);
}
} else {
if (len > 0) {
Utils.writeVInt(out, -len);
out.write(chunk, offset, len);
}
}
} | 3.68 |
hbase_HMobStore_commitFile | /**
* Commits the mob file.
* @param sourceFile The source file.
* @param targetPath The directory path where the source file is renamed to.
*/
public void commitFile(final Path sourceFile, Path targetPath) throws IOException {
if (sourceFile == null) {
return;
}
Path dstPath = new Path(targetPath, sourceFile.getName());
validateMobFile(sourceFile);
if (sourceFile.equals(targetPath)) {
LOG.info("File is already in the destination dir: {}", sourceFile);
return;
}
LOG.info(" FLUSH Renaming flushed file from {} to {}", sourceFile, dstPath);
Path parent = dstPath.getParent();
if (!getFileSystem().exists(parent)) {
getFileSystem().mkdirs(parent);
}
if (!getFileSystem().rename(sourceFile, dstPath)) {
throw new IOException("Failed rename of " + sourceFile + " to " + dstPath);
}
} | 3.68 |
flink_PythonTableUtils_getInputFormat | /**
* Wrap the unpickled python data with an InputFormat. It will be passed to
* PythonDynamicTableSource later.
*
* @param data The unpickled python data.
* @param dataType The python data type.
* @return An InputFormat containing the python data.
*/
public static InputFormat<RowData, ?> getInputFormat(
final List<Object[]> data, final DataType dataType) {
Function<Object, Object> converter = converter(dataType.getLogicalType());
Collection<RowData> dataCollection =
data.stream()
.map(objects -> (RowData) converter.apply(objects))
.collect(Collectors.toList());
return new CollectionInputFormat<>(
dataCollection, InternalSerializers.create(dataType.getLogicalType()));
} | 3.68 |
framework_StaticSection_getStyleName | /**
* Returns the custom style name for this cell.
*
* @return the style name or null if no style name has been set
*/
public String getStyleName() {
return cellState.styleName;
} | 3.68 |
flink_SubtaskStateStats_getAckTimestamp | /**
* Returns the timestamp when the acknowledgement of this subtask was received at the
* coordinator.
*
* @return ACK timestamp at the coordinator.
*/
public long getAckTimestamp() {
return ackTimestamp;
} | 3.68 |
hbase_GlobalMetricRegistriesAdapter_init | /**
* Make sure that this global MetricSource for hbase-metrics module based metrics is initialized.
* This should be called only once.
*/
public static GlobalMetricRegistriesAdapter init() {
return new GlobalMetricRegistriesAdapter();
} | 3.68 |
framework_LayoutDependencyTree_hasVerticaConnectorToLayout | /**
* Returns whether there are any managed layouts waiting for vertical
* layouting.
*
* @return {@code true} if vertical layouting queue is not empty,
* {@code false} otherwise
*/
public boolean hasVerticaConnectorToLayout() {
return !getLayoutQueue(VERTICAL).isEmpty();
} | 3.68 |
flink_ExtractionUtils_isAssignable | /**
* Checks if one {@code Class} can be assigned to a variable of another {@code Class}.
*
* <p>Unlike the {@link Class#isAssignableFrom(java.lang.Class)} method, this method takes into
* account widenings of primitive classes and {@code null}s.
*
* <p>Primitive widenings allow an int to be assigned to a long, float or double. This method
* returns the correct result for these cases.
*
* <p>{@code Null} may be assigned to any reference type. This method will return {@code true}
* if {@code null} is passed in and the toClass is non-primitive.
*
* <p>Specifically, this method tests whether the type represented by the specified {@code
* Class} parameter can be converted to the type represented by this {@code Class} object via an
* identity conversion, a widening primitive conversion, or a widening reference conversion. See <em><a
* href="http://docs.oracle.com/javase/specs/">The Java Language Specification</a></em>,
* sections 5.1.1, 5.1.2 and 5.1.4 for details.
*
* @param cls the Class to check, may be null
* @param toClass the Class to try to assign into, returns false if null
* @param autoboxing whether to use implicit autoboxing/unboxing between primitives and wrappers
* @return {@code true} if assignment possible
*/
public static boolean isAssignable(
Class<?> cls, final Class<?> toClass, final boolean autoboxing) {
if (toClass == null) {
return false;
}
// have to check for null, as isAssignableFrom doesn't
if (cls == null) {
return !toClass.isPrimitive();
}
// autoboxing:
if (autoboxing) {
if (cls.isPrimitive() && !toClass.isPrimitive()) {
cls = primitiveToWrapper(cls);
if (cls == null) {
return false;
}
}
if (toClass.isPrimitive() && !cls.isPrimitive()) {
cls = wrapperToPrimitive(cls);
if (cls == null) {
return false;
}
}
}
if (cls.equals(toClass)) {
return true;
}
if (cls.isPrimitive()) {
if (!toClass.isPrimitive()) {
return false;
}
if (Integer.TYPE.equals(cls)) {
return Long.TYPE.equals(toClass)
|| Float.TYPE.equals(toClass)
|| Double.TYPE.equals(toClass);
}
if (Long.TYPE.equals(cls)) {
return Float.TYPE.equals(toClass) || Double.TYPE.equals(toClass);
}
if (Boolean.TYPE.equals(cls)) {
return false;
}
if (Double.TYPE.equals(cls)) {
return false;
}
if (Float.TYPE.equals(cls)) {
return Double.TYPE.equals(toClass);
}
if (Character.TYPE.equals(cls)) {
return Integer.TYPE.equals(toClass)
|| Long.TYPE.equals(toClass)
|| Float.TYPE.equals(toClass)
|| Double.TYPE.equals(toClass);
}
if (Short.TYPE.equals(cls)) {
return Integer.TYPE.equals(toClass)
|| Long.TYPE.equals(toClass)
|| Float.TYPE.equals(toClass)
|| Double.TYPE.equals(toClass);
}
if (Byte.TYPE.equals(cls)) {
return Short.TYPE.equals(toClass)
|| Integer.TYPE.equals(toClass)
|| Long.TYPE.equals(toClass)
|| Float.TYPE.equals(toClass)
|| Double.TYPE.equals(toClass);
}
// should never get here
return false;
}
return toClass.isAssignableFrom(cls);
} | 3.68 |
hadoop_ServiceRecord_findByAPI | /**
* Find an endpoint by its API
* @param list list
* @param api api name
* @return the endpoint or null if there was no match
*/
private Endpoint findByAPI(List<Endpoint> list, String api) {
for (Endpoint endpoint : list) {
if (endpoint.api.equals(api)) {
return endpoint;
}
}
return null;
} | 3.68 |
hadoop_OBSBlockOutputStream_putObject | /**
* Upload the current block as a single PUT request; if the buffer is empty a
* 0-byte PUT will be invoked, as it is needed to create an entry at the far
* end.
*
* @throws IOException any problem.
*/
private synchronized void putObject() throws IOException {
LOG.debug("Executing regular upload for {}",
writeOperationHelper.toString(key));
final OBSDataBlocks.DataBlock block = getActiveBlock();
clearActiveBlock();
final int size = block.dataSize();
final PutObjectRequest putObjectRequest;
if (block instanceof OBSDataBlocks.DiskBlock) {
putObjectRequest = writeOperationHelper.newPutRequest(key,
(File) block.startUpload());
} else {
putObjectRequest =
writeOperationHelper.newPutRequest(key,
(InputStream) block.startUpload(), size);
}
putObjectRequest.setAcl(fs.getCannedACL());
fs.getSchemeStatistics().incrementWriteOps(1);
try {
// the putObject call automatically closes the input
// stream afterwards.
writeOperationHelper.putObject(putObjectRequest);
} finally {
OBSCommonUtils.closeAll(block);
}
} | 3.68 |
hudi_AbstractHoodieLogRecordReader_isNewInstantBlock | /**
* Checks if the current logblock belongs to a later instant.
*/
private boolean isNewInstantBlock(HoodieLogBlock logBlock) {
return currentInstantLogBlocks.size() > 0 && currentInstantLogBlocks.peek().getBlockType() != CORRUPT_BLOCK
&& !logBlock.getLogBlockHeader().get(INSTANT_TIME)
.contentEquals(currentInstantLogBlocks.peek().getLogBlockHeader().get(INSTANT_TIME));
} | 3.68 |
hadoop_RequestFactoryImpl_withCannedACL | /**
* ACL For new objects.
* @param value new value
* @return the builder
*/
public RequestFactoryBuilder withCannedACL(
final String value) {
cannedACL = value;
return this;
} | 3.68 |
flink_MessageSerializer_deserializeRequestFailure | /**
* De-serializes the {@link RequestFailure} sent to the {@link
* org.apache.flink.queryablestate.network.Client} in case of protocol related errors.
*
* <pre>
* <b>The buffer is expected to be at the correct position.</b>
* </pre>
*
* @param buf The {@link ByteBuf} containing the serialized failure message.
* @return The failure message.
*/
public static RequestFailure deserializeRequestFailure(final ByteBuf buf)
throws IOException, ClassNotFoundException {
long requestId = buf.readLong();
Throwable cause;
try (ByteBufInputStream bis = new ByteBufInputStream(buf);
ObjectInputStream in = new ObjectInputStream(bis)) {
cause = (Throwable) in.readObject();
}
return new RequestFailure(requestId, cause);
} | 3.68 |
framework_DataCommunicator_setFilter | /**
* Sets the filter for this DataCommunicator. This method is used by the user
* through the consumer method from {@link #setDataProvider} and should not
* be called elsewhere.
*
* @param filter
* the filter
*
* @param <F>
* the filter type
*
* @since 8.1
*/
protected <F> void setFilter(F filter) {
this.filter = filter;
} | 3.68 |
framework_VRadioButtonGroup_updateItemEnabled | /**
* Updates the enabled state of a radio button.
*
* @param radioButton
* the radio button to update
* @param value
* {@code true} if enabled; {@code false} if not
*
* @since 8.3.3
*/
protected void updateItemEnabled(RadioButton radioButton, boolean value) {
boolean enabled = value && !isReadonly() && isEnabled();
radioButton.setEnabled(enabled);
// #9258 apply the v-disabled class when disabled for UX
boolean hasDisabledStyle = !isEnabled() || !value;
radioButton.setStyleName(StyleConstants.DISABLED, hasDisabledStyle);
} | 3.68 |
hibernate-validator_NotEmptyValidatorForMap_isValid | /**
* Checks the map is not {@code null} and not empty.
*
* @param map the map to validate
* @param constraintValidatorContext context in which the constraint is evaluated
* @return returns {@code true} if the map is not {@code null} and the map is not empty
*/
@Override
public boolean isValid(Map map, ConstraintValidatorContext constraintValidatorContext) {
if ( map == null ) {
return false;
}
return map.size() > 0;
} | 3.68 |
morf_SelectStatementBuilder_distinct | /**
* Use DISTINCT.
*
* @return this, for method chaining.
*/
public SelectStatementBuilder distinct() {
this.distinct = true;
return this;
} | 3.68 |
hadoop_ContainerReapContext_setContainer | /**
* Set the container within the context.
*
* @param container the {@link Container}.
* @return the Builder with the container set.
*/
public Builder setContainer(Container container) {
this.builderContainer = container;
return this;
} | 3.68 |
framework_VScrollTable_getHeaderPadding | /**
* Returns the extra space that is given to the header column when column
* width is determined by header text.
*
* @return extra space in pixels
*/
private int getHeaderPadding() {
return scrollBody.getCellExtraWidth();
} | 3.68 |
shardingsphere-elasticjob_RegistryCenterFactory_createCoordinatorRegistryCenter | /**
* Create a {@link CoordinatorRegistryCenter} or return the existing one if there is one set up with the same {@code connectionString}, {@code namespace} and {@code digest} already.
*
* @param connectString registry center connect string
* @param namespace registry center namespace
* @param digest registry center digest
* @return registry center
*/
public static CoordinatorRegistryCenter createCoordinatorRegistryCenter(final String connectString, final String namespace, final String digest) {
Hasher hasher = Hashing.sha256().newHasher().putString(connectString, StandardCharsets.UTF_8).putString(namespace, StandardCharsets.UTF_8);
if (!Strings.isNullOrEmpty(digest)) {
hasher.putString(digest, StandardCharsets.UTF_8);
}
HashCode hashCode = hasher.hash();
return REG_CENTER_REGISTRY.computeIfAbsent(hashCode, unused -> {
CoordinatorRegistryCenter result = newCoordinatorRegistryCenter(connectString, namespace, digest);
result.init();
return result;
});
} | 3.68 |
Activiti_BpmnDeploymentHelper_getMostRecentVersionOfProcessDefinition | /**
* Gets the most recent persisted process definition that matches this one for tenant and key.
* If none is found, returns null. This method assumes that the tenant and key are properly
* set on the process definition entity.
*/
public ProcessDefinitionEntity getMostRecentVersionOfProcessDefinition(ProcessDefinitionEntity processDefinition) {
String key = processDefinition.getKey();
String tenantId = processDefinition.getTenantId();
ProcessDefinitionEntityManager processDefinitionManager
= Context.getCommandContext().getProcessEngineConfiguration().getProcessDefinitionEntityManager();
ProcessDefinitionEntity existingDefinition = null;
if (tenantId != null && !tenantId.equals(ProcessEngineConfiguration.NO_TENANT_ID)) {
existingDefinition = processDefinitionManager.findLatestProcessDefinitionByKeyAndTenantId(key, tenantId);
} else {
existingDefinition = processDefinitionManager.findLatestProcessDefinitionByKey(key);
}
return existingDefinition;
} | 3.68 |
dubbo_AccessLogData_setInvocationTime | /**
* Set the invocation time. As an argument it accepts a date object.
*
* @param invocationTime
*/
public void setInvocationTime(Date invocationTime) {
set(INVOCATION_TIME, invocationTime);
} | 3.68 |
hbase_QuotaTableUtil_makeQuotaSnapshotScanForTable | /**
* Creates a {@link Scan} which returns only {@link SpaceQuotaSnapshot} from the quota table for a
* specific table.
* @param tn Optionally, a table name to limit the scan's rowkey space. Can be null.
*/
public static Scan makeQuotaSnapshotScanForTable(TableName tn) {
Scan s = new Scan();
// Limit to "u:v" column
s.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY);
if (null == tn) {
s.setStartStopRowForPrefixScan(QUOTA_TABLE_ROW_KEY_PREFIX);
} else {
byte[] row = getTableRowKey(tn);
// Limit rowspace to the "t:" prefix
s.withStartRow(row, true).withStopRow(row, true);
}
return s;
} | 3.68 |
framework_DefaultEditorEventHandler_findPrevEditableColumnIndex | /**
* Finds index of the last editable column, searching backwards starting at
* the specified index.
*
* @param grid
* the current grid, not null.
* @param startingWith
* start with this column. Index into the
* {@link Grid#getVisibleColumns()}.
* @return the index of the nearest visible column; may return the
* <code>startingWith</code> itself. Returns -1 if there is no such
* column.
*/
protected int findPrevEditableColumnIndex(Grid<T> grid, int startingWith) {
final List<Grid.Column<?, T>> columns = grid.getVisibleColumns();
for (int i = startingWith; i >= 0; i--) {
if (isEditable(grid, columns.get(i))) {
return i;
}
}
return -1;
} | 3.68 |
hbase_StorageClusterStatusModel_setCpRequestsCount | /**
* @param cpRequestsCount The current total coprocessor requests made to the region
*/
public void setCpRequestsCount(long cpRequestsCount) {
this.cpRequestsCount = cpRequestsCount;
} | 3.68 |
hbase_NamespacesInstanceResource_getNamespaceInstanceResource | /**
* Dispatch to NamespacesInstanceResource for getting the list of tables.
*/
@Path("tables")
public NamespacesInstanceResource
getNamespaceInstanceResource(final @PathParam("tables") String namespace) throws IOException {
return new NamespacesInstanceResource(this.namespace, true);
} | 3.68 |
pulsar_MLTransactionSequenceIdGenerator_onManagedLedgerPropertiesInitialize | // When all of the ledgers have been deleted, we will generate the sequenceId from the managedLedger properties
@Override
public void onManagedLedgerPropertiesInitialize(Map<String, String> propertiesMap) {
if (propertiesMap == null || propertiesMap.size() == 0) {
return;
}
if (propertiesMap.containsKey(MAX_LOCAL_TXN_ID)) {
sequenceId.set(Long.parseLong(propertiesMap.get(MAX_LOCAL_TXN_ID)));
}
} | 3.68 |
framework_User_setRoles | /**
* Replaces the set of roles with another collection. User references in
* roles are automatically updated when setting the roles of a user.
*
* @param roles
* non-null set of roles
*/
public void setRoles(Set<Role> roles) {
for (Role role : this.roles) {
role.getUsers().remove(this);
}
this.roles = roles;
for (Role role : this.roles) {
role.getUsers().add(this);
}
} | 3.68 |
hbase_HRegion_hasSeenFailedSanityCheck | /** Returns whether a {@link FailedSanityCheckException} has been observed. */
boolean hasSeenFailedSanityCheck() {
return failedSanityCheck;
} | 3.68 |
framework_CompositeValidator_setErrorMessage | /**
* Sets the message to be included in the exception in case the value does
* not validate. The exception message is typically shown to the end user.
*
* @param errorMessage
* the error message.
*/
public void setErrorMessage(String errorMessage) {
this.errorMessage = errorMessage;
} | 3.68 |
rocketmq-connect_Worker_checkAndStopConnectors | /**
* Check and stop connectors that are no longer assigned to this worker.
*
* @param assigns
*/
private void checkAndStopConnectors(Collection<String> assigns) {
Set<String> connectors = this.connectors.keySet();
if (CollectionUtils.isEmpty(assigns)) {
// delete all
for (String connector : connectors) {
log.info("It may be that the load balancing assigns this connector to other nodes,connector {}", connector);
stopAndAwaitConnector(connector);
}
return;
}
for (String connectorName : connectors) {
if (!assigns.contains(connectorName)) {
log.info("It may be that the load balancing assigns this connector to other nodes,connector {}", connectorName);
stopAndAwaitConnector(connectorName);
}
}
} | 3.68 |
framework_FreeformQuery_setOrderBy | /*
* (non-Javadoc)
*
* @see
* com.vaadin.data.util.sqlcontainer.query.QueryDelegate#setOrderBy(java
* .util.List)
*/
@Override
public void setOrderBy(List<OrderBy> orderBys)
throws UnsupportedOperationException {
if (delegate != null) {
delegate.setOrderBy(orderBys);
} else if (orderBys != null) {
throw new UnsupportedOperationException(
"FreeFormQueryDelegate not set!");
}
} | 3.68 |
framework_VFilterSelect_setTextboxText | /**
* Sets the text in the text box.
*
* @param text
* the text to set in the text box
*/
public void setTextboxText(final String text) {
if (enableDebug) {
debug("VFS: setTextboxText(" + text + ")");
}
setText(text);
} | 3.68 |
zxing_CalendarParsedResult_getStartTimestamp | /**
* @return start time
* @see #getEndTimestamp()
*/
public long getStartTimestamp() {
return start;
} | 3.68 |
hadoop_FederationStateStoreFacade_addOrUpdateReservationHomeSubCluster | /**
* Add or Update ReservationHomeSubCluster.
*
* @param reservationId reservationId.
* @param subClusterId homeSubClusterId, this is selected by strategy.
* @param retryCount number of retries.
* @throws YarnException yarn exception.
*/
public void addOrUpdateReservationHomeSubCluster(ReservationId reservationId,
SubClusterId subClusterId, int retryCount) throws YarnException {
Boolean exists = existsReservationHomeSubCluster(reservationId);
ReservationHomeSubCluster reservationHomeSubCluster =
ReservationHomeSubCluster.newInstance(reservationId, subClusterId);
if (!exists || retryCount == 0) {
// persist the mapping of reservationId and the subClusterId which has
// been selected as its home.
addReservationHomeSubCluster(reservationId, reservationHomeSubCluster);
} else {
// update the mapping of reservationId and the home subClusterId to
// the new subClusterId we have selected.
updateReservationHomeSubCluster(subClusterId, reservationId,
reservationHomeSubCluster);
}
} | 3.68 |
hadoop_HdfsLocatedFileStatus_getStoragePolicy | /** @return the storage policy id */
@Override
public byte getStoragePolicy() {
return storagePolicy;
} | 3.68 |
flink_MergeTableLikeUtil_mergeOptions | /** Merges the options part of {@code CREATE TABLE} statement. */
public Map<String, String> mergeOptions(
MergingStrategy mergingStrategy,
Map<String, String> sourceOptions,
Map<String, String> derivedOptions) {
Map<String, String> options = new HashMap<>();
if (mergingStrategy != MergingStrategy.EXCLUDING) {
options.putAll(sourceOptions);
}
derivedOptions.forEach(
(key, value) -> {
if (mergingStrategy != MergingStrategy.OVERWRITING
&& options.containsKey(key)) {
throw new ValidationException(
String.format(
"There already exists an option ['%s' -> '%s'] in the "
+ "base table. You might want to specify EXCLUDING OPTIONS or OVERWRITING OPTIONS.",
key, options.get(key)));
}
options.put(key, value);
});
return options;
} | 3.68 |
flink_TwoInputTransformation_getStateKeySelector2 | /**
* Returns the {@code KeySelector} that must be used for partitioning keyed state in this
* Operation for the second input.
*
* @see #setStateKeySelectors
*/
public KeySelector<IN2, ?> getStateKeySelector2() {
return stateKeySelector2;
} | 3.68 |
hadoop_PersistentCommitData_load | /**
* Load an instance from a status, then validate it.
* This uses the openFile() API, which S3A supports, for faster loading
* and to declare sequential access.
* @param <T> type of persistent format
* @param fs filesystem
* @param status status of file to load
* @param serializer serializer to use
* @return the loaded instance
* @throws IOException IO failure
* @throws ValidationFailure if the data is invalid
*/
public static <T extends PersistentCommitData> T load(FileSystem fs,
FileStatus status,
JsonSerialization<T> serializer)
throws IOException {
Path path = status.getPath();
LOG.debug("Reading commit data from file {}", path);
T result = serializer.load(fs, path, status);
result.validate();
return result;
} | 3.68 |
dubbo_GlobalResourcesRepository_getOneoffDisposables | // for test
public List<Disposable> getOneoffDisposables() {
return oneoffDisposables;
} | 3.68 |
morf_AbstractConnectionResources_openSchemaResource | /**
* @see org.alfasoftware.morf.jdbc.ConnectionResources#openSchemaResource(DataSource)
*/
@Override
public final SchemaResource openSchemaResource(DataSource dataSource) {
return SchemaResourceImpl.create(dataSource, this);
} | 3.68 |
hudi_ClusteringUtils_isClusteringCommit | /**
* Checks if the replacecommit is a clustering commit.
*/
public static boolean isClusteringCommit(HoodieTableMetaClient metaClient, HoodieInstant pendingReplaceInstant) {
return getClusteringPlan(metaClient, pendingReplaceInstant).isPresent();
} | 3.68 |
dubbo_IdleSensible_canHandleIdle | /**
* Whether the implementation can sense and handle the idle connection. By default it's false; the implementation
* relies on a dedicated timer to take care of idle connections.
*
* @return whether it has the ability to handle idle connection
*/
default boolean canHandleIdle() {
return false;
} | 3.68 |
flink_StreamExecutionEnvironment_setStateBackend | /**
* Sets the state backend that describes how to store operator. It defines the data structures
* that hold state during execution (for example hash tables, RocksDB, or other data stores).
*
* <p>State managed by the state backend includes both keyed state that is accessible on {@link
* org.apache.flink.streaming.api.datastream.KeyedStream keyed streams}, as well as state
* maintained directly by the user code that implements {@link
* org.apache.flink.streaming.api.checkpoint.CheckpointedFunction CheckpointedFunction}.
*
* <p>The {@link org.apache.flink.runtime.state.hashmap.HashMapStateBackend} maintains state in
* heap memory, as objects. It is lightweight without extra dependencies, but is limited to JVM
* heap memory.
*
* <p>In contrast, the {@code EmbeddedRocksDBStateBackend} stores its state in an embedded
* {@code RocksDB} instance. This state backend can store very large state that exceeds memory
* and spills to local disk. All key/value state (including windows) is stored in the key/value
* index of RocksDB.
*
* <p>In both cases, fault tolerance is managed via the jobs {@link
* org.apache.flink.runtime.state.CheckpointStorage} which configures how and where state
* backends persist during a checkpoint.
*
* @return This StreamExecutionEnvironment itself, to allow chaining of function calls.
* @see #getStateBackend()
* @see CheckpointConfig#setCheckpointStorage( org.apache.flink.runtime.state.CheckpointStorage)
*/
@PublicEvolving
public StreamExecutionEnvironment setStateBackend(StateBackend backend) {
this.defaultStateBackend = Preconditions.checkNotNull(backend);
return this;
} | 3.68 |
querydsl_AbstractPostgreSQLQuery_distinctOn | /**
* adds a DISTINCT ON clause
*
* @param exprs expressions to apply DISTINCT ON to
* @return the current object
*/
public C distinctOn(Expression<?>... exprs) {
return addFlag(Position.AFTER_SELECT,
Expressions.template(Object.class, "distinct on({0}) ",
ExpressionUtils.list(Object.class, exprs)));
} | 3.68 |
framework_DragAndDropWrapper_getVerticalDropLocation | /**
* @return a detail about the drag's vertical position over the wrapper.
*/
public VerticalDropLocation getVerticalDropLocation() {
return VerticalDropLocation
.valueOf((String) getData("verticalLocation"));
} | 3.68 |
flink_DataStreamUtils_reinterpretAsKeyedStream | /**
* Reinterprets the given {@link DataStream} as a {@link KeyedStream}, which extracts keys with
* the given {@link KeySelector}.
*
* <p>IMPORTANT: For every partition of the base stream, the keys of events in the base stream
* must be partitioned exactly in the same way as if it was created through a {@link
* DataStream#keyBy(KeySelector)}.
*
* @param stream The data stream to reinterpret. For every partition, this stream must be
* partitioned exactly in the same way as if it was created through a {@link
* DataStream#keyBy(KeySelector)}.
* @param keySelector Function that defines how keys are extracted from the data stream.
* @param typeInfo Explicit type information about the key type.
* @param <T> Type of events in the data stream.
* @param <K> Type of the extracted keys.
* @return The reinterpretation of the {@link DataStream} as a {@link KeyedStream}.
*/
public static <T, K> KeyedStream<T, K> reinterpretAsKeyedStream(
DataStream<T> stream, KeySelector<T, K> keySelector, TypeInformation<K> typeInfo) {
PartitionTransformation<T> partitionTransformation =
new PartitionTransformation<>(
stream.getTransformation(), new ForwardPartitioner<>());
return new KeyedStream<>(stream, partitionTransformation, keySelector, typeInfo);
} | 3.68 |
flink_WindowMapState_get | /**
* Returns the current value associated with the given key.
*
* @param key The key of the mapping
* @return The value of the mapping with the given key
* @throws Exception Thrown if the system cannot access the state.
*/
public UV get(W window, RowData key) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.get(key);
} | 3.68 |
morf_DataSetProducerBuilderImpl_close | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#close()
*/
@Override
public void close() {
// Nothing to do
} | 3.68 |
hbase_ExponentialClientBackoffPolicy_scale | /** Scale valueIn in the range [baseMin,baseMax] to the range [limitMin,limitMax] */
private static double scale(double valueIn, double baseMin, double baseMax, double limitMin,
double limitMax) {
Preconditions.checkArgument(baseMin <= baseMax, "Illegal source range [%s,%s]", baseMin,
baseMax);
Preconditions.checkArgument(limitMin <= limitMax, "Illegal target range [%s,%s]", limitMin,
limitMax);
Preconditions.checkArgument(valueIn >= baseMin && valueIn <= baseMax,
"Value %s must be within the range [%s,%s]", valueIn, baseMin, baseMax);
return ((limitMax - limitMin) * (valueIn - baseMin) / (baseMax - baseMin)) + limitMin;
} | 3.68 |
flink_CepOperator_advanceTime | /**
* Advances the time for the given NFA to the given timestamp. This means that no more events
* with timestamp <b>lower</b> than the given timestamp should be passed to the NFA. This can
* lead to pruning and timeouts.
*/
private void advanceTime(NFAState nfaState, long timestamp) throws Exception {
try (SharedBufferAccessor<IN> sharedBufferAccessor = partialMatches.getAccessor()) {
Tuple2<
Collection<Map<String, List<IN>>>,
Collection<Tuple2<Map<String, List<IN>>, Long>>>
pendingMatchesAndTimeout =
nfa.advanceTime(
sharedBufferAccessor,
nfaState,
timestamp,
afterMatchSkipStrategy);
Collection<Map<String, List<IN>>> pendingMatches = pendingMatchesAndTimeout.f0;
Collection<Tuple2<Map<String, List<IN>>, Long>> timedOut = pendingMatchesAndTimeout.f1;
if (!pendingMatches.isEmpty()) {
processMatchedSequences(pendingMatches, timestamp);
}
if (!timedOut.isEmpty()) {
processTimedOutSequences(timedOut);
}
}
} | 3.68 |
streampipes_Protocols_kafka | /**
* Defines the transport protocol Kafka used by a data stream at runtime using a
* {@link org.apache.streampipes.model.grounding.WildcardTopicDefinition}
*
* @param kafkaHost The hostname of any Kafka broker
* @param kafkaPort The port of any Kafka broker
* @param wildcardTopicDefinition The wildcard topic definition.
* @return The {@link org.apache.streampipes.model.grounding.KafkaTransportProtocol}
* containing URL and topic where data arrives.
*/
public static KafkaTransportProtocol kafka(String kafkaHost, Integer kafkaPort, WildcardTopicDefinition
wildcardTopicDefinition) {
return new KafkaTransportProtocol(kafkaHost, kafkaPort, wildcardTopicDefinition);
} | 3.68 |