name | code_snippet | score
---|---|---|
hbase_RpcThrottlingException_stringFromMillis | // Visible for TestRpcThrottlingException
protected static String stringFromMillis(long millis) {
StringBuilder buf = new StringBuilder();
long hours = millis / (60 * 60 * 1000);
long rem = (millis % (60 * 60 * 1000));
long minutes = rem / (60 * 1000);
rem = rem % (60 * 1000);
long seconds = rem / 1000;
long milliseconds = rem % 1000;
if (hours != 0) {
buf.append(hours);
buf.append(hours > 1 ? "hrs, " : "hr, ");
}
if (minutes != 0) {
buf.append(minutes);
buf.append(minutes > 1 ? "mins, " : "min, ");
}
if (seconds != 0) {
buf.append(seconds);
buf.append("sec, ");
}
buf.append(milliseconds);
buf.append("ms");
return buf.toString();
} | 3.68 |
framework_VaadinService_createVaadinSession | /**
* Creates a new Vaadin session for this service and request.
*
* @param request
* The request for which to create a VaadinSession
* @return A new VaadinSession
* @throws ServiceException
* if creating the session fails
*/
protected VaadinSession createVaadinSession(VaadinRequest request)
throws ServiceException {
return new VaadinSession(this);
} | 3.68 |
framework_CssLayout_addComponentAsFirst | /**
* Adds a component into this container. The component is added to the left
* or on top of the other components.
*
* @param c
* the component to be added.
*/
public void addComponentAsFirst(Component c) {
// If c is already in this, we must remove it before proceeding
// see ticket #7668
if (equals(c.getParent())) {
removeComponent(c);
}
components.addFirst(c);
try {
super.addComponent(c);
} catch (IllegalArgumentException e) {
components.remove(c);
throw e;
}
} | 3.68 |
hudi_AvroSchemaUtils_containsFieldInSchema | /**
* Returns true if the schema contains a field with the provided name
*/
public static boolean containsFieldInSchema(Schema schema, String fieldName) {
try {
Schema.Field field = schema.getField(fieldName);
return field != null;
} catch (Exception e) {
return false;
}
} | 3.68 |
flink_InPlaceMutableHashTable_overwritePointerAt | /**
* Overwrites the long value at the specified position.
*
* @param pointer Points to the position to overwrite.
* @param value The value to write.
* @throws IOException
*/
public void overwritePointerAt(long pointer, long value) throws IOException {
setWritePosition(pointer);
outView.writeLong(value);
} | 3.68 |
flink_ProjectOperator_projectTuple17 | /**
* Projects a {@link Tuple} {@link DataSet} to the previously selected fields.
*
* @return The projected DataSet.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>
ProjectOperator<T, Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> projectTuple17() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType());
TupleTypeInfo<Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>> tType =
new TupleTypeInfo<Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>>(fTypes);
return new ProjectOperator<T, Tuple17<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16>>(this.ds, this.fieldIndexes, tType);
} | 3.68 |
hbase_Threads_printThreadInfo | /**
* Print all of the thread's information and stack traces. Wrapper around Hadoop's method.
* @param stream the stream to print to
* @param title a string title for the stack trace
*/
public static void printThreadInfo(PrintStream stream, String title) {
ReflectionUtils.printThreadInfo(stream, title);
} | 3.68 |
hbase_OrderedInt8_encodeByte | /**
* Write instance {@code val} into buffer {@code dst}.
* @param dst the {@link PositionedByteRange} to write to
* @param val the value to write to {@code dst}
* @return the number of bytes written
*/
public int encodeByte(PositionedByteRange dst, byte val) {
return OrderedBytes.encodeInt8(dst, val, order);
} | 3.68 |
flink_FileChannelManagerImpl_close | /** Remove all the temp directories. */
@Override
public void close() throws Exception {
// Marks shutdown and exits if it has already been shut down.
if (!isShutdown.compareAndSet(false, true)) {
return;
}
IOUtils.closeAll(
Arrays.stream(paths)
.filter(File::exists)
.map(FileChannelManagerImpl::getFileCloser)
.collect(Collectors.toList()));
ShutdownHookUtil.removeShutdownHook(
shutdownHook, String.format("%s-%s", getClass().getSimpleName(), prefix), LOG);
} | 3.68 |
framework_VFlash_createFlashEmbed | /**
* Creates the embed String.
*
* @return the embed String
*/
protected String createFlashEmbed() {
/*
* To ensure cross-browser compatibility we are using the twice-cooked
* method to embed flash i.e. we add a OBJECT tag for IE ActiveX and
* inside it a EMBED for all other browsers.
*/
StringBuilder html = new StringBuilder();
// Start the object tag
html.append("<object ");
/*
* Add classid required for ActiveX to recognize the flash. This is a
* predefined value which ActiveX recognizes and must be the given
* value. More info can be found on
* http://kb2.adobe.com/cps/415/tn_4150.html. Allow user to override
* this by setting his own classid.
*/
if (classId != null) {
html.append(
"classid=\"" + WidgetUtil.escapeAttribute(classId) + "\" ");
} else {
html.append(
"classid=\"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000\" ");
}
/*
* Add codebase required for ActiveX and must be exactly this according
* to http://kb2.adobe.com/cps/415/tn_4150.html to work with the above
* given classid. Again, see more info on
* http://kb2.adobe.com/cps/415/tn_4150.html. Limiting Flash version to
* 6.0.0.0 and above. Allow user to override this by setting his own
* codebase
*/
if (codebase != null) {
html.append("codebase=\"" + WidgetUtil.escapeAttribute(codebase)
+ "\" ");
} else {
html.append(
"codebase=\"http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=6,0,0,0\" ");
}
// Add width and height
html.append("width=\"" + WidgetUtil.escapeAttribute(width) + "\" ");
html.append("height=\"" + WidgetUtil.escapeAttribute(height) + "\" ");
html.append("type=\"application/x-shockwave-flash\" ");
// Codetype
if (codetype != null) {
html.append("codetype=\"" + WidgetUtil.escapeAttribute(codetype)
+ "\" ");
}
// Standby
if (standby != null) {
html.append(
"standby=\"" + WidgetUtil.escapeAttribute(standby) + "\" ");
}
// Archive
if (archive != null) {
html.append(
"archive=\"" + WidgetUtil.escapeAttribute(archive) + "\" ");
}
// End object tag
html.append('>');
// Ensure we have a movie parameter
if (embedParams.get("movie") == null) {
embedParams.put("movie", source);
}
// Add parameters to OBJECT
for (String name : embedParams.keySet()) {
html.append("<param ");
html.append("name=\"" + WidgetUtil.escapeAttribute(name) + "\" ");
html.append("value=\""
+ WidgetUtil.escapeAttribute(embedParams.get(name))
+ "\" ");
html.append("/>");
}
// Build inner EMBED tag
html.append("<embed ");
html.append("src=\"" + WidgetUtil.escapeAttribute(source) + "\" ");
if (hasPercentageWidth() && slotOffsetWidth >= 0) {
html.append("width=\"" + getRelativePixelWidth() + "\" ");
} else {
html.append("width=\"" + WidgetUtil.escapeAttribute(width) + "\" ");
}
if (hasPercentageHeight() && slotOffsetHeight >= 0) {
html.append("height=\"" + getRelativePixelHeight() + "px\" ");
} else {
html.append(
"height=\"" + WidgetUtil.escapeAttribute(height) + "\" ");
}
html.append("type=\"application/x-shockwave-flash\" ");
// Add the parameters to the Embed
for (String name : embedParams.keySet()) {
html.append(WidgetUtil.escapeAttribute(name));
html.append('=');
html.append("\"" + WidgetUtil.escapeAttribute(embedParams.get(name))
+ "\"");
}
// End embed tag
html.append("></embed>");
if (altText != null) {
html.append("<noembed>");
html.append(altText);
html.append("</noembed>");
}
// End object tag
html.append("</object>");
return html.toString();
} | 3.68 |
MagicPlugin_MageData_getLocation | /**
* Data can be saved asynchronously, and Locations' Worlds can be invalidated if the server unloads a world.
* So do not call this method during saving.
*/
@Nullable
public Location getLocation() {
return location == null ? null : location.asLocation();
} | 3.68 |
hbase_VersionModel_toString | /*
* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("rest ");
sb.append(restVersion);
sb.append(" [JVM: ");
sb.append(jvmVersion);
sb.append("] [OS: ");
sb.append(osVersion);
sb.append("] [Server: ");
sb.append(serverVersion);
sb.append("] [Jersey: ");
sb.append(jerseyVersion);
sb.append("]\n");
return sb.toString();
} | 3.68 |
framework_CustomLayout_setTemplateName | /**
* Set the name of the template used to draw custom layout.
*
* With GWT-adapter, the template with name 'templatename' is loaded from
* VAADIN/themes/themename/layouts/templatename.html. If the theme has not
* been set (with Application.setTheme()), themename is 'default'.
*
* @param templateName the name of the template
*/
public void setTemplateName(String templateName) {
getState().templateName = templateName;
getState().templateContents = null;
} | 3.68 |
flink_SingleOutputStreamOperator_setParallelism | /**
* Sets the parallelism for this operator.
*
* @param parallelism The parallelism for this operator.
* @return The operator with set parallelism.
*/
public SingleOutputStreamOperator<T> setParallelism(int parallelism) {
OperatorValidationUtils.validateParallelism(parallelism, canBeParallel());
transformation.setParallelism(parallelism);
return this;
} | 3.68 |
framework_VScrollTable_isWorkPending | /*
* Return true if the component needs to perform some work and false otherwise.
*/
@Override
public boolean isWorkPending() {
return lazyAdjustColumnWidths.isRunning();
} | 3.68 |
flink_JoinOperator_projectTuple2 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1> ProjectJoin<I1, I2, Tuple2<T0, T1>> projectTuple2() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple2<T0, T1>> tType = new TupleTypeInfo<Tuple2<T0, T1>>(fTypes);
return new ProjectJoin<I1, I2, Tuple2<T0, T1>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
flink_PageSizeUtil_getSystemPageSizeOrDefault | /**
* Tries to get the system page size. If the page size cannot be determined, this returns the
* {@link #DEFAULT_PAGE_SIZE}.
*/
public static int getSystemPageSizeOrDefault() {
final int pageSize = getSystemPageSize();
return pageSize == PAGE_SIZE_UNKNOWN ? DEFAULT_PAGE_SIZE : pageSize;
} | 3.68 |
hadoop_Anonymizer_createJsonGenerator | // Creates a JSON generator
private JsonGenerator createJsonGenerator(Configuration conf, Path path)
throws IOException {
FileSystem outFS = path.getFileSystem(conf);
CompressionCodec codec =
new CompressionCodecFactory(conf).getCodec(path);
OutputStream output;
Compressor compressor = null;
if (codec != null) {
compressor = CodecPool.getCompressor(codec);
output = codec.createOutputStream(outFS.create(path), compressor);
} else {
output = outFS.create(path);
}
JsonGenerator outGen =
outFactory.createGenerator(output, JsonEncoding.UTF8);
outGen.useDefaultPrettyPrinter();
return outGen;
} | 3.68 |
hbase_ClassSize_getSizeCoefficients | /**
* The estimate of the size of a class instance depends on whether the JVM uses 32 or 64 bit
* addresses, that is it depends on the size of an object reference. It is a linear function of
* the size of a reference, e.g. 24 + 5*r where r is the size of a reference (usually 4 or 8
* bytes). This method returns the coefficients of the linear function, e.g. {24, 5} in the above
* example.
* @param cl A class whose instance size is to be estimated
* @param debug debug flag
* @return an array of 3 integers. The first integer is the size of the primitives, the second the
* number of arrays and the third the number of references.
*/
@SuppressWarnings("unchecked")
private static int[] getSizeCoefficients(Class cl, boolean debug) {
int primitives = 0;
int arrays = 0;
int references = 0;
int index = 0;
for (; null != cl; cl = cl.getSuperclass()) {
Field[] field = cl.getDeclaredFields();
if (null != field) {
for (Field aField : field) {
if (Modifier.isStatic(aField.getModifiers())) continue;
Class fieldClass = aField.getType();
if (fieldClass.isArray()) {
arrays++;
references++;
} else if (!fieldClass.isPrimitive()) {
references++;
} else {// Is simple primitive
String name = fieldClass.getName();
if (name.equals("int") || name.equals("I")) primitives += Bytes.SIZEOF_INT;
else if (name.equals("long") || name.equals("J")) primitives += Bytes.SIZEOF_LONG;
else if (name.equals("boolean") || name.equals("Z")) primitives += Bytes.SIZEOF_BOOLEAN;
else if (name.equals("short") || name.equals("S")) primitives += Bytes.SIZEOF_SHORT;
else if (name.equals("byte") || name.equals("B")) primitives += Bytes.SIZEOF_BYTE;
else if (name.equals("char") || name.equals("C")) primitives += Bytes.SIZEOF_CHAR;
else if (name.equals("float") || name.equals("F")) primitives += Bytes.SIZEOF_FLOAT;
else if (name.equals("double") || name.equals("D")) primitives += Bytes.SIZEOF_DOUBLE;
}
if (debug) {
if (LOG.isDebugEnabled()) {
LOG.debug("" + index + " " + aField.getName() + " " + aField.getType());
}
}
index++;
}
}
}
return new int[] { primitives, arrays, references };
} | 3.68 |
hbase_ColumnSchemaModel___setInMemory | /**
* @param value the desired value of the IN_MEMORY attribute
*/
public void __setInMemory(boolean value) {
attrs.put(IN_MEMORY, Boolean.toString(value));
} | 3.68 |
flink_RexLiteralUtil_toFlinkInternalValue | /**
* Convert a value from Calcite's {@link Comparable} data structures to Flink internal data
* structures and also tries to be a bit flexible by accepting usual Java types such as String
* and boxed numerics.
*
* <p>In case of symbol types, this function will return provided value, checking that it's an
* {@link Enum}.
*
* <p>This function is essentially like {@link FlinkTypeFactory#toLogicalType(RelDataType)} but
* for values.
*
* <p>Check {@link RexLiteral#valueMatchesType(Comparable, SqlTypeName, boolean)} for details on
* the {@link Comparable} data structures and {@link org.apache.flink.table.data.RowData} for
* details on Flink's internal data structures.
*
* @param value the value in Calcite's {@link Comparable} data structures
* @param valueType the type of the value
* @return the value in Flink's internal data structures
* @throws IllegalArgumentException in case the class of value does not match the expectations
* of valueType
*/
public static Object toFlinkInternalValue(Comparable<?> value, LogicalType valueType) {
if (value == null) {
return null;
}
switch (valueType.getTypeRoot()) {
case CHAR:
case VARCHAR:
if (value instanceof NlsString) {
return BinaryStringData.fromString(((NlsString) value).getValue());
}
if (value instanceof String) {
return BinaryStringData.fromString((String) value);
}
break;
case BOOLEAN:
if (value instanceof Boolean) {
return value;
}
break;
case BINARY:
case VARBINARY:
if (value instanceof ByteString) {
return ((ByteString) value).getBytes();
}
break;
case DECIMAL:
if (value instanceof BigDecimal) {
return DecimalData.fromBigDecimal(
(BigDecimal) value,
LogicalTypeChecks.getPrecision(valueType),
LogicalTypeChecks.getScale(valueType));
}
break;
case TINYINT:
if (value instanceof Number) {
return ((Number) value).byteValue();
}
break;
case SMALLINT:
if (value instanceof Number) {
return ((Number) value).shortValue();
}
break;
case INTEGER:
case INTERVAL_YEAR_MONTH:
if (value instanceof Number) {
return ((Number) value).intValue();
}
break;
case BIGINT:
case INTERVAL_DAY_TIME:
if (value instanceof Number) {
return ((Number) value).longValue();
}
break;
case FLOAT:
if (value instanceof Number) {
return ((Number) value).floatValue();
}
break;
case DOUBLE:
if (value instanceof Number) {
return ((Number) value).doubleValue();
}
break;
case DATE:
if (value instanceof DateString) {
return ((DateString) value).getDaysSinceEpoch();
}
if (value instanceof Number) {
return ((Number) value).intValue();
}
break;
case TIME_WITHOUT_TIME_ZONE:
if (value instanceof TimeString) {
return ((TimeString) value).getMillisOfDay();
}
if (value instanceof Number) {
return ((Number) value).intValue();
}
break;
case TIMESTAMP_WITHOUT_TIME_ZONE:
if (value instanceof TimestampString) {
return TimestampData.fromLocalDateTime(
toLocalDateTime((TimestampString) value));
}
break;
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
if (value instanceof TimestampString) {
return TimestampData.fromInstant(
toLocalDateTime((TimestampString) value)
.atOffset(ZoneOffset.UTC)
.toInstant());
}
break;
case DISTINCT_TYPE:
return toFlinkInternalValue(value, ((DistinctType) valueType).getSourceType());
case SYMBOL:
if (value instanceof Enum) {
return value;
}
break;
case TIMESTAMP_WITH_TIME_ZONE:
case ARRAY:
case MULTISET:
case MAP:
case ROW:
case STRUCTURED_TYPE:
case NULL:
case UNRESOLVED:
throw new CodeGenException("Type not supported: " + valueType);
}
throw new IllegalStateException(
"Unexpected class " + value.getClass() + " for value of type " + valueType);
} | 3.68 |
flink_TaskConfig_setImplicitConvergenceCriterion | /**
* Sets the default convergence criterion of a {@link DeltaIteration}
*
* @param aggregatorName
* @param convCriterion
*/
public void setImplicitConvergenceCriterion(
String aggregatorName, ConvergenceCriterion<?> convCriterion) {
try {
InstantiationUtil.writeObjectToConfig(
convCriterion, this.config, ITERATION_IMPLICIT_CONVERGENCE_CRITERION);
} catch (IOException e) {
throw new RuntimeException(
"Error while writing the implicit convergence criterion object to the task configuration.");
}
this.config.setString(ITERATION_IMPLICIT_CONVERGENCE_CRITERION_AGG_NAME, aggregatorName);
} | 3.68 |
hudi_MarkerDirState_parseMarkerFileIndex | /**
* Parses the marker file index from the marker file path.
* <p>
* E.g., if the marker file path is /tmp/table/.hoodie/.temp/000/MARKERS3, the index returned is 3.
*
* @param markerFilePathStr full path of marker file
* @return the marker file index
*/
private int parseMarkerFileIndex(String markerFilePathStr) {
String markerFileName = new Path(markerFilePathStr).getName();
int prefixIndex = markerFileName.indexOf(MARKERS_FILENAME_PREFIX);
if (prefixIndex < 0) {
return -1;
}
try {
return Integer.parseInt(markerFileName.substring(prefixIndex + MARKERS_FILENAME_PREFIX.length()));
} catch (NumberFormatException nfe) {
LOG.error("Failed to parse marker file index from " + markerFilePathStr);
throw new HoodieException(nfe.getMessage(), nfe);
}
} | 3.68 |
hbase_HMobStore_validateMobFile | /**
* Validates a mob file by opening and closing it.
* @param path the path to the mob file
*/
private void validateMobFile(Path path) throws IOException {
HStoreFile storeFile = null;
try {
storeFile = new HStoreFile(getFileSystem(), path, conf, getCacheConfig(), BloomType.NONE,
isPrimaryReplicaStore());
storeFile.initReader();
} catch (IOException e) {
LOG.error("Fail to open mob file[" + path + "], keep it in temp directory.", e);
throw e;
} finally {
if (storeFile != null) {
storeFile.closeStoreFile(false);
}
}
} | 3.68 |
flink_StopWithSavepoint_cancel | /**
* Cancel the job and fail the savepoint operation future.
*
* <p>We don't wait for the {@link #internalSavepointFuture} here so that users can still cancel
* a job if the savepoint takes too long (or gets stuck).
*
* <p>Since we don't actually cancel the savepoint (for which there is no API to do so), there
* is a small risk that the job is cancelled at the very moment that the savepoint completes,
* causing it to not be reported to the user. See FLINK-28127.
*/
@Override
public void cancel() {
operationFailureCause = new FlinkException("The job was cancelled.");
context.goToCanceling(
getExecutionGraph(),
getExecutionGraphHandler(),
getOperatorCoordinatorHandler(),
getFailures());
} | 3.68 |
hudi_StreamSync_writeToSinkAndDoMetaSync | /**
* Perform the Hoodie write. Run the cleaner, schedule compaction and sync to Hive if needed.
*
* @param instantTime instant time to use for ingest.
* @param inputBatch input batch that contains the records, checkpoint, and schema provider
* @param metrics Metrics
* @param overallTimerContext Timer Context
* @return Option Compaction instant if one is scheduled
*/
private Pair<Option<String>, JavaRDD<WriteStatus>> writeToSinkAndDoMetaSync(String instantTime, InputBatch inputBatch,
HoodieIngestionMetrics metrics,
Timer.Context overallTimerContext) {
Option<String> scheduledCompactionInstant = Option.empty();
// write to hudi and fetch result
WriteClientWriteResult writeClientWriteResult = writeToSink(inputBatch, instantTime);
JavaRDD<WriteStatus> writeStatusRDD = writeClientWriteResult.getWriteStatusRDD();
Map<String, List<String>> partitionToReplacedFileIds = writeClientWriteResult.getPartitionToReplacedFileIds();
// process write status
long totalErrorRecords = writeStatusRDD.mapToDouble(WriteStatus::getTotalErrorRecords).sum().longValue();
long totalRecords = writeStatusRDD.mapToDouble(WriteStatus::getTotalRecords).sum().longValue();
long totalSuccessfulRecords = totalRecords - totalErrorRecords;
LOG.info(String.format("instantTime=%s, totalRecords=%d, totalErrorRecords=%d, totalSuccessfulRecords=%d",
instantTime, totalRecords, totalErrorRecords, totalSuccessfulRecords));
if (totalRecords == 0) {
LOG.info("No new data, perform empty commit.");
}
boolean hasErrors = totalErrorRecords > 0;
if (!hasErrors || cfg.commitOnErrors) {
HashMap<String, String> checkpointCommitMetadata = new HashMap<>();
if (!getBooleanWithAltKeys(props, CHECKPOINT_FORCE_SKIP)) {
if (inputBatch.getCheckpointForNextBatch() != null) {
checkpointCommitMetadata.put(CHECKPOINT_KEY, inputBatch.getCheckpointForNextBatch());
}
if (cfg.checkpoint != null) {
checkpointCommitMetadata.put(CHECKPOINT_RESET_KEY, cfg.checkpoint);
}
}
if (hasErrors) {
LOG.warn("Some records failed to be merged but forcing commit since commitOnErrors set. Errors/Total="
+ totalErrorRecords + "/" + totalRecords);
}
String commitActionType = CommitUtils.getCommitActionType(cfg.operation, HoodieTableType.valueOf(cfg.tableType));
if (errorTableWriter.isPresent()) {
// Commit the error events triggered so far to the error table
Option<String> commitedInstantTime = getLatestInstantWithValidCheckpointInfo(commitsTimelineOpt);
boolean errorTableSuccess = errorTableWriter.get().upsertAndCommit(instantTime, commitedInstantTime);
if (!errorTableSuccess) {
switch (errorWriteFailureStrategy) {
case ROLLBACK_COMMIT:
LOG.info("Commit " + instantTime + " failed!");
writeClient.rollback(instantTime);
throw new HoodieStreamerWriteException("Error table commit failed");
case LOG_ERROR:
LOG.error("Error Table write failed for instant " + instantTime);
break;
default:
throw new HoodieStreamerWriteException("Write failure strategy not implemented for " + errorWriteFailureStrategy);
}
}
}
boolean success = writeClient.commit(instantTime, writeStatusRDD, Option.of(checkpointCommitMetadata), commitActionType, partitionToReplacedFileIds, Option.empty());
if (success) {
LOG.info("Commit " + instantTime + " successful!");
this.formatAdapter.getSource().onCommit(inputBatch.getCheckpointForNextBatch());
// Schedule compaction if needed
if (cfg.isAsyncCompactionEnabled()) {
scheduledCompactionInstant = writeClient.scheduleCompaction(Option.empty());
}
if ((totalSuccessfulRecords > 0) || cfg.forceEmptyMetaSync) {
runMetaSync();
} else {
LOG.info(String.format("Not running metaSync totalSuccessfulRecords=%d", totalSuccessfulRecords));
}
} else {
LOG.info("Commit " + instantTime + " failed!");
throw new HoodieStreamerWriteException("Commit " + instantTime + " failed!");
}
} else {
LOG.error("Delta Sync found errors when writing. Errors/Total=" + totalErrorRecords + "/" + totalRecords);
LOG.error("Printing out the top 100 errors");
writeStatusRDD.filter(WriteStatus::hasErrors).take(100).forEach(ws -> {
LOG.error("Global error :", ws.getGlobalError());
if (ws.getErrors().size() > 0) {
ws.getErrors().forEach((key, value) -> LOG.trace("Error for key:" + key + " is " + value));
}
});
// Rolling back instant
writeClient.rollback(instantTime);
throw new HoodieStreamerWriteException("Commit " + instantTime + " failed and rolled-back !");
}
long overallTimeMs = overallTimerContext != null ? overallTimerContext.stop() : 0;
// Send DeltaStreamer Metrics
metrics.updateStreamerMetrics(overallTimeMs);
return Pair.of(scheduledCompactionInstant, writeStatusRDD);
} | 3.68 |
hbase_MasterObserver_preSetSplitOrMergeEnabled | /**
* Called prior to setting the split / merge switch. Supports Coprocessor 'bypass'.
* @param ctx the coprocessor instance's environment
* @param newValue the new value submitted in the call
* @param switchType type of switch
*/
default void preSetSplitOrMergeEnabled(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean newValue, final MasterSwitchType switchType) throws IOException {
} | 3.68 |
framework_StringDecorator_quote | /**
* Surround a string with quote characters.
*
* @param str
* the string to quote
* @return the quoted string
*/
public String quote(Object str) {
return quoteStart + str + quoteEnd;
} | 3.68 |
hadoop_DefaultStringifier_loadArray | /**
* Restores the array of objects from the configuration.
*
* @param <K> the class of the item
* @param conf the configuration to use
* @param keyName the name of the key to use
* @param itemClass the class of the item
* @return restored object
* @throws IOException : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> K[] loadArray(Configuration conf, String keyName,
Class<K> itemClass) throws IOException {
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
itemClass);
try {
String itemStr = conf.get(keyName);
ArrayList<K> list = new ArrayList<K>();
String[] parts = itemStr.split(SEPARATOR);
for (String part : parts) {
if (!part.isEmpty())
list.add(stringifier.fromString(part));
}
return GenericsUtil.toArray(itemClass, list);
}
finally {
stringifier.close();
}
} | 3.68 |
hadoop_AMRMProxyTokenSecretManager_createIdentifier | /**
* Creates an empty TokenId to be used for de-serializing an
* {@link AMRMTokenIdentifier} by the RPC layer.
*/
@Override
public AMRMTokenIdentifier createIdentifier() {
return new AMRMTokenIdentifier();
} | 3.68 |
hbase_User_isLoginFromKeytab | /** Returns true if user credentials are obtained from keytab. */
public boolean isLoginFromKeytab() {
return ugi.isFromKeytab();
} | 3.68 |
hbase_RegionCoprocessorHost_preCheckAndMutateAfterRowLock | /**
* Supports Coprocessor 'bypass'.
* @param checkAndMutate the CheckAndMutate object
* @return true or false to return to client if default processing should be bypassed, or null
* otherwise
* @throws IOException if an error occurred on the coprocessor
*/
public CheckAndMutateResult preCheckAndMutateAfterRowLock(CheckAndMutate checkAndMutate)
throws IOException {
boolean bypassable = true;
CheckAndMutateResult defaultResult = new CheckAndMutateResult(false, null);
if (coprocEnvironments.isEmpty()) {
return null;
}
return execOperationWithResult(
new ObserverOperationWithResult<RegionObserver, CheckAndMutateResult>(regionObserverGetter,
defaultResult, bypassable) {
@Override
public CheckAndMutateResult call(RegionObserver observer) throws IOException {
return observer.preCheckAndMutateAfterRowLock(this, checkAndMutate, getResult());
}
});
} | 3.68 |
framework_StatementHelper_handleUnrecognizedTypeNullValue | /**
* Handle unrecognized null values. Override this to handle null values for
* platform specific data types that are not handled by the default
* implementation of the {@link StatementHelper}.
*
* @param i
* @param pstmt
* @param dataTypes
*
* @return true if handled, false otherwise
*
* @see {@link http://dev.vaadin.com/ticket/9148}
*/
protected boolean handleUnrecognizedTypeNullValue(int i,
PreparedStatement pstmt, Map<Integer, Class<?>> dataTypes)
throws SQLException {
return false;
} | 3.68 |
hadoop_FileIoProvider_fullyDelete | /**
* Delete the given directory using {@link FileUtil#fullyDelete(File)}.
*
* @param volume target volume. null if unavailable.
* @param dir directory to be deleted.
* @return true on success false on failure.
*/
public boolean fullyDelete(@Nullable FsVolumeSpi volume, File dir) {
final long begin = profilingEventHook.beforeMetadataOp(volume, DELETE);
try {
faultInjectorEventHook.beforeMetadataOp(volume, DELETE);
boolean deleted = FileUtil.fullyDelete(dir);
LOG.trace("Deletion of dir {} {}", dir, deleted ? "succeeded" : "failed");
profilingEventHook.afterMetadataOp(volume, DELETE, begin);
return deleted;
} catch(Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
hadoop_DataNodeFaultInjector_blockUtilSendFullBlockReport | /**
* Used as a hook to inject intercept when re-register.
*/
public void blockUtilSendFullBlockReport() {} | 3.68 |
framework_FilesystemContainer_containsId | /*
* Tests if the filesystem contains the specified Item. Don't add a JavaDoc
* comment here, we use the default documentation from implemented
* interface.
*/
@Override
public boolean containsId(Object itemId) {
if (!(itemId instanceof File)) {
return false;
}
boolean val = false;
// Try to match all roots
for (File root : roots) {
try {
val |= ((File) itemId).getCanonicalPath()
.startsWith(root.getCanonicalPath());
} catch (final IOException e) {
// Exception ignored
}
}
if (val && filter != null) {
val &= filter.accept(((File) itemId).getParentFile(),
((File) itemId).getName());
}
return val;
} | 3.68 |
framework_FileParameters_setMime | /**
* Sets the mime type.
*
* @param mime
* Mime type of the file.
*/
public void setMime(String mime) {
this.mime = mime;
} | 3.68 |
hadoop_YarnRegistryViewForProviders_getComponent | /**
* Get a component.
* @param componentName component name
* @return the service record
* @throws IOException
*/
public ServiceRecord getComponent(String componentName) throws IOException {
String path = RegistryUtils.componentPath(
user, serviceClass, instanceName, componentName);
LOG.info("Resolving path {}", path);
return registryOperations.resolve(path);
} | 3.68 |
incubator-hugegraph-toolchain_LicenseService_getActualDataSize | /**
* Keep 2 methods for future use for now
*/
private static long getActualDataSize(HugeClient client, String graph) {
Map<String, Object> metrics = client.metrics().backend(graph);
Object dataSize = metrics.get(METRICS_DATA_SIZE);
if (dataSize == null) {
return 0L;
}
Ex.check(dataSize instanceof String,
"The backend metrics data_size must be String type, " +
"but got '%s'(%s)", dataSize, dataSize.getClass());
// Unit is MB
return displaySizeToMB((String) dataSize);
} | 3.68 |
flink_JobEdge_setPreProcessingOperationName | /**
* Sets the name of the pre-processing operation for this input.
*
* @param preProcessingOperationName The name of the pre-processing operation.
*/
public void setPreProcessingOperationName(String preProcessingOperationName) {
this.preProcessingOperationName = preProcessingOperationName;
} | 3.68 |
flink_FutureUtils_getWithoutException | /**
* Gets the result of a completable future without any exception thrown.
*
* @param future the completable future specified.
* @param <T> the type of result
* @return the result of completable future, or null if it's unfinished or finished
* exceptionally
*/
@Nullable
public static <T> T getWithoutException(CompletableFuture<T> future) {
if (isCompletedNormally(future)) {
try {
return future.get();
} catch (InterruptedException | ExecutionException ignored) {
}
}
return null;
} | 3.68 |
hudi_BaseHoodieClient_createNewInstantTime | /**
* Returns next instant time in the correct format.
*
* @param shouldLock Whether to lock the context to get the instant time.
*/
public String createNewInstantTime(boolean shouldLock) {
return HoodieActiveTimeline.createNewInstantTime(shouldLock, timeGenerator);
} | 3.68 |
hbase_CellBlockBuilder_createCellScanner | /**
* Create a cell scanner.
* @param codec codec to use for the cellblock
* @param compressor compression codec used to decompress the cellblock, or null if it is not compressed
* @param cellBlock the cellblock bytes to decode
* @return CellScanner to work against the content of <code>cellBlock</code>
* @throws IOException if encoding fails
*/
public CellScanner createCellScanner(final Codec codec, final CompressionCodec compressor,
final byte[] cellBlock) throws IOException {
// Use this method from Client side to create the CellScanner
if (compressor != null) {
ByteBuffer cellBlockBuf = decompress(compressor, cellBlock);
return codec.getDecoder(new ByteBufferInputStream(cellBlockBuf));
}
// Not making the Decoder over the ByteBuffer purposefully. The Decoder over the BB will
// make Cells directly over the passed BB. This method is called at client side and we don't
// want the Cells to share the same byte[] where the RPC response is being read. Caching of any
// of the Cells at user's app level will make it not possible to GC the response byte[]
return codec.getDecoder(new ByteArrayInputStream(cellBlock));
} | 3.68 |
hadoop_CloseableReferenceCount_reference | /**
* Increment the reference count.
*
* @throws ClosedChannelException If the status is closed.
*/
public void reference() throws ClosedChannelException {
int curBits = status.incrementAndGet();
if ((curBits & STATUS_CLOSED_MASK) != 0) {
status.decrementAndGet();
throw new ClosedChannelException();
}
} | 3.68 |
hmily_TableMetaDataLoader_load | /**
* Load table meta data.
*
* @param connectionAdapter connection adapter
* @param tableNamePattern table name pattern
* @param databaseType database type
* @return table meta data
* @throws SQLException SQL exception
*/
public static Optional<TableMetaData> load(final MetaDataConnectionAdapter connectionAdapter, final String tableNamePattern, final DatabaseType databaseType) throws SQLException {
String formattedTableNamePattern = formatTableNamePattern(tableNamePattern, databaseType);
return isTableExist(connectionAdapter, formattedTableNamePattern)
? Optional.of(new TableMetaData(formattedTableNamePattern, ColumnMetaDataLoader.load(
connectionAdapter, formattedTableNamePattern, databaseType), IndexMetaDataLoader.load(connectionAdapter, formattedTableNamePattern)))
: Optional.empty();
} | 3.68 |
hadoop_FileSetUtils_convertFileSetToFiles | /**
* Converts a Maven FileSet to a list of File objects.
*
* @param source FileSet to convert
* @return List containing every element of the FileSet as a File
* @throws IOException if an I/O error occurs while trying to find the files
*/
@SuppressWarnings("unchecked")
public static List<File> convertFileSetToFiles(FileSet source) throws IOException {
String includes = getCommaSeparatedList(source.getIncludes());
String excludes = getCommaSeparatedList(source.getExcludes());
return FileUtils.getFiles(new File(source.getDirectory()), includes, excludes);
} | 3.68 |
hbase_RegionCoprocessorHost_postReplayWALs | /**
* @param info the RegionInfo for this region
* @param edits the file of recovered edits
* @throws IOException Exception
*/
public void postReplayWALs(final RegionInfo info, final Path edits) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() {
@Override
public void call(RegionObserver observer) throws IOException {
observer.postReplayWALs(this, info, edits);
}
});
} | 3.68 |
flink_DateTimeUtils_hms | /** Appends hour:minute:second to a buffer; assumes they are valid. */
private static StringBuilder hms(StringBuilder b, int h, int m, int s) {
int2(b, h);
b.append(':');
int2(b, m);
b.append(':');
int2(b, s);
return b;
} | 3.68 |
hadoop_FilePosition_setData | /**
* Associates a buffer with this file.
*
* @param bufferData the buffer associated with this file.
* @param startOffset Start offset of the buffer relative to the start of a file.
* @param readOffset Offset where reading starts relative to the start of a file.
*
* @throws IllegalArgumentException if bufferData is null.
* @throws IllegalArgumentException if startOffset is negative.
* @throws IllegalArgumentException if readOffset is negative.
* @throws IllegalArgumentException if readOffset is outside the range [startOffset, buffer end].
*/
public void setData(BufferData bufferData,
long startOffset,
long readOffset) {
checkNotNull(bufferData, "bufferData");
checkNotNegative(startOffset, "startOffset");
checkNotNegative(readOffset, "readOffset");
checkWithinRange(
readOffset,
"readOffset",
startOffset,
startOffset + bufferData.getBuffer().limit());
data = bufferData;
buffer = bufferData.getBuffer().duplicate();
bufferStartOffset = startOffset;
readStartOffset = readOffset;
setAbsolute(readOffset);
resetReadStats();
} | 3.68 |
hadoop_Lz4Codec_createCompressor | /**
* Create a new {@link Compressor} for use by this {@link CompressionCodec}.
*
* @return a new compressor for use by this codec
*/
@Override
public Compressor createCompressor() {
int bufferSize = conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
boolean useLz4HC = conf.getBoolean(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT);
return new Lz4Compressor(bufferSize, useLz4HC);
} | 3.68 |
dubbo_MeshRuleRouter_randomSelectDestination | /**
* Find out target invokers from RouteDestination
*/
protected String randomSelectDestination(
MeshRuleCache<T> meshRuleCache,
String appName,
List<DubboRouteDestination> routeDestination,
BitList<Invoker<T>> availableInvokers)
throws RpcException {
// randomly select one DubboRouteDestination from list by weight
int totalWeight = 0;
for (DubboRouteDestination dubboRouteDestination : routeDestination) {
totalWeight += Math.max(dubboRouteDestination.getWeight(), 1);
}
int target = ThreadLocalRandom.current().nextInt(totalWeight);
for (DubboRouteDestination destination : routeDestination) {
target -= Math.max(destination.getWeight(), 1);
if (target <= 0) {
// match weight
String result =
computeDestination(meshRuleCache, appName, destination.getDestination(), availableInvokers);
if (result != null) {
return result;
}
}
}
// fall back
for (DubboRouteDestination destination : routeDestination) {
String result = computeDestination(meshRuleCache, appName, destination.getDestination(), availableInvokers);
if (result != null) {
return result;
}
}
return null;
} | 3.68 |
pulsar_Reflections_classExistsInJar | /**
* Check if a class is in a jar.
*
* @param jar location of the jar
* @param fqcn fully qualified class name to search for in jar
* @return true if class can be loaded from jar and false if otherwise
*/
public static boolean classExistsInJar(java.io.File jar, String fqcn) {
java.net.URLClassLoader loader = null;
try {
loader = (URLClassLoader) ClassLoaderUtils.loadJar(jar);
Class.forName(fqcn, false, loader);
return true;
} catch (ClassNotFoundException | NoClassDefFoundError | IOException e) {
return false;
} finally {
if (loader != null) {
try {
loader.close();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
}
} | 3.68 |
framework_VColorPickerArea_addClickHandler | /**
* Adds a click handler to the widget and sinks the click event.
*
* @param handler
* @return HandlerRegistration used to remove the handler
*/
@Override
public HandlerRegistration addClickHandler(ClickHandler handler) {
return addDomHandler(handler, ClickEvent.getType());
} | 3.68 |
dubbo_LFUCache_withdrawNode | /**
* This method takes the specified node and reattaches its neighboring nodes'
* links to each other, so the specified node is no longer tied to them.
* Returns the withdrawn node, or null if the argument is null.
*
* @param node note to retrieve
* @param <K> key
* @param <V> value
* @return retrieved node
*/
static <K, V> CacheNode<K, V> withdrawNode(final CacheNode<K, V> node) {
if (node != null && node.prev != null) {
node.prev.next = node.next;
if (node.next != null) {
node.next.prev = node.prev;
}
}
return node;
} | 3.68 |
hudi_ParquetUtils_filterParquetRowKeys | /**
* Read the rowKey list matching the given filter, from the given parquet file. If the filter is empty, then this will
* return all the rowkeys.
*
* @param filePath The parquet file path.
* @param configuration configuration to build fs object
* @param filter record keys filter
* @param readSchema schema of columns to be read
* @return Set of pairs of row key and position matching candidateRecordKeys
*/
private static Set<Pair<String, Long>> filterParquetRowKeys(Configuration configuration, Path filePath, Set<String> filter,
Schema readSchema) {
Option<RecordKeysFilterFunction> filterFunction = Option.empty();
if (filter != null && !filter.isEmpty()) {
filterFunction = Option.of(new RecordKeysFilterFunction(filter));
}
Configuration conf = new Configuration(configuration);
conf.addResource(FSUtils.getFs(filePath.toString(), conf).getConf());
AvroReadSupport.setAvroReadSchema(conf, readSchema);
AvroReadSupport.setRequestedProjection(conf, readSchema);
Set<Pair<String, Long>> rowKeys = new HashSet<>();
long rowPosition = 0;
try (ParquetReader reader = AvroParquetReader.builder(filePath).withConf(conf).build()) {
Object obj = reader.read();
while (obj != null) {
if (obj instanceof GenericRecord) {
String recordKey = ((GenericRecord) obj).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString();
if (!filterFunction.isPresent() || filterFunction.get().apply(recordKey)) {
rowKeys.add(Pair.of(recordKey, rowPosition));
}
obj = reader.read();
rowPosition++;
}
}
} catch (IOException e) {
throw new HoodieIOException("Failed to read row keys from Parquet " + filePath, e);
}
// ignore
return rowKeys;
} | 3.68 |
hadoop_OBSBlockOutputStream_putObjectIfNeedAppend | /**
* If flush has take place, need to append file, else to put object.
*
* @throws IOException any problem in append or put object
*/
private synchronized void putObjectIfNeedAppend() throws IOException {
if (appendAble.get() && fs.exists(
OBSCommonUtils.keyToQualifiedPath(fs, key))) {
appendFsFile();
} else {
putObject();
}
} | 3.68 |
morf_DataValueLookupMetadata_setChildren | /**
* Updates the children during internment.
*
* @param children The new map of child arrangements.
*/
void setChildren(ImmutableMap<CaseInsensitiveString, DataValueLookupMetadata> children) {
this.children = children;
} | 3.68 |
framework_LayoutDependencyTree_getMeasureTargetsJsArray | /**
* Returns a JsArrayString array of connectorIds for components that are
* waiting for either horizontal or vertical measuring.
*
* @return JsArrayString of connectorIds
*/
public JsArrayString getMeasureTargetsJsArray() {
FastStringSet allMeasuredTargets = FastStringSet.create();
allMeasuredTargets.addAll(getMeasureQueue(HORIZONTAL));
allMeasuredTargets.addAll(getMeasureQueue(VERTICAL));
return allMeasuredTargets.dump();
} | 3.68 |
flink_PrioritizedDeque_containsPriorityElement | /**
* Returns whether the given element is a known priority element. Test is performed by identity.
*/
public boolean containsPriorityElement(T element) {
if (numPriorityElements == 0) {
return false;
}
final Iterator<T> iterator = deque.iterator();
for (int i = 0; i < numPriorityElements && iterator.hasNext(); i++) {
if (iterator.next() == element) {
return true;
}
}
return false;
} | 3.68 |
framework_AbstractSelect_setItemCaptionMode | /**
* Sets the item caption mode.
*
* See {@link ItemCaptionMode} for a description of the modes.
* <p>
* {@link ItemCaptionMode#EXPLICIT_DEFAULTS_ID} is the default mode.
* </p>
*
* @param mode
* one of the modes listed above.
*/
public void setItemCaptionMode(ItemCaptionMode mode) {
if (mode != null) {
itemCaptionMode = mode;
markAsDirty();
}
} | 3.68 |
flink_MetricConfig_getLong | /**
* Searches for the property with the specified key in this property list. If the key is not
* found in this property list, the default property list, and its defaults, recursively, are
* then checked. The method returns the default value argument if the property is not found.
*
* @param key the hashtable key.
* @param defaultValue a default value.
* @return the value in this property list with the specified key value parsed as a long.
*/
public long getLong(String key, long defaultValue) {
String argument = getProperty(key, null);
return argument == null ? defaultValue : Long.parseLong(argument);
} | 3.68 |
hudi_HoodieRealtimeInputFormatUtils_cleanProjectionColumnIds | /**
* Hive will append read columns' ids to old columns' ids during getRecordReader. In some cases, e.g. SELECT COUNT(*),
* the read columns' id is an empty string and Hive will combine it with Hoodie required projection ids and becomes
* e.g. ",2,0,3" and will cause an error. Actually this method is a temporary solution because the real bug is from
* Hive. Hive has fixed this bug after 3.0.0, but the version before that would still face this problem. (HIVE-22438)
*/
public static void cleanProjectionColumnIds(Configuration conf) {
String columnIds = conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR);
if (!columnIds.isEmpty() && columnIds.charAt(0) == ',') {
conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, columnIds.substring(1));
if (LOG.isDebugEnabled()) {
LOG.debug("The projection Ids: {" + columnIds + "} start with ','. First comma is removed");
}
}
} | 3.68 |
hadoop_AbstractTask_getEnvironment | /**
* Get environment for a Task.
* @return environment of a Task
*/
@Override
public final Map<String, String> getEnvironment() {
return environment;
} | 3.68 |
hbase_MultiByteBuff_putInt | /**
* Writes an int to this MBB at its current position. Also advances the position by size of int
* @param val Int value to write
* @return this object
*/
@Override
public MultiByteBuff putInt(int val) {
checkRefCount();
if (this.curItem.remaining() >= Bytes.SIZEOF_INT) {
this.curItem.putInt(val);
return this;
}
if (this.curItemIndex == this.items.length - 1) {
throw new BufferOverflowException();
}
// During read, we will read as byte by byte for this case. So just write in Big endian
put(int3(val));
put(int2(val));
put(int1(val));
put(int0(val));
return this;
} | 3.68 |
hbase_RecoverableZooKeeper_exists | /**
* exists is an idempotent operation. Retry before throwing exception
* @return A Stat instance
*/
public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException {
return exists(path, null, watch);
} | 3.68 |
hadoop_BytesWritable_compare | /**
* Compare the buffers in serialized form.
*/
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
return compareBytes(b1, s1 + LENGTH_BYTES, l1 - LENGTH_BYTES,
b2, s2 + LENGTH_BYTES, l2 - LENGTH_BYTES);
} | 3.68 |
hbase_ServerRpcController_failedOnException | /**
* Returns whether or not a server exception was generated in the prior RPC invocation.
*/
public boolean failedOnException() {
return serviceException != null;
} | 3.68 |
morf_DatabaseMetaDataProvider_views | /**
* @see org.alfasoftware.morf.metadata.Schema#views()
*/
@Override
public Collection<View> views() {
return viewNames.get().values().stream().map(RealName::getRealName).map(this::getView).collect(Collectors.toList());
} | 3.68 |
flink_LogicalTypeMerging_findSumAggType | /** Finds the result type of a decimal sum aggregation. */
public static LogicalType findSumAggType(LogicalType argType) {
// adopted from
// https://docs.microsoft.com/en-us/sql/t-sql/functions/sum-transact-sql
final LogicalType resultType;
if (argType.is(DECIMAL)) {
// a hack to make legacy types possible until we drop them
if (argType instanceof LegacyTypeInformationType) {
return argType;
}
resultType = new DecimalType(false, 38, getScale(argType));
} else {
resultType = argType;
}
return resultType.copy(argType.isNullable());
} | 3.68 |
hbase_ReplicationSourceWALReader_sizeOfStoreFilesIncludeBulkLoad | /**
* Calculate the total size of all the store files
* @param edit edit to count row keys from
* @return the total size of the store files
*/
private int sizeOfStoreFilesIncludeBulkLoad(WALEdit edit) {
List<Cell> cells = edit.getCells();
int totalStoreFilesSize = 0;
int totalCells = edit.size();
for (int i = 0; i < totalCells; i++) {
if (CellUtil.matchingQualifier(cells.get(i), WALEdit.BULK_LOAD)) {
try {
BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cells.get(i));
List<StoreDescriptor> stores = bld.getStoresList();
int totalStores = stores.size();
for (int j = 0; j < totalStores; j++) {
totalStoreFilesSize =
(int) (totalStoreFilesSize + stores.get(j).getStoreFileSizeBytes());
}
} catch (IOException e) {
LOG.error("Failed to deserialize bulk load entry from wal edit. "
+ "Size of HFiles part of cell will not be considered in replication "
+ "request size calculation.", e);
}
}
}
return totalStoreFilesSize;
} | 3.68 |
flink_CatalogDatabaseImpl_copy | /**
* Get a deep copy of the CatalogDatabase instance.
*
* @return a copy of CatalogDatabase instance
*/
public CatalogDatabase copy() {
return copy(getProperties());
} | 3.68 |
flink_StreamExecutionEnvironment_createInput | /**
* Generic method to create an input data stream with {@link
* org.apache.flink.api.common.io.InputFormat}.
*
* <p>The data stream is typed to the given TypeInformation. This method is intended for input
* formats where the return type cannot be determined by reflection analysis, and that do not
* implement the {@link org.apache.flink.api.java.typeutils.ResultTypeQueryable} interface.
*
* <p><b>NOTES ON CHECKPOINTING: </b> In the case of a {@link FileInputFormat}, the source
* (which executes the {@link ContinuousFileMonitoringFunction}) monitors the path, creates the
* {@link org.apache.flink.core.fs.FileInputSplit FileInputSplits} to be processed, forwards
* them to the downstream readers to read the actual data, and exits, without waiting for the
* readers to finish reading. This implies that no more checkpoint barriers are going to be
* forwarded after the source exits, thus having no checkpoints.
*
* @param inputFormat The input format used to create the data stream
* @param typeInfo The information about the type of the output type
* @param <OUT> The type of the returned data stream
* @return The data stream that represents the data created by the input format
*/
@PublicEvolving
public <OUT> DataStreamSource<OUT> createInput(
InputFormat<OUT, ?> inputFormat, TypeInformation<OUT> typeInfo) {
DataStreamSource<OUT> source;
if (inputFormat instanceof FileInputFormat) {
@SuppressWarnings("unchecked")
FileInputFormat<OUT> format = (FileInputFormat<OUT>) inputFormat;
source =
createFileInput(
format,
typeInfo,
"Custom File source",
FileProcessingMode.PROCESS_ONCE,
-1);
} else {
source = createInput(inputFormat, typeInfo, "Custom Source");
}
return source;
} | 3.68 |
flink_ModifyKindSet_union | /** Returns the union of a number of ModifyKindSets. */
public static ModifyKindSet union(ModifyKindSet... modifyKindSets) {
Builder builder = newBuilder();
for (ModifyKindSet set : modifyKindSets) {
for (ModifyKind kind : set.getContainedKinds()) {
builder.addContainedKind(kind);
}
}
return builder.build();
} | 3.68 |
framework_VaadinService_requestStart | /**
* Called before the framework starts handling a request.
*
* @param request
* The request
* @param response
* The response
*/
public void requestStart(VaadinRequest request, VaadinResponse response) {
if (!initialized) {
throw new IllegalStateException(
"Can not process requests before init() has been called");
}
setCurrentInstances(request, response);
request.setAttribute(REQUEST_START_TIME_ATTRIBUTE, System.nanoTime());
} | 3.68 |
flink_AbstractFsCheckpointStorageAccess_resolveCheckpointPointer | /**
* Takes the given string (representing a pointer to a checkpoint) and resolves it to a file
* status for the checkpoint's metadata file.
*
* @param checkpointPointer The pointer to resolve.
* @return A state handle to checkpoint/savepoint's metadata.
* @throws IOException Thrown, if the pointer cannot be resolved, the file system not accessed,
* or the pointer points to a location that does not seem to be a checkpoint/savepoint.
*/
@Internal
public static FsCompletedCheckpointStorageLocation resolveCheckpointPointer(
String checkpointPointer) throws IOException {
checkNotNull(checkpointPointer, "checkpointPointer");
checkArgument(!checkpointPointer.isEmpty(), "empty checkpoint pointer");
// check if the pointer is in fact a valid file path
final Path path;
try {
path = new Path(checkpointPointer);
} catch (Exception e) {
throw new IOException(
"Checkpoint/savepoint path '"
+ checkpointPointer
+ "' is not a valid file URI. "
+ "Either the pointer path is invalid, or the checkpoint was created by a different state backend.");
}
// check if the file system can be accessed
final FileSystem fs;
try {
fs = path.getFileSystem();
} catch (IOException e) {
throw new IOException(
"Cannot access file system for checkpoint/savepoint path '"
+ checkpointPointer
+ "'.",
e);
}
final FileStatus status;
try {
status = fs.getFileStatus(path);
} catch (FileNotFoundException e) {
throw new FileNotFoundException(
"Cannot find checkpoint or savepoint "
+ "file/directory '"
+ checkpointPointer
+ "' on file system '"
+ fs.getUri().getScheme()
+ "'.");
}
// if we are here, the file / directory exists
final Path checkpointDir;
final FileStatus metadataFileStatus;
// If this is a directory, we need to find the meta data file
if (status.isDir()) {
checkpointDir = status.getPath();
final Path metadataFilePath = new Path(path, METADATA_FILE_NAME);
try {
metadataFileStatus = fs.getFileStatus(metadataFilePath);
} catch (FileNotFoundException e) {
throw new FileNotFoundException(
"Cannot find meta data file '"
+ METADATA_FILE_NAME
+ "' in directory '"
+ path
+ "'. Please try to load the checkpoint/savepoint "
+ "directly from the metadata file instead of the directory.");
}
} else {
// this points to a file and we either do no name validation, or
// the name is actually correct, so we can return the path
metadataFileStatus = status;
checkpointDir = status.getPath().getParent();
}
final FileStateHandle metaDataFileHandle =
new FileStateHandle(metadataFileStatus.getPath(), metadataFileStatus.getLen());
final String pointer = checkpointDir.makeQualified(fs).toString();
return new FsCompletedCheckpointStorageLocation(
fs, checkpointDir, metaDataFileHandle, pointer);
} | 3.68 |
hadoop_ColumnRWHelper_readResults | /**
* @param <K> identifies the type of column name(indicated by type of key
* converter).
* @param result from which to read columns
* @param columnPrefixBytes optional prefix to limit columns. If null all
* columns are returned.
* @param keyConverter used to convert column bytes to the appropriate key
* type.
* @return the latest values of columns in the column family. If the column
* prefix is null, the column qualifier is returned as Strings. For a
* non-null column prefix bytes, the column qualifier is returned as
* a list of parts, each part a byte[]. This is to facilitate
* returning byte arrays of values that were not Strings.
* @throws IOException if any problem occurs while reading results.
*/
public static <K> Map<K, Object> readResults(Result result,
byte[] columnFamilyBytes, byte[] columnPrefixBytes,
KeyConverter<K> keyConverter, ValueConverter valueConverter)
throws IOException {
Map<K, Object> results = new HashMap<K, Object>();
if (result != null) {
Map<byte[], byte[]> columns = result.getFamilyMap(columnFamilyBytes);
for (Map.Entry<byte[], byte[]> entry : columns.entrySet()) {
byte[] columnKey = entry.getKey();
if (columnKey != null && columnKey.length > 0) {
K converterColumnKey = null;
if (columnPrefixBytes == null) {
try {
converterColumnKey = keyConverter.decode(columnKey);
} catch (IllegalArgumentException iae) {
LOG.error("Illegal column found, skipping this column.", iae);
continue;
}
} else {
// A non-null prefix means columns are actually of the form
// prefix!columnNameRemainder
byte[][] columnNameParts = Separator.QUALIFIERS.split(columnKey, 2);
if (columnNameParts.length > 0) {
byte[] actualColumnPrefixBytes = columnNameParts[0];
// If this is the prefix that we want
if (Bytes.equals(columnPrefixBytes, actualColumnPrefixBytes)
&& columnNameParts.length == 2) {
try {
converterColumnKey = keyConverter.decode(columnNameParts[1]);
} catch (IllegalArgumentException iae) {
LOG.error("Illegal column found, skipping this column.", iae);
continue;
}
}
}
} // if-else
// If the columnPrefix is null (we want all columns), or the actual
// prefix matches the given prefix we want this column
if (converterColumnKey != null) {
Object value = valueConverter.decodeValue(entry.getValue());
// we return the columnQualifier in parts since we don't know
// which part is of which data type.
results.put(converterColumnKey, value);
}
}
} // for entry
}
return results;
} | 3.68 |
flink_CatalogManager_setCurrentDatabase | /**
* Sets the current database name that will be used when resolving a table path. The database
* has to exist in the current catalog. Passing {@code null} unsets the current database.
*
* @param databaseName database name to set as current database name, or {@code null} to unset it
* @throws CatalogException thrown if the database doesn't exist in the current catalog
* @see CatalogManager#qualifyIdentifier(UnresolvedIdentifier)
* @see CatalogManager#setCurrentCatalog(String)
*/
public void setCurrentDatabase(@Nullable String databaseName) {
if (databaseName == null) {
this.currentDatabaseName = null;
return;
}
checkArgument(
!StringUtils.isNullOrWhitespaceOnly(databaseName),
"The database name cannot be empty.");
if (currentCatalogName == null) {
throw new CatalogException("Current catalog has not been set.");
}
if (!getCatalogOrThrowException(currentCatalogName).databaseExists(databaseName)) {
throw new CatalogException(
format(
"A database with name [%s] does not exist in the catalog: [%s].",
databaseName, currentCatalogName));
}
if (!databaseName.equals(currentDatabaseName)) {
currentDatabaseName = databaseName;
LOG.info(
"Set the current default database as [{}] in the current default catalog [{}].",
currentDatabaseName,
currentCatalogName);
}
} | 3.68 |
framework_LayoutManager_getInnerHeight | /**
* Gets the inner height (excluding margins, paddings and borders) of the
* given element, provided that it has been measured. These elements are
* guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* -1 is returned if the element has not been measured. If 0 is returned, it
* might indicate that the element is not attached to the DOM.
* <p>
* The value returned by this method is always rounded up. To get the exact
* inner height, use {@link #getInnerHeightDouble(Element)}
*
* @param element
* the element to get the measured size for
* @return the measured inner height (excluding margins, paddings and
* borders) of the element in pixels.
*/
public final int getInnerHeight(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return (int) Math
.ceil(getMeasuredSize(element, nullSize).getInnerHeight());
} | 3.68 |
hbase_ZKWatcher_isSuperUserId | /*
* Validate whether ACL ID is superuser.
*/
public static boolean isSuperUserId(String[] superUsers, Id id) {
for (String user : superUsers) {
// TODO: Validate super group members also when ZK supports setting node ACL for groups.
if (!AuthUtil.isGroupPrincipal(user) && new Id("sasl", user).equals(id)) {
return true;
}
}
return false;
} | 3.68 |
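Since `isSuperUserId` is a public static helper, a call site only needs the configured superuser names and a ZooKeeper `Id`. A minimal, hypothetical usage sketch follows; the superuser list and the ACL ids are made up, and the imports assume the usual HBase/ZooKeeper packages.

```java
import org.apache.hadoop.hbase.zookeeper.ZKWatcher; // assumed package location
import org.apache.zookeeper.data.Id;

// Hypothetical usage: check whether a SASL-authenticated znode ACL id belongs to one
// of the configured superusers.
public class SuperUserAclCheck {
    public static void main(String[] args) {
        String[] superUsers = {"hbase", "admin"}; // made-up configuration
        Id saslAdmin = new Id("sasl", "admin");
        Id saslGuest = new Id("sasl", "guest");
        System.out.println(ZKWatcher.isSuperUserId(superUsers, saslAdmin)); // true
        System.out.println(ZKWatcher.isSuperUserId(superUsers, saslGuest)); // false
    }
}
```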
flink_StateMapSnapshot_isOwner | /** Returns true iff the given state map is the owner of this snapshot object. */
public boolean isOwner(T stateMap) {
return owningStateMap == stateMap;
} | 3.68 |
flink_AsynchronousFileIOChannel_close | /**
* Closes the channel and waits until all pending asynchronous requests are processed. The
* underlying <code>FileChannel</code> is closed even if an exception interrupts the closing.
*
* <p><strong>Important:</strong> the {@link #isClosed()} method returns <code>true</code>
* immediately after this method has been called even when there are outstanding requests.
*
* @throws IOException Thrown, if an I/O exception occurred while waiting for the buffers, or if
* the closing was interrupted.
*/
@Override
public void close() throws IOException {
// atomically set the close flag
synchronized (this.closeLock) {
if (this.closed) {
return;
}
this.closed = true;
try {
// wait until as many buffers have been returned as were written
// only then is everything guaranteed to be consistent.
while (this.requestsNotReturned.get() > 0) {
try {
// we add a timeout here, because it is not guaranteed that the
// decrementing during buffer return and the check here are deadlock free.
// the deadlock situation is however unlikely and caught by the timeout
this.closeLock.wait(1000);
checkErroneous();
} catch (InterruptedException iex) {
throw new IOException(
"Closing of asynchronous file channel was interrupted.");
}
}
// Additional check because we might have skipped the while loop
checkErroneous();
} finally {
// close the file
if (this.fileChannel.isOpen()) {
this.fileChannel.close();
}
}
}
} | 3.68 |
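The close logic combines a close flag guarded by a lock with a timed wait until every outstanding request has been returned, and only then releases the underlying resource. A generic sketch of that shutdown pattern, using only JDK types and illustrative names, might look like this:

```java
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

// Generic sketch: mark closed under a lock, then wait (with a timeout to guard against
// missed notifications) until all outstanding requests have been returned.
public class DrainingCloser implements Closeable {
    private final Object closeLock = new Object();
    private final AtomicInteger outstanding = new AtomicInteger();
    private volatile boolean closed;

    void requestStarted() { outstanding.incrementAndGet(); }

    void requestReturned() {
        if (outstanding.decrementAndGet() == 0) {
            synchronized (closeLock) {
                closeLock.notifyAll(); // wake a close() that is draining
            }
        }
    }

    @Override
    public void close() throws IOException {
        synchronized (closeLock) {
            if (closed) {
                return;
            }
            closed = true;
            while (outstanding.get() > 0) {
                try {
                    closeLock.wait(1000); // timed wait guards against lost wake-ups
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new IOException("Closing was interrupted.");
                }
            }
            // release the underlying resource here, e.g. fileChannel.close()
        }
    }
}
```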
flink_ExecutionConfigAccessor_fromConfiguration | /** Creates an {@link ExecutionConfigAccessor} based on the provided {@link Configuration}. */
public static ExecutionConfigAccessor fromConfiguration(final Configuration configuration) {
return new ExecutionConfigAccessor(checkNotNull(configuration));
} | 3.68 |
hudi_BaseHoodieWriteClient_startCommitWithTime | /**
* Completes a new commit time for a write operation (insert/update/delete) with specified action.
*/
private void startCommitWithTime(String instantTime, String actionType, HoodieTableMetaClient metaClient) {
CleanerUtils.rollbackFailedWrites(config.getFailedWritesCleanPolicy(),
HoodieTimeline.COMMIT_ACTION, () -> tableServiceClient.rollbackFailedWrites());
startCommit(instantTime, actionType, metaClient);
} | 3.68 |
rocketmq-connect_MemoryConfigManagementServiceImpl_getConnectorConfigs | /**
* Get all enabled connector configs.
*
* @return
*/
@Override
public Map<String, ConnectKeyValue> getConnectorConfigs() {
return connectorKeyValueStore.getKVMap();
} | 3.68 |
hbase_JenkinsHash_main | /**
* Compute the hash of the specified file
* @param args name of file to compute hash of.
* @throws IOException e
*/
public static void main(String[] args) throws IOException {
if (args.length != 1) {
System.err.println("Usage: JenkinsHash filename");
System.exit(-1);
}
FileInputStream in = new FileInputStream(args[0]);
byte[] bytes = new byte[512];
int value = 0;
JenkinsHash hash = new JenkinsHash();
try {
for (int length = in.read(bytes); length > 0; length = in.read(bytes)) {
value = hash.hash(new ByteArrayHashKey(bytes, 0, length), value);
}
} finally {
in.close();
}
System.out.println(Math.abs(value));
} | 3.68 |
hadoop_AbfsInputStream_getStreamStatistics | /**
* Getter for AbfsInputStreamStatistics.
*
* @return an instance of AbfsInputStreamStatistics.
*/
@VisibleForTesting
public AbfsInputStreamStatistics getStreamStatistics() {
return streamStatistics;
} | 3.68 |
pulsar_ManagedLedgerImpl_getLastPositionAndCounter | /**
* Get the last position written in the managed ledger, alongside with the associated counter.
*/
Pair<PositionImpl, Long> getLastPositionAndCounter() {
PositionImpl pos;
long count;
do {
pos = lastConfirmedEntry;
count = ENTRIES_ADDED_COUNTER_UPDATER.get(this);
// Ensure no entry was written while reading the two values
} while (pos.compareTo(lastConfirmedEntry) != 0);
return Pair.of(pos, count);
} | 3.68 |
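The do/while re-read is an optimistic, lock-free way to obtain a mutually consistent pair of values: snapshot both, then retry if the first one changed in between. A simplified sketch of the same idea with illustrative field names (not the managed-ledger internals):

```java
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

// Sketch of the optimistic consistent read used above, on two toy counters.
public class PositionAndCounter {
    private final AtomicReference<Long> lastConfirmed = new AtomicReference<>(0L);
    private final AtomicLong entriesAdded = new AtomicLong();

    void append() {
        entriesAdded.incrementAndGet();
        lastConfirmed.updateAndGet(p -> p + 1);
    }

    long[] snapshot() {
        Long pos;
        long count;
        do {
            pos = lastConfirmed.get();
            count = entriesAdded.get();
            // retry if another append slipped in between the two reads
        } while (!pos.equals(lastConfirmed.get()));
        return new long[] {pos, count};
    }
}
```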
flink_BinaryStringDataUtil_toLong | /**
* Parses this BinaryStringData to Long.
*
* <p>Note that in this method we accumulate the result in negative form, and convert it to
* positive form at the end if this string does not start with '-'. This is because the
* magnitude of the minimum value is one larger than that of the maximum value, e.g.
* Long.MAX_VALUE is '9223372036854775807' while Long.MIN_VALUE is '-9223372036854775808'.
*
* <p>This code is mostly copied from LazyLong.parseLong in Hive.
*/
public static long toLong(BinaryStringData str) throws NumberFormatException {
int sizeInBytes = str.getSizeInBytes();
byte[] tmpBytes = getTmpBytes(str, sizeInBytes);
if (sizeInBytes == 0) {
throw numberFormatExceptionFor(str, "Input is empty.");
}
int i = 0;
byte b = tmpBytes[i];
final boolean negative = b == '-';
if (negative || b == '+') {
i++;
if (sizeInBytes == 1) {
throw numberFormatExceptionFor(str, "Input has only positive or negative symbol.");
}
}
long result = 0;
final byte separator = '.';
final int radix = 10;
final long stopValue = Long.MIN_VALUE / radix;
while (i < sizeInBytes) {
b = tmpBytes[i];
i++;
if (b == separator) {
// We allow decimals and will return a truncated integral in that case.
// Therefore we won't throw an exception here (checking the fractional
// part happens below.)
break;
}
int digit;
if (b >= '0' && b <= '9') {
digit = b - '0';
} else {
throw numberFormatExceptionFor(str, "Invalid character found.");
}
// We are going to process the new digit and accumulate the result. However, before
// doing this, if the result is already smaller than the
// stopValue(Long.MIN_VALUE / radix), then result * 10 will definitely be smaller
// than minValue, and we can stop.
if (result < stopValue) {
throw numberFormatExceptionFor(str, "Overflow.");
}
result = result * radix - digit;
// Since the previous result is less than or equal to
// stopValue(Long.MIN_VALUE / radix), we can just use `result > 0` to check overflow.
// If result overflows, we should stop.
if (result > 0) {
throw numberFormatExceptionFor(str, "Overflow.");
}
}
// This is the case when we've encountered a decimal separator. The fractional
// part will not change the number, but we will verify that the fractional part
// is well formed.
while (i < sizeInBytes) {
byte currentByte = tmpBytes[i];
if (currentByte < '0' || currentByte > '9') {
throw numberFormatExceptionFor(str, "Invalid character found.");
}
i++;
}
if (!negative) {
result = -result;
if (result < 0) {
throw numberFormatExceptionFor(str, "Overflow.");
}
}
return result;
} | 3.68 |
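The key trick is accumulating in negative form so that Long.MIN_VALUE, whose magnitude exceeds Long.MAX_VALUE, can still be parsed without overflow. A stripped-down sketch of the same technique on a plain String (no decimal handling, assumes non-empty input; class and method names are illustrative):

```java
// Standalone sketch of the "accumulate in negative" parsing trick used above.
public final class NegativeAccumulationParse {
    static long parseLong(String s) {
        int i = 0;
        boolean negative = s.charAt(0) == '-';
        if (negative || s.charAt(0) == '+') {
            i++;
        }
        long result = 0;
        final long stopValue = Long.MIN_VALUE / 10;
        for (; i < s.length(); i++) {
            int digit = s.charAt(i) - '0';
            if (digit < 0 || digit > 9) {
                throw new NumberFormatException(s);
            }
            if (result < stopValue) {
                throw new NumberFormatException("Overflow: " + s);
            }
            result = result * 10 - digit; // accumulate in negative form
            if (result > 0) {
                throw new NumberFormatException("Overflow: " + s);
            }
        }
        if (!negative) {
            result = -result;
            if (result < 0) {
                throw new NumberFormatException("Overflow: " + s);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(parseLong("-9223372036854775808")); // Long.MIN_VALUE parses fine
        System.out.println(parseLong("9223372036854775807"));  // Long.MAX_VALUE
    }
}
```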
framework_WindowWaiAriaRoles_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 14289;
} | 3.68 |
hbase_AbstractMemStore_getNextRow | /*
* @param key Find row that follows this one. If null, return first.
* @param set Set to look in for a row beyond <code>key</code>.
* @return Next row or null if none found. If one found, will be a new KeyValue -- can be
* destroyed by subsequent calls to this method.
*/
protected Cell getNextRow(final Cell key, final NavigableSet<Cell> set) {
Cell result = null;
SortedSet<Cell> tail = key == null ? set : set.tailSet(key);
// Iterate until we fall into the next row; i.e. move off current row
for (Cell cell : tail) {
if (comparator.compareRows(cell, key) <= 0) {
continue;
}
// Note: Not suppressing deletes or expired cells. Needs to be handled
// by higher up functions.
result = cell;
break;
}
return result;
} | 3.68 |
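The method walks the tail set until it leaves the current row. For plainly comparable elements the same walk reduces to `NavigableSet.higher(key)`; the loop form matters in the original because the comparison is row-only rather than the set's full cell ordering. A generic sketch with Strings standing in for cells (names and sample rows are illustrative):

```java
import java.util.NavigableSet;
import java.util.SortedSet;
import java.util.TreeSet;

// Generic sketch of the tail-set walk: return the first element strictly after key,
// or null if none exists.
public class NextRowSketch {
    static String nextAfter(String key, NavigableSet<String> set) {
        SortedSet<String> tail = key == null ? set : set.tailSet(key);
        for (String candidate : tail) {
            if (key != null && candidate.compareTo(key) <= 0) {
                continue; // still on the current row
            }
            return candidate;
        }
        return null;
    }

    public static void main(String[] args) {
        NavigableSet<String> rows = new TreeSet<>(java.util.List.of("row1", "row2", "row3"));
        System.out.println(nextAfter("row1", rows)); // row2
        System.out.println(nextAfter("row3", rows)); // null
        System.out.println(nextAfter(null, rows));   // row1
    }
}
```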
flink_WrappingRuntimeException_wrapIfNecessary | /**
* Ensures that any throwable can be thrown as an unchecked exception by potentially wrapping it.
*
* @return a runtime exception wrapping the throwable if it is checked, or the throwable itself
* if it is already a runtime exception.
*/
public static RuntimeException wrapIfNecessary(Throwable throwable) {
if (throwable instanceof RuntimeException) {
return (RuntimeException) throwable;
}
return new WrappingRuntimeException(throwable);
} | 3.68 |
hbase_PrivateCellUtil_writeCellToBuffer | /**
* Writes a cell to the buffer at the given offset
* @param cell the cell to be written
* @param buf the buffer to which the cell has to be written
* @param offset the offset at which the cell should be written
*/
public static void writeCellToBuffer(Cell cell, ByteBuffer buf, int offset) {
if (cell instanceof ExtendedCell) {
((ExtendedCell) cell).write(buf, offset);
} else {
// Fall back to copying the cell through KeyValueUtil
byte[] bytes = KeyValueUtil.copyToNewByteArray(cell);
ByteBufferUtils.copyFromArrayToBuffer(buf, offset, bytes, 0, bytes.length);
}
} | 3.68 |
morf_SelectFirstStatement_asField | /**
* @see org.alfasoftware.morf.sql.AbstractSelectStatement#asField()
*/
@Override
public AliasedField asField() {
return new FieldFromSelectFirst(this);
} | 3.68 |
hbase_ByteBufferUtils_writeVLong | /**
* Similar to {@link WritableUtils#writeVLong(java.io.DataOutput, long)}, but writes to a
* {@link ByteBuffer}.
*/
public static void writeVLong(ByteBuffer out, long i) {
if (i >= -112 && i <= 127) {
out.put((byte) i);
return;
}
int len = -112;
if (i < 0) {
i ^= -1L; // take one's complement
len = -120;
}
long tmp = i;
while (tmp != 0) {
tmp = tmp >> 8;
len--;
}
out.put((byte) len);
len = (len < -120) ? -(len + 120) : -(len + 112);
for (int idx = len; idx != 0; idx--) {
int shiftbits = (idx - 1) * 8;
long mask = 0xFFL << shiftbits;
out.put((byte) ((i & mask) >> shiftbits));
}
} | 3.68 |
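A decoder for this format mirrors `WritableUtils.readVLong()`: values in [-112, 127] occupy a single byte, everything else is a length-marker byte followed by big-endian magnitude bytes, one's-complemented for negatives. A possible matching reader is sketched below; the import for the writer is an assumption about its usual package.

```java
import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.util.ByteBufferUtils; // assumed package for the writer above

// Sketch of a matching decoder for the variable-length long format produced by writeVLong().
public class VLongReader {
    static long readVLong(ByteBuffer in) {
        byte firstByte = in.get();
        if (firstByte >= -112) {
            return firstByte; // small values are stored directly in one byte
        }
        boolean negative = firstByte < -120;
        int numBytes = negative ? (-120 - firstByte) : (-112 - firstByte);
        long value = 0;
        for (int i = 0; i < numBytes; i++) {
            value = (value << 8) | (in.get() & 0xFF);
        }
        return negative ? (value ^ -1L) : value; // undo the one's complement for negatives
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        ByteBufferUtils.writeVLong(buf, -300L); // round trip through the writer above
        buf.flip();
        System.out.println(readVLong(buf)); // -300
    }
}
```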
streampipes_Options_from | /**
* Creates a new list of options by using the provided string values.
*
* @param optionLabel An arbitrary number of option labels.
* @return a list of {@link Option}s, one per provided label
*/
public static List<Option> from(String... optionLabel) {
return Arrays.stream(optionLabel).map(Option::new).collect(Collectors.toList());
} | 3.68 |
hbase_TableState_isEnabling | /** Returns True if table is {@link State#ENABLING}. */
public boolean isEnabling() {
return isInStates(State.ENABLING);
} | 3.68 |
pulsar_AuthorizationService_canProduceAsync | /**
* Check if the specified role has permission to send messages to the specified fully qualified topic name.
*
* @param topicName
* the fully qualified topic name associated with the topic.
* @param role
* the app id used to send messages to the topic.
*/
public CompletableFuture<Boolean> canProduceAsync(TopicName topicName, String role,
AuthenticationDataSource authenticationData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.isSuperUser(role, authenticationData, conf).thenComposeAsync(isSuperUser -> {
if (isSuperUser) {
return CompletableFuture.completedFuture(true);
} else {
return provider.canProduceAsync(topicName, role, authenticationData);
}
});
} | 3.68 |
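The composition short-circuits on the cheap superuser check and only falls through to the per-topic permission check otherwise. A generic sketch of that async pattern with made-up check implementations (only the shape of the composition is taken from the method above):

```java
import java.util.concurrent.CompletableFuture;

// Generic sketch: run a cheap asynchronous check first and only perform the more
// expensive per-topic check when the first one is negative.
public class ShortCircuitAuthz {
    static CompletableFuture<Boolean> isSuperUser(String role) {
        return CompletableFuture.completedFuture("admin".equals(role)); // made-up rule
    }

    static CompletableFuture<Boolean> canProduceOnTopic(String topic, String role) {
        return CompletableFuture.completedFuture(role.startsWith("producer-")); // made-up rule
    }

    static CompletableFuture<Boolean> canProduce(String topic, String role) {
        return isSuperUser(role).thenCompose(isSuper ->
                isSuper
                        ? CompletableFuture.completedFuture(true)
                        : canProduceOnTopic(topic, role));
    }

    public static void main(String[] args) {
        System.out.println(canProduce("persistent://tenant/ns/topic", "admin").join());        // true
        System.out.println(canProduce("persistent://tenant/ns/topic", "producer-app").join()); // true
        System.out.println(canProduce("persistent://tenant/ns/topic", "guest").join());        // false
    }
}
```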
framework_MouseEventDetailsBuilder_buildMouseEventDetails | /**
* Construct a {@link MouseEventDetails} object from the given event.
*
* @param evt
* The event to use as a source for the details
* @param relativeToElement
* The element whose position
* {@link MouseEventDetails#getRelativeX()} and
* {@link MouseEventDetails#getRelativeY()} are relative to.
* @return a MouseEventDetails containing information from the event
*/
public static MouseEventDetails buildMouseEventDetails(NativeEvent evt,
Element relativeToElement) {
MouseEventDetails mouseEventDetails = new MouseEventDetails();
mouseEventDetails.setType(Event.getTypeInt(evt.getType()));
mouseEventDetails.setClientX(WidgetUtil.getTouchOrMouseClientX(evt));
mouseEventDetails.setClientY(WidgetUtil.getTouchOrMouseClientY(evt));
if (evt.getButton() == NativeEvent.BUTTON_LEFT) {
mouseEventDetails.setButton(MouseButton.LEFT);
} else if (evt.getButton() == NativeEvent.BUTTON_RIGHT) {
mouseEventDetails.setButton(MouseButton.RIGHT);
} else if (evt.getButton() == NativeEvent.BUTTON_MIDDLE) {
mouseEventDetails.setButton(MouseButton.MIDDLE);
} else {
// No button reported? Assume left.
mouseEventDetails.setButton(MouseButton.LEFT);
}
mouseEventDetails.setAltKey(evt.getAltKey());
mouseEventDetails.setCtrlKey(evt.getCtrlKey());
mouseEventDetails.setMetaKey(evt.getMetaKey());
mouseEventDetails.setShiftKey(evt.getShiftKey());
if (relativeToElement != null) {
mouseEventDetails.setRelativeX(getRelativeX(
mouseEventDetails.getClientX(), relativeToElement));
mouseEventDetails.setRelativeY(getRelativeY(
mouseEventDetails.getClientY(), relativeToElement));
}
return mouseEventDetails;
} | 3.68 |
pulsar_TopicList_minus | // get topics that are contained in list1 but not in list2
public static Set<String> minus(Collection<String> list1, Collection<String> list2) {
HashSet<String> s1 = new HashSet<>(list1);
s1.removeAll(list2);
return s1;
} | 3.68 |
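A hypothetical usage example: deciding which topics a pattern-subscribed consumer should drop after a fresh lookup. The topic names are made up, and the import for `TopicList` is left as an assumption since it depends on the Pulsar module.

```java
import java.util.List;
import java.util.Set;
// import org.apache.pulsar.common.topics.TopicList; // assumed location, adjust to your Pulsar version

// Hypothetical usage of TopicList.minus(): topics currently held that are no longer
// present in the latest lookup result should be unsubscribed.
public class TopicDiffExample {
    public static void main(String[] args) {
        List<String> currentlySubscribed = List.of("topic-a", "topic-b", "topic-c");
        List<String> latestLookup = List.of("topic-b", "topic-c", "topic-d");
        Set<String> toUnsubscribe = TopicList.minus(currentlySubscribed, latestLookup);
        System.out.println(toUnsubscribe); // [topic-a]
    }
}
```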
rocketmq-connect_AbstractStateManagementService_connectors | /**
* Get all cached connectors.
*
* @return the set of connector names
*/
@Override
public Set<String> connectors() {
return new HashSet<>(connAndTaskStatus.getConnectors().keySet());
} | 3.68 |