name | code_snippet | score
---|---|---|
hbase_HelloHBase_namespaceExists | /**
* Checks to see whether a namespace exists.
* @param admin Standard Admin object
* @param namespaceName Name of namespace
* @return true if the namespace exists
* @throws IOException If IO problem encountered
*/
static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException {
try {
admin.getNamespaceDescriptor(namespaceName);
} catch (NamespaceNotFoundException e) {
return false;
}
return true;
} | 3.68 |
framework_VCalendarPanel_getForwardKey | /**
* The key that selects the next day in the calendar. By default this is the
* right arrow key, but by overriding this method it can be changed to
* whatever you like.
*
* @return the key code of the key that selects the next day
*/
protected int getForwardKey() {
return KeyCodes.KEY_RIGHT;
} | 3.68 |
hudi_HoodieRepairTool_undoRepair | /**
* Undoes repair for UNDO mode.
*
* @return {@code true} if the undo succeeds; {@code false} otherwise.
* @throws IOException upon errors.
*/
boolean undoRepair() throws IOException {
FileSystem fs = metaClient.getFs();
String backupPathStr = cfg.backupPath;
Path backupPath = new Path(backupPathStr);
if (!fs.exists(backupPath)) {
LOG.error("Cannot find backup path: " + backupPath);
return false;
}
List<String> allPartitionPaths = tableMetadata.getAllPartitionPaths();
if (allPartitionPaths.isEmpty()) {
LOG.error("Cannot get one partition path since there is no partition available");
return false;
}
int partitionLevels = getExpectedLevelBasedOnPartitionPath(allPartitionPaths.get(0));
List<String> relativeFilePaths = listFilesFromBasePath(
context, backupPathStr, partitionLevels, cfg.parallelism).stream()
.map(filePath ->
FSUtils.getRelativePartitionPath(new Path(backupPathStr), new Path(filePath)))
.collect(Collectors.toList());
return restoreFiles(relativeFilePaths);
} | 3.68 |
flink_LogicalTypeMerging_adjustPrecisionScale | /**
* The scale adjustment implementation is inspired by SQL Server's. In particular, when a result
* precision is greater than MAX_PRECISION, the corresponding scale is reduced to prevent the
* integral part of a result from being truncated.
*
* <p>https://docs.microsoft.com/en-us/sql/t-sql/data-types/precision-scale-and-length-transact-sql
*
* <p>The rules (although inspired by SQL Server) are not followed 100%; instead, the
* Spark/Hive approach is followed for adjusting the precision.
*
* <p>http://www.openkb.info/2021/05/understand-decimal-precision-and-scale.html
*
* <p>For example, (38, 8) + (32, 8) -> (39, 8) (the addition rules initially calculate a
* decimal type assuming its precision is infinite), which results in a decimal with an
* integral part of 31 digits.
*
* <p>This method is called subsequently to adjust the resulting decimal since the maximum
* allowed precision is 38 (so far a precision of 39 is calculated in the first step). So, the
* rounding for SQL Server would be: (39, 8) -> (38, 8) // integral part: 30, but instead we
* follow the Hive/Spark approach which gives: (39, 8) -> (38, 7) // integral part: 31
*/
private static DecimalType adjustPrecisionScale(int precision, int scale) {
if (precision <= DecimalType.MAX_PRECISION) {
// Adjustment only needed when we exceed max precision
return new DecimalType(false, precision, scale);
} else {
int digitPart = precision - scale;
// If original scale is less than MINIMUM_ADJUSTED_SCALE, use original scale value;
// otherwise preserve at least MINIMUM_ADJUSTED_SCALE fractional digits
int minScalePart = Math.min(scale, MINIMUM_ADJUSTED_SCALE);
int adjustScale = Math.max(DecimalType.MAX_PRECISION - digitPart, minScalePart);
return new DecimalType(false, DecimalType.MAX_PRECISION, adjustScale);
}
} | 3.68 |
zilla_HpackContext_staticIndex6 | // Index in static table for the given name of length 6
private static int staticIndex6(DirectBuffer name)
{
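// Dispatch on the last byte (index 5) of the 6-byte name to narrow down the candidates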
switch (name.getByte(5))
{
case 'e':
if (STATIC_TABLE[32].name.equals(name)) // cookie
{
return 32;
}
break;
case 'r':
if (STATIC_TABLE[54].name.equals(name)) // server
{
return 54;
}
break;
case 't':
if (STATIC_TABLE[19].name.equals(name)) // accept
{
return 19;
}
if (STATIC_TABLE[35].name.equals(name)) // expect
{
return 35;
}
break;
}
return -1;
} | 3.68 |
hudi_HoodieTableMetadataUtil_convertMetadataToBloomFilterRecords | /**
* Convert clean metadata to bloom filter index records.
*
* @param cleanMetadata - Clean action metadata
* @param engineContext - Engine context
* @param instantTime - Clean action instant time
* @param recordsGenerationParams - Parameters for bloom filter record generation
* @return List of bloom filter index records for the clean metadata
*/
public static HoodieData<HoodieRecord> convertMetadataToBloomFilterRecords(HoodieCleanMetadata cleanMetadata,
HoodieEngineContext engineContext,
String instantTime,
MetadataRecordsGenerationParams recordsGenerationParams) {
List<Pair<String, String>> deleteFileList = new ArrayList<>();
cleanMetadata.getPartitionMetadata().forEach((partition, partitionMetadata) -> {
// Files deleted from a partition
List<String> deletedFiles = partitionMetadata.getDeletePathPatterns();
deletedFiles.forEach(entry -> {
final Path deletedFilePath = new Path(entry);
if (FSUtils.isBaseFile(deletedFilePath)) {
deleteFileList.add(Pair.of(partition, deletedFilePath.getName()));
}
});
});
final int parallelism = Math.max(Math.min(deleteFileList.size(), recordsGenerationParams.getBloomIndexParallelism()), 1);
HoodieData<Pair<String, String>> deleteFileListRDD = engineContext.parallelize(deleteFileList, parallelism);
return deleteFileListRDD.map(deleteFileInfoPair -> HoodieMetadataPayload.createBloomFilterMetadataRecord(
deleteFileInfoPair.getLeft(), deleteFileInfoPair.getRight(), instantTime, StringUtils.EMPTY_STRING,
ByteBuffer.allocate(0), true));
} | 3.68 |
morf_SqlDialect_appendAlias | /**
* Appends the alias to the result.
*
* @param result the builder the alias will be appended to
* @param currentField the field to be aliased
*/
protected void appendAlias(StringBuilder result, AliasedField currentField) {
if (!StringUtils.isBlank(currentField.getAlias())) {
result.append(String.format(" AS %s", currentField.getAlias()));
}
} | 3.68 |
morf_WhenCondition_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser
.dispatch(getCriterion())
.dispatch(getValue());
} | 3.68 |
dubbo_AbstractReferenceConfig_isInjvm | /**
* @return whether this reference is an in-JVM (local) reference
* @deprecated use the parameter <b>scope</b> instead to judge whether it is in-JVM, i.e. scope=local
*/
@Deprecated
public Boolean isInjvm() {
return injvm;
} | 3.68 |
morf_XmlDataSetProducer_getTable | /**
* @see org.alfasoftware.morf.metadata.Schema#getTable(java.lang.String)
*/
@Override
public Table getTable(String name) {
// Read the meta data for the specified table
InputStream inputStream = xmlStreamProvider.openInputStreamForTable(name);
try {
XMLStreamReader xmlStreamReader = openPullParser(inputStream);
XmlPullProcessor.readTag(xmlStreamReader, XmlDataSetNode.TABLE_NODE);
String version = xmlStreamReader.getAttributeValue(XmlDataSetNode.URI, XmlDataSetNode.VERSION_ATTRIBUTE);
if (StringUtils.isNotEmpty(version)) {
return new PullProcessorTableMetaData(xmlStreamReader, Integer.parseInt(version));
} else {
return new PullProcessorTableMetaData(xmlStreamReader, 1);
}
} finally {
// abandon any remaining content
Closeables.closeQuietly(inputStream);
}
} | 3.68 |
morf_MySqlDialect_getDatabaseType | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getDatabaseType()
*/
@Override
public DatabaseType getDatabaseType() {
return DatabaseType.Registry.findByIdentifier(MySql.IDENTIFIER);
} | 3.68 |
hbase_AbstractFSWAL_cleanOldLogs | /**
* Archive old logs. A WAL is eligible for archiving if all its WALEdits have been flushed.
* <p/>
* Use synchronized because we may call this method from different threads, normally when
* replacing the writer, and, since closing a writer may now be asynchronous, we will also call
* this method in the closeExecutor, right after we actually close a WAL writer.
*/
private synchronized void cleanOldLogs() {
List<Pair<Path, Long>> logsToArchive = null;
long now = System.nanoTime();
boolean mayLogTooOld = nextLogTooOldNs <= now;
ArrayList<byte[]> regionsBlockingWal = null;
// For each log file, look at its Map of regions to highest sequence id; if all sequence ids
// are older than what is currently in memory, the WAL can be GC'd.
for (Map.Entry<Path, WALProps> e : this.walFile2Props.entrySet()) {
if (!e.getValue().closed) {
LOG.debug("{} is not closed yet, will try archiving it next time", e.getKey());
continue;
}
Path log = e.getKey();
ArrayList<byte[]> regionsBlockingThisWal = null;
long ageNs = now - e.getValue().rollTimeNs;
if (ageNs > walTooOldNs) {
if (mayLogTooOld && regionsBlockingWal == null) {
regionsBlockingWal = new ArrayList<>();
}
regionsBlockingThisWal = regionsBlockingWal;
}
Map<byte[], Long> sequenceNums = e.getValue().encodedName2HighestSequenceId;
if (this.sequenceIdAccounting.areAllLower(sequenceNums, regionsBlockingThisWal)) {
if (logsToArchive == null) {
logsToArchive = new ArrayList<>();
}
logsToArchive.add(Pair.newPair(log, e.getValue().logSize));
if (LOG.isTraceEnabled()) {
LOG.trace("WAL file ready for archiving " + log);
}
} else if (regionsBlockingThisWal != null) {
StringBuilder sb = new StringBuilder(log.toString()).append(" has not been archived for ")
.append(TimeUnit.NANOSECONDS.toSeconds(ageNs)).append(" seconds; blocked by: ");
boolean isFirst = true;
for (byte[] region : regionsBlockingThisWal) {
if (!isFirst) {
sb.append("; ");
}
isFirst = false;
sb.append(Bytes.toString(region));
}
LOG.warn(sb.toString());
nextLogTooOldNs = now + SURVIVED_TOO_LONG_LOG_INTERVAL_NS;
regionsBlockingThisWal.clear();
}
}
if (logsToArchive != null) {
final List<Pair<Path, Long>> localLogsToArchive = logsToArchive;
// make it async
for (Pair<Path, Long> log : localLogsToArchive) {
logArchiveExecutor.execute(() -> {
archive(log);
});
this.walFile2Props.remove(log.getFirst());
}
}
} | 3.68 |
pulsar_KeyValueSchemaInfo_decodeKeyValueEncodingType | /**
* Decode the kv encoding type from the schema info.
*
* @param schemaInfo the schema info
* @return the kv encoding type
*/
public static KeyValueEncodingType decodeKeyValueEncodingType(SchemaInfo schemaInfo) {
checkArgument(SchemaType.KEY_VALUE == schemaInfo.getType(),
"Not a KeyValue schema");
String encodingTypeStr = schemaInfo.getProperties().get(KV_ENCODING_TYPE);
if (StringUtils.isEmpty(encodingTypeStr)) {
return KeyValueEncodingType.INLINE;
} else {
return KeyValueEncodingType.valueOf(encodingTypeStr);
}
} | 3.68 |
querydsl_BooleanExpression_not | /**
* Create a {@code !this} expression
*
* <p>Returns a negation of this boolean expression</p>
*
* @return !this
*/
@Override
public BooleanExpression not() {
if (not == null) {
// uses this, because it makes unwrapping easier
not = Expressions.booleanOperation(Ops.NOT, this);
}
return not;
} | 3.68 |
framework_VTabsheet_onFocus | /**
* Delegate method for the onFocus event occurring on Tab.
*
* @since 7.2.6
* @param newFocusTab
* the new focused tab.
* @see #onBlur(Tab)
*/
public void onFocus(Tab newFocusTab) {
if (connector.hasEventListener(EventId.FOCUS)) {
// Send the focus event only the first time we focus on any
// tab. The focused tab will be reset on the last blur.
if (focusedTab == null) {
connector.getRpcProxy(FocusAndBlurServerRpc.class).focus();
}
}
cancelLastBlurSchedule();
setFocusedTab(newFocusTab);
} | 3.68 |
hbase_RegionServerObserver_preStopRegionServer | /**
* Called before stopping region server.
* @param ctx the environment to interact with the framework and region server.
*/
default void preStopRegionServer(final ObserverContext<RegionServerCoprocessorEnvironment> ctx)
throws IOException {
} | 3.68 |
flink_ThreadInfoSample_from | /**
* Constructs a collection of {@link ThreadInfoSample}s from a collection of {@link ThreadInfo}
* samples.
*
* @param threadInfos the collection of {@link ThreadInfo}.
* @return the collection of the corresponding {@link ThreadInfoSample}s.
*/
public static Map<Long, ThreadInfoSample> from(Collection<ThreadInfo> threadInfos) {
return threadInfos.stream()
.collect(
Collectors.toMap(
ThreadInfo::getThreadId,
threadInfo ->
new ThreadInfoSample(
threadInfo.getThreadState(),
threadInfo.getStackTrace())));
} | 3.68 |
hadoop_RawErasureDecoder_preferDirectBuffer | /**
* Tell if direct buffer is preferred or not. It's for callers to
* decide how to allocate coding chunk buffers, using DirectByteBuffer or
* bytes array. It will return false by default.
* @return true if native buffer is preferred for performance consideration,
* otherwise false.
*/
public boolean preferDirectBuffer() {
return false;
} | 3.68 |
graphhopper_GraphHopper_setCountryRuleFactory | /**
* Sets the factory used to create country rules. Use `null` to disable country rules
*/
public GraphHopper setCountryRuleFactory(CountryRuleFactory countryRuleFactory) {
this.countryRuleFactory = countryRuleFactory;
return this;
} | 3.68 |
hadoop_TimelineEntityType_matches | /**
* Whether the type of this entity matches the type indicated by the input
* argument.
*
* @param typeString entity type represented as a string.
* @return true, if string representation of this entity type matches the
* entity type passed.
*/
public boolean matches(String typeString) {
return toString().equals(typeString);
} | 3.68 |
querydsl_CollQueryFactory_delete | /**
* Create a new delete clause
*
* @param path source expression
* @param col source collection
* @return delete clause
*/
public static <A> CollDeleteClause<A> delete(Path<A> path, Collection<A> col) {
return new CollDeleteClause<A>(path, col);
} | 3.68 |
flink_BashJavaUtils_getTmResourceParams | /**
* Generate and print JVM parameters and dynamic configs of task executor resources. The last
* two lines of the output should be JVM parameters and dynamic configs respectively.
*/
private static List<String> getTmResourceParams(Configuration configuration) {
Configuration configurationWithFallback =
TaskExecutorProcessUtils.getConfigurationMapLegacyTaskManagerHeapSizeToConfigOption(
configuration, TaskManagerOptions.TOTAL_FLINK_MEMORY);
TaskExecutorProcessSpec taskExecutorProcessSpec =
TaskExecutorProcessUtils.processSpecFromConfig(configurationWithFallback);
logTaskExecutorConfiguration(taskExecutorProcessSpec);
return Arrays.asList(
ProcessMemoryUtils.generateJvmParametersStr(taskExecutorProcessSpec),
TaskExecutorProcessUtils.generateDynamicConfigsStr(taskExecutorProcessSpec));
} | 3.68 |
MagicPlugin_ColorHD_convertHSBtoRGB | // Borrowed from Sun AWT Color class
public static int[] convertHSBtoRGB(float hue, float saturation, float brightness) {
int r = 0;
int g = 0;
int b = 0;
if (saturation == 0) {
r = g = b = (int) (brightness * 255.0f + 0.5f);
} else {
float h = (hue - (float)Math.floor(hue)) * 6.0f;
float f = h - (float)java.lang.Math.floor(h);
float p = brightness * (1.0f - saturation);
float q = brightness * (1.0f - saturation * f);
float t = brightness * (1.0f - (saturation * (1.0f - f)));
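// h lies in [0, 6); its integer part selects the hue sextant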
switch ((int) h) {
case 0:
r = (int) (brightness * 255.0f + 0.5f);
g = (int) (t * 255.0f + 0.5f);
b = (int) (p * 255.0f + 0.5f);
break;
case 1:
r = (int) (q * 255.0f + 0.5f);
g = (int) (brightness * 255.0f + 0.5f);
b = (int) (p * 255.0f + 0.5f);
break;
case 2:
r = (int) (p * 255.0f + 0.5f);
g = (int) (brightness * 255.0f + 0.5f);
b = (int) (t * 255.0f + 0.5f);
break;
case 3:
r = (int) (p * 255.0f + 0.5f);
g = (int) (q * 255.0f + 0.5f);
b = (int) (brightness * 255.0f + 0.5f);
break;
case 4:
r = (int) (t * 255.0f + 0.5f);
g = (int) (p * 255.0f + 0.5f);
b = (int) (brightness * 255.0f + 0.5f);
break;
case 5:
r = (int) (brightness * 255.0f + 0.5f);
g = (int) (p * 255.0f + 0.5f);
b = (int) (q * 255.0f + 0.5f);
break;
}
}
components[0] = r;
components[1] = g;
components[2] = b;
return components;
} | 3.68 |
hbase_ResponseConverter_getCheckAndMutateResult | /**
* Create a CheckAndMutateResult object from a protocol buffer MutateResponse
* @return a CheckAndMutateResult object
*/
public static CheckAndMutateResult getCheckAndMutateResult(
ClientProtos.MutateResponse mutateResponse, CellScanner cells) throws IOException {
boolean success = mutateResponse.getProcessed();
Result result = null;
if (mutateResponse.hasResult()) {
result = ProtobufUtil.toResult(mutateResponse.getResult(), cells);
}
return new CheckAndMutateResult(success, result);
} | 3.68 |
framework_VAbsoluteLayout_cleanupWrappers | /**
* Clean up old wrappers which have been left empty by other inner layouts
* moving the widget from the wrapper into their own hierarchy. This usually
* happens when a call to setWidget(widget) is done in an inner layout which
* automatically detaches the widget from the parent, in this case the
* wrapper, and re-attaches it somewhere else. This has to be done in the
* layout phase since the order of the hierarchy events are not defined.
*/
public void cleanupWrappers() {
for (Widget widget : getChildren()) {
if (widget instanceof AbsoluteWrapper) {
AbsoluteWrapper wrapper = (AbsoluteWrapper) widget;
if (wrapper.getWidget() == null) {
wrapper.destroy();
super.remove(wrapper);
continue;
}
}
}
} | 3.68 |
hadoop_DataNodeVolumeMetrics_getTransferIoSampleCount | // Based on transferIoRate
public long getTransferIoSampleCount() {
return transferIoRate.lastStat().numSamples();
} | 3.68 |
hadoop_HttpExceptionUtils_createJerseyExceptionResponse | /**
* Creates a HTTP JAX-RPC response serializing the exception in it as JSON.
*
* @param status the error code to set in the response
* @param ex the exception to serialize in the response
* @return the JAX-RPC response with the set error and JSON encoded exception
*/
public static Response createJerseyExceptionResponse(Response.Status status,
Throwable ex) {
Map<String, Object> json = new LinkedHashMap<String, Object>();
json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex));
json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName());
json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName());
Map<String, Object> response = Collections.singletonMap(ERROR_JSON, json);
return Response.status(status).type(MediaType.APPLICATION_JSON).
entity(response).build();
} | 3.68 |
hudi_SparkRecordMergingUtils_getCachedFieldNameToIdMapping | /**
* @param avroSchema Avro schema.
* @return The field name to ID mapping.
*/
public static Map<String, Integer> getCachedFieldNameToIdMapping(Schema avroSchema) {
return FIELD_NAME_TO_ID_MAPPING_CACHE.computeIfAbsent(avroSchema, schema -> {
StructType structType = HoodieInternalRowUtils.getCachedSchema(schema);
Map<String, Integer> schemaFieldIdMapping = new HashMap<>();
int fieldId = 0;
for (StructField field : structType.fields()) {
schemaFieldIdMapping.put(field.name(), fieldId);
fieldId++;
}
return schemaFieldIdMapping;
});
} | 3.68 |
hadoop_TimelineEntity_addEvents | /**
* Add a list of events related to the entity to the existing event list
*
* @param events
* a list of events related to the entity
*/
public void addEvents(List<TimelineEvent> events) {
this.events.addAll(events);
} | 3.68 |
morf_TableOutputter_createCell | /**
* Creates the cell at the given position.
*
* @param currentWorkSheet to add the cell to
* @param column The meta data for the column in the source table
* @param columnNumber The column number to insert at (0 based)
* @param rowIndex The row number to insert at (0 based)
* @param record The source record
* @param format The format to apply to the cell
*/
private void createCell(final WritableSheet currentWorkSheet, Column column, int columnNumber, int rowIndex, Record record, WritableCellFormat format) {
WritableCell writableCell;
switch (column.getType()) {
case STRING:
writableCell = new Label(columnNumber, rowIndex, record.getString(column.getName()));
break;
case DECIMAL:
BigDecimal decimalValue = record.getBigDecimal(column.getName());
try {
writableCell = decimalValue == null ? createBlankWriteableCell(columnNumber, rowIndex) : new jxl.write.Number(columnNumber, rowIndex, decimalValue.doubleValue());
} catch (Exception e) {
throw new UnsupportedOperationException("Cannot generate Excel cell (parseDouble) for data [" + decimalValue + "]" + unsupportedOperationExceptionMessageSuffix(column, currentWorkSheet), e);
}
break;
case BIG_INTEGER:
case INTEGER:
Long longValue = record.getLong(column.getName());
try {
writableCell = longValue == null ? createBlankWriteableCell(columnNumber, rowIndex) : new jxl.write.Number(columnNumber, rowIndex, longValue);
} catch (Exception e) {
throw new UnsupportedOperationException("Cannot generate Excel cell (parseInt) for data [" + longValue + "]" + unsupportedOperationExceptionMessageSuffix(column, currentWorkSheet), e);
}
break;
case CLOB:
try {
String stringValue = record.getString(column.getName());
writableCell = stringValue == null ? createBlankWriteableCell(columnNumber, rowIndex) : new Label(columnNumber, rowIndex, StringUtils.substring(stringValue, 0, MAX_CELL_CHARACTERS));
} catch (Exception e) {
throw new UnsupportedOperationException("Cannot generate Excel cell for CLOB data" + unsupportedOperationExceptionMessageSuffix(column, currentWorkSheet), e);
}
break;
default:
throw new UnsupportedOperationException("Cannot output data type [" + column.getType() + "] to a spreadsheet");
}
writableCell.setCellFormat(format);
try {
currentWorkSheet.addCell(writableCell);
} catch (Exception e) {
throw new RuntimeException("Error writing value to spreadsheet", e);
}
} | 3.68 |
flink_FutureUtils_whenCompleteAsyncIfNotDone | /**
* This function takes a {@link CompletableFuture} and a bi-consumer to call on completion of
* this future. If the input future is already done, this function returns {@link
* CompletableFuture#whenComplete(BiConsumer)}. Otherwise, the return value is {@link
* CompletableFuture#whenCompleteAsync(BiConsumer, Executor)} with the given executor.
*
* @param completableFuture the completable future for which we want to call #whenComplete.
* @param executor the executor to run the whenComplete function if the future is not yet done.
* @param whenCompleteFun the bi-consumer function to call when the future is completed.
* @param <IN> type of the input future.
* @return the new completion stage.
*/
public static <IN> CompletableFuture<IN> whenCompleteAsyncIfNotDone(
CompletableFuture<IN> completableFuture,
Executor executor,
BiConsumer<? super IN, ? super Throwable> whenCompleteFun) {
return completableFuture.isDone()
? completableFuture.whenComplete(whenCompleteFun)
: completableFuture.whenCompleteAsync(whenCompleteFun, executor);
} | 3.68 |
flink_MathUtils_log2floor | /**
* Computes the logarithm of the given value to the base of 2, rounded down. It corresponds to
* the position of the highest non-zero bit. The position is counted, starting with 0 from the
* least significant bit to the most significant bit. For example, <code>log2floor(16) = 4
* </code>, and <code>log2floor(10) = 3</code>.
*
* @param value The value to compute the logarithm for.
* @return The logarithm (rounded down) to the base of 2.
* @throws ArithmeticException Thrown, if the given value is zero.
*/
public static int log2floor(int value) throws ArithmeticException {
if (value == 0) {
throw new ArithmeticException("Logarithm of zero is undefined.");
}
return 31 - Integer.numberOfLeadingZeros(value);
} | 3.68 |
hadoop_SampleQuantiles_clear | /**
* Resets the estimator, clearing out all previously inserted items
*/
synchronized public void clear() {
count = 0;
bufferCount = 0;
samples.clear();
} | 3.68 |
framework_UIDL_hasVariable | /**
* Checks if the named variable is available.
*
* @param name
* the name of the variable desired
* @return true if the variable exists, false otherwise
*/
public boolean hasVariable(String name) {
return hasVariables() && var().containsKey(name);
} | 3.68 |
flink_ExecutionVertex_getPreferredLocationBasedOnState | /**
* Gets the preferred location to execute the current task execution attempt, based on the state
* that the execution attempt will resume.
*/
public Optional<TaskManagerLocation> getPreferredLocationBasedOnState() {
// only restore to same execution if it has state
if (currentExecution.getTaskRestore() != null
&& currentExecution.getTaskRestore().getTaskStateSnapshot().hasState()) {
return findLastLocation();
}
return Optional.empty();
} | 3.68 |
framework_BasicEventResizeHandler_setDates | /**
* Set the start and end dates for the event.
*
* @param event
* The event for which the start and end dates should be set
* @param start
* The start date
* @param end
* The end date
*/
protected void setDates(EditableCalendarEvent event, Date start, Date end) {
event.setStart(start);
event.setEnd(end);
} | 3.68 |
flink_SlidingWindowCheckMapper_verifyPreviousOccurences | /**
* Verifies if all values from previous windows appear in the new one. Returns union of all
* events seen so far that were not seen <b>slideFactor</b> number of times yet.
*/
private List<Tuple2<Event, Integer>> verifyPreviousOccurences(
List<Tuple2<Event, Integer>> previousWindowValues,
List<Event> newValues,
Long lastSequenceNumberSeenSoFar,
Collector<String> out) {
List<Tuple2<Event, Integer>> newEventsSeenSoFar = new ArrayList<>();
List<Event> seenEvents = new ArrayList<>();
for (Tuple2<Event, Integer> windowValue : previousWindowValues) {
if (!newValues.contains(windowValue.f0)) {
failWithEventNotSeenAlertMessage(windowValue, newValues, out);
} else {
seenEvents.add(windowValue.f0);
preserveOrDiscardIfSeenSlideFactorTimes(newEventsSeenSoFar, windowValue);
}
}
addNotSeenValues(
newEventsSeenSoFar, newValues, seenEvents, lastSequenceNumberSeenSoFar, out);
return newEventsSeenSoFar;
} | 3.68 |
hbase_RegistryEndpointsRefresher_refreshNow | /**
* Notifies the refresher thread to refresh the configuration. This does not guarantee a refresh.
* See class comment for details.
*/
synchronized void refreshNow() {
refreshNow = true;
notifyAll();
} | 3.68 |
hbase_ColumnValueFilter_getQualifier | /** Returns the qualifier */
public byte[] getQualifier() {
return qualifier;
} | 3.68 |
hudi_RocksDBDAO_deleteInBatch | /**
* Helper to add delete operation in batch.
*
* @param batch Batch Handle
* @param columnFamilyName Column Family
* @param key Key
*/
public <K extends Serializable> void deleteInBatch(WriteBatch batch, String columnFamilyName, K key) {
try {
batch.delete(managedHandlesMap.get(columnFamilyName), SerializationUtils.serialize(key));
} catch (Exception e) {
throw new HoodieException(e);
}
} | 3.68 |
hbase_ZKListener_nodeChildrenChanged | /**
* Called when an existing node has a child node added or removed.
* @param path full path of the node whose children have changed
*/
public void nodeChildrenChanged(String path) {
// no-op
} | 3.68 |
flink_StreamTaskActionExecutor_getMutex | /**
* @return an object used for mutual exclusion of all operations that involve data and state
* mutation. (a.k.a. checkpoint lock).
*/
public Object getMutex() {
return mutex;
} | 3.68 |
flink_MultipleParameterTool_getMultiParameterRequired | /**
* Returns the Collection of String values for the given key. If the key does not exist it will
* throw a {@link RuntimeException}.
*/
public Collection<String> getMultiParameterRequired(String key) {
addToDefaults(key, null);
Collection<String> value = getMultiParameter(key);
if (value == null) {
throw new RuntimeException("No data for required key '" + key + "'");
}
return value;
} | 3.68 |
hbase_MemStoreFlusher_isDelay | /** Returns True if the entry is a delay flush task */
protected boolean isDelay() {
return this.whenToExpire > this.createTime;
} | 3.68 |
hbase_MasterObserver_preGetRSGroupInfo | /**
* Called before getting region server group info of the passed groupName.
* @param ctx the environment to interact with the framework and master
* @param groupName name of the group to get RSGroupInfo for
*/
default void preGetRSGroupInfo(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String groupName) throws IOException {
} | 3.68 |
hbase_RandomRowFilter_parseFrom | /**
* Parse a serialized representation of {@link RandomRowFilter}
* @param pbBytes A pb serialized {@link RandomRowFilter} instance
* @return An instance of {@link RandomRowFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static RandomRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.RandomRowFilter proto;
try {
proto = FilterProtos.RandomRowFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new RandomRowFilter(proto.getChance());
} | 3.68 |
dubbo_TriHttp2RemoteFlowController_decrementPendingBytes | /**
* If this frame is in the pending queue, decrements the number of pending bytes for the stream.
*/
private void decrementPendingBytes(int bytes, boolean updateStreamableBytes) {
incrementPendingBytes(-bytes, updateStreamableBytes);
} | 3.68 |
hbase_TraceUtil_trace | /**
* Trace the execution of {@code runnable}.
*/
public static <T extends Throwable> void trace(final ThrowingRunnable<T> runnable,
final Supplier<Span> spanSupplier) throws T {
Span span = spanSupplier.get();
try (Scope ignored = span.makeCurrent()) {
runnable.run();
span.setStatus(StatusCode.OK);
} catch (Throwable e) {
setError(span, e);
throw e;
} finally {
span.end();
}
} | 3.68 |
flink_SavepointMetadata_getOperatorState | /**
* @return Operator state for the given UID.
* @throws IOException If the savepoint does not contain operator state with the given uid.
*/
public OperatorState getOperatorState(String uid) throws IOException {
OperatorID operatorID = OperatorIDGenerator.fromUid(uid);
OperatorStateSpec operatorState = operatorStateIndex.get(operatorID);
if (operatorState == null || operatorState.isNewStateTransformation()) {
throw new IOException("Savepoint does not contain state with operator uid " + uid);
}
return operatorState.asExistingState();
} | 3.68 |
hbase_QuotaSettingsFactory_unthrottleNamespace | /**
* Remove the throttling for the specified namespace.
* @param namespace the namespace
* @return the quota settings
*/
public static QuotaSettings unthrottleNamespace(final String namespace) {
return throttle(null, null, namespace, null, null, 0, null, QuotaScope.MACHINE);
} | 3.68 |
shardingsphere-elasticjob_NettyRestfulServiceConfiguration_addFilterInstances | /**
* Add instances of {@link Filter}.
*
* @param instances instances of Filter
*/
public void addFilterInstances(final Filter... instances) {
filterInstances.addAll(Arrays.asList(instances));
} | 3.68 |
aws-saas-boost_SaaSBoostArtifactsBucket_getBucketUrl | /**
* @see <a href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html">S3 Documentation</a>
* @return the S3 URL the Bucket object represents
*/
public String getBucketUrl() {
return String.format("https://%s.s3.%s.%s/", bucketName, region, Utils.endpointSuffix(region));
} | 3.68 |
flink_BinarySegmentUtils_setLong | /**
* Sets a long value into the segments.
*
* @param segments target segments.
* @param offset value offset.
* @param value the long value to set.
*/
public static void setLong(MemorySegment[] segments, int offset, long value) {
if (inFirstSegment(segments, offset, 8)) {
segments[0].putLong(offset, value);
} else {
setLongMultiSegments(segments, offset, value);
}
} | 3.68 |
hadoop_CompositeService_getServices | /**
* Get a cloned list of services
* @return a list of child services at the time of invocation -
* added services will not be picked up.
*/
public List<Service> getServices() {
synchronized (serviceList) {
return Collections.unmodifiableList(new ArrayList<>(serviceList));
}
} | 3.68 |
pulsar_AuthenticationSasl_newRequestHeader | // set header according to previous response
@Override
public Set<Entry<String, String>> newRequestHeader(String hostName,
AuthenticationDataProvider authData,
Map<String, String> previousRespHeaders) throws Exception {
Map<String, String> headers = new HashMap<>();
if (authData.hasDataForHttp()) {
authData.getHttpHeaders().forEach(header ->
headers.put(header.getKey(), header.getValue())
);
}
// role token expired in last check. remove role token, new sasl client, restart auth.
if (isRoleTokenExpired(previousRespHeaders)) {
previousRespHeaders = null;
saslRoleToken = null;
authData = getAuthData(hostName);
}
// role token is not expired and OK to use.
// 1. first time request, send server to check if expired.
// 2. server checked, and return SASL_STATE_COMPLETE, ask server to complete auth
// 3. server checked, and not return SASL_STATE_COMPLETE
if (saslRoleToken != null) {
headers.put(SASL_AUTH_ROLE_TOKEN, saslRoleToken);
if (previousRespHeaders == null) {
// first time auth, ask server to check the role token expired or not.
if (log.isDebugEnabled()) {
log.debug("request builder add token: Check token");
}
headers.put(SASL_HEADER_STATE, SASL_STATE_SERVER_CHECK_TOKEN);
} else if (previousRespHeaders.get(SASL_HEADER_STATE).equalsIgnoreCase(SASL_STATE_COMPLETE)) {
headers.put(SASL_HEADER_STATE, SASL_STATE_COMPLETE);
if (log.isDebugEnabled()) {
log.debug("request builder add token. role verified by server");
}
} else {
if (log.isDebugEnabled()) {
log.debug("request builder add token. NOT complete. state: {}",
previousRespHeaders.get(SASL_HEADER_STATE));
}
headers.put(SASL_HEADER_STATE, SASL_STATE_NEGOTIATE);
}
return headers.entrySet();
}
// role token is null, need do auth.
if (previousRespHeaders == null) {
if (log.isDebugEnabled()) {
log.debug("Init authn in client side");
}
// first time init
headers.put(SASL_HEADER_STATE, SASL_STATE_CLIENT_INIT);
AuthData initData = authData.authenticate(AuthData.INIT_AUTH_DATA);
headers.put(SASL_AUTH_TOKEN,
Base64.getEncoder().encodeToString(initData.getBytes()));
} else {
AuthData brokerData = AuthData.of(
Base64.getDecoder().decode(
previousRespHeaders.get(SASL_AUTH_TOKEN)));
AuthData clientData = authData.authenticate(brokerData);
headers.put(SASL_STATE_SERVER, previousRespHeaders.get(SASL_STATE_SERVER));
headers.put(SASL_HEADER_TYPE, SASL_TYPE_VALUE);
headers.put(SASL_HEADER_STATE, SASL_STATE_NEGOTIATE);
headers.put(SASL_AUTH_TOKEN,
Base64.getEncoder().encodeToString(clientData.getBytes()));
}
return headers.entrySet();
} | 3.68 |
hbase_SequenceIdAccounting_areAllLower | /**
* See if passed <code>sequenceids</code> are lower -- i.e. earlier -- than any outstanding
* sequenceids, sequenceids we are holding on to in this accounting instance.
* @param sequenceids Keyed by encoded region name. Cannot be null (doesn't make sense for it to
* be null).
* @param keysBlocking An optional collection that is used to return the specific keys that are
* causing this method to return false.
* @return true if all sequenceids are lower, older than, the old sequenceids in this instance.
*/
boolean areAllLower(Map<byte[], Long> sequenceids, Collection<byte[]> keysBlocking) {
Map<byte[], Long> flushing = null;
Map<byte[], Long> unflushed = null;
synchronized (this.tieLock) {
// Get a flattened -- only the oldest sequenceid -- copy of current flushing and unflushed
// data structures to use in tests below.
flushing = flattenToLowestSequenceId(this.flushingSequenceIds);
unflushed = flattenToLowestSequenceId(this.lowestUnflushedSequenceIds);
}
boolean result = true;
for (Map.Entry<byte[], Long> e : sequenceids.entrySet()) {
long oldestFlushing = Long.MAX_VALUE;
long oldestUnflushed = Long.MAX_VALUE;
if (flushing != null && flushing.containsKey(e.getKey())) {
oldestFlushing = flushing.get(e.getKey());
}
if (unflushed != null && unflushed.containsKey(e.getKey())) {
oldestUnflushed = unflushed.get(e.getKey());
}
long min = Math.min(oldestFlushing, oldestUnflushed);
if (min <= e.getValue()) {
if (keysBlocking == null) {
return false;
}
result = false;
keysBlocking.add(e.getKey());
// Continue examining the map so we could log all regions blocking this WAL.
}
}
return result;
} | 3.68 |
AreaShop_Utils_durationStringToLong | /**
* Method to translate a duration string to a millisecond value.
* @param duration The duration string
* @return The duration in milliseconds translated from the duration string, or 0 if it is invalid
*/
public static long durationStringToLong(String duration) {
if(duration == null) {
return 0;
} else if(duration.equalsIgnoreCase("disabled") || duration.equalsIgnoreCase("unlimited") || duration.isEmpty()) {
return -1;
} else if(duration.indexOf(' ') == -1) {
return 0;
}
Calendar calendar = Calendar.getInstance();
calendar.setTimeInMillis(0);
String durationString = duration.substring(duration.indexOf(' ') + 1);
int durationInt = 0;
try {
durationInt = Integer.parseInt(duration.substring(0, duration.indexOf(' ')));
} catch(NumberFormatException exception) {
// No Number found, add zero
}
if(seconds.contains(durationString)) {
calendar.add(Calendar.SECOND, durationInt);
} else if(minutes.contains(durationString)) {
calendar.add(Calendar.MINUTE, durationInt);
} else if(hours.contains(durationString)) {
calendar.add(Calendar.HOUR, durationInt);
} else if(days.contains(durationString)) {
calendar.add(Calendar.DAY_OF_MONTH, durationInt);
} else if(weeks.contains(durationString)) {
calendar.add(Calendar.DAY_OF_MONTH, durationInt * 7);
} else if(months.contains(durationString)) {
calendar.add(Calendar.MONTH, durationInt);
} else if(years.contains(durationString)) {
calendar.add(Calendar.YEAR, durationInt);
} else {
AreaShop.warn("Unknown duration indicator:", durationString, "check if config.yml has the correct time indicators");
}
return calendar.getTimeInMillis();
} | 3.68 |
hadoop_RouterDistCpProcedure_disableWrite | /**
* Disable write by making the mount entry readonly.
*/
@Override
protected void disableWrite(FedBalanceContext context) throws IOException {
Configuration conf = context.getConf();
String mount = context.getMount();
MountTableProcedure.disableWrite(mount, conf);
updateStage(Stage.FINAL_DISTCP);
} | 3.68 |
framework_ListSelectElement_selectByText | /**
* Selects the option with the given text.
* <p>
* For a ListSelect in multi select mode, adds the given option(s) to the
* current selection.
*
* @param text
* the text of the option
*/
public void selectByText(String text) {
if (isReadOnly()) {
throw new ReadOnlyException();
}
select.selectByVisibleText(text);
if (isPhantomJS() && select.isMultiple()) {
// Phantom JS does not fire a change event when
// selecting/deselecting items in a multi select
fireChangeEvent(selectElement);
}
} | 3.68 |
flink_BinarySegmentUtils_byteIndex | /**
* Given a bit index, return the byte index containing it.
*
* @param bitIndex the bit index.
* @return the byte index.
*/
private static int byteIndex(int bitIndex) {
return bitIndex >>> ADDRESS_BITS_PER_WORD;
} | 3.68 |
framework_VDragEvent_setCurrentGwtEvent | /**
* Sets the latest {@link NativeEvent} that relates to this drag and drop
* operation. For example on {@link VDropHandler#dragEnter(VDragEvent)} this
* is commonly a {@link MouseOverEvent}.
*
* @param event
* the latest event
*/
public void setCurrentGwtEvent(NativeEvent event) {
currentGwtEvent = event;
} | 3.68 |
framework_TargetDetailsImpl_getMouseEvent | /**
* @return details about the actual event that caused the event details.
* Practically mouse move or mouse up.
*/
public MouseEventDetails getMouseEvent() {
return MouseEventDetails.deSerialize((String) getData("mouseEvent"));
} | 3.68 |
flink_RequestedGlobalProperties_getCustomPartitioner | /**
* Gets the custom partitioner associated with these properties.
*
* @return The custom partitioner associated with these properties.
*/
public Partitioner<?> getCustomPartitioner() {
return customPartitioner;
} | 3.68 |
framework_VaadinService_lockSession | /**
* Locks the given session for this service instance. Typically you want to
* call {@link VaadinSession#lock()} instead of this method.
* <p>
* Note: The method and its signature have been changed to return a lock
* instance in Vaadin 8.14.0. If you have overridden this method, you need
* to update your implementation.
* <p>
* Note: Overriding this method is not recommended, for custom lock storage
* strategy override {@link #getSessionLock(WrappedSession)} and
* {@link #setSessionLock(WrappedSession,Lock)} instead.
*
* @param wrappedSession
* The session to lock
* @return Lock instance
*
* @throws IllegalStateException
* if the session is invalidated before it can be locked
*/
protected Lock lockSession(WrappedSession wrappedSession) {
Lock lock = getSessionLock(wrappedSession);
if (lock == null) {
/*
* No lock found in the session attribute. Ensure only one lock is
* created and used by everybody by doing double checked locking.
* Assumes there is a memory barrier for the attribute (i.e. that
* the CPU flushes its caches and reads the value directly from main
* memory).
*/
synchronized (VaadinService.class) {
lock = getSessionLock(wrappedSession);
if (lock == null) {
lock = new ReentrantLock();
setSessionLock(wrappedSession, lock);
}
}
}
lock.lock();
try {
// Someone might have invalidated the session between fetching the
// lock and acquiring it. Guard for this by calling a method that's
// specified to throw IllegalStateException if invalidated
// (#12282)
wrappedSession.getAttribute(getLockAttributeName());
} catch (IllegalStateException e) {
lock.unlock();
throw e;
}
return lock;
} | 3.68 |
pulsar_ManagedCursorContainer_siftDown | /**
* Push the item down towards the bottom of the tree (the highest reading position).
*/
private void siftDown(final Item item) {
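// Repeatedly swap the item with whichever child has the smaller (earlier) read position
// until heap order is restored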
while (true) {
Item j = null;
Item right = getRight(item);
if (right != null && right.position.compareTo(item.position) < 0) {
Item left = getLeft(item);
if (left != null && left.position.compareTo(right.position) < 0) {
j = left;
} else {
j = right;
}
} else {
Item left = getLeft(item);
if (left != null && left.position.compareTo(item.position) < 0) {
j = left;
}
}
if (j != null) {
swap(item, j);
} else {
break;
}
}
} | 3.68 |
hadoop_HsController_about | /**
* Render a page about the current server.
*/
public void about() {
render(aboutPage());
} | 3.68 |
flink_Path_getName | /**
* Returns the final component of this path, i.e., everything that follows the last separator.
*
* @return the final component of the path
*/
public String getName() {
final String path = uri.getPath();
final int slash = path.lastIndexOf(SEPARATOR);
return path.substring(slash + 1);
} | 3.68 |
hudi_OptionsResolver_getDefaultPlanStrategyClassName | /**
* Returns the default plan strategy class.
*/
public static String getDefaultPlanStrategyClassName(Configuration conf) {
return OptionsResolver.isConsistentHashingBucketIndexType(conf) ? FlinkConsistentBucketClusteringPlanStrategy.class.getName() :
FlinkOptions.CLUSTERING_PLAN_STRATEGY_CLASS.defaultValue();
} | 3.68 |
framework_TreeData_moveAfterSibling | /**
* Moves an item to the position immediately after a sibling item. The two
* items must have the same parent. After making changes to the tree data,
* {@link TreeDataProvider#refreshAll()} should be called.
*
* @param item
* the item to be moved
* @param sibling
* the item after which the moved item will be located, or {@code
* null} to move item to first position
* @since 8.1
*/
public void moveAfterSibling(T item, T sibling) {
if (!contains(item)) {
throw new IllegalArgumentException(
"Item '" + item + "' not in the hierarchy");
}
if (sibling == null) {
List<T> children = itemToWrapperMap.get(getParent(item))
.getChildren();
// Move item to first position
children.remove(item);
children.add(0, item);
} else {
if (!contains(sibling)) {
throw new IllegalArgumentException(
"Item '" + sibling + "' not in the hierarchy");
}
T parent = itemToWrapperMap.get(item).getParent();
if (!Objects.equals(parent,
itemToWrapperMap.get(sibling).getParent())) {
throw new IllegalArgumentException("Items '" + item + "' and '"
+ sibling + "' don't have the same parent");
}
List<T> children = itemToWrapperMap.get(parent).getChildren();
// Move item to the position after the sibling
children.remove(item);
children.add(children.indexOf(sibling) + 1, item);
}
} | 3.68 |
framework_Flash_getParameterNames | /**
* Returns an iterable with declared parameter names.
*
* @see #setParameter(String, String)
* @see #getParameter(String)
* @since 7.4.1
* @return An iterable with declared parameter names.
*/
public Iterable<String> getParameterNames() {
Map<String, String> map = getState(false).embedParams;
if (map == null) {
return Collections.emptySet();
} else {
return Collections.unmodifiableSet(map.keySet());
}
} | 3.68 |
pulsar_BlockAwareSegmentInputStreamImpl_calculateBlockSize | // Calculate the block size after `entryBytesAlreadyWritten` bytes have already been uploaded
public static int calculateBlockSize(int maxBlockSize, ReadHandle readHandle,
long firstEntryToWrite, long entryBytesAlreadyWritten) {
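// Cap at maxBlockSize; otherwise cover the remaining entries' headers, their remaining
// payload bytes, and the data block header offset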
return (int) Math.min(
maxBlockSize,
(readHandle.getLastAddConfirmed() - firstEntryToWrite + 1) * ENTRY_HEADER_SIZE
+ (readHandle.getLength() - entryBytesAlreadyWritten)
+ DataBlockHeaderImpl.getDataStartOffset());
} | 3.68 |
flink_TwoInputStreamTask_getCanEmitBatchOfRecords | // This is needed for StreamMultipleInputProcessor#processInput to preserve the existing
// behavior of choosing an input every time a record is emitted. This behavior is good for
// fairness between input consumption. But it can reduce throughput due to added control
// flow cost on the per-record code path.
@Override
public CanEmitBatchOfRecordsChecker getCanEmitBatchOfRecords() {
return () -> false;
} | 3.68 |
hudi_HoodiePartitionMetadata_getPartitionMetafilePath | /**
* Returns the path of the partition metafile.
*
* @return Path of the partition metafile, or an empty option if none exists
*/
public static Option<Path> getPartitionMetafilePath(FileSystem fs, Path partitionPath) {
// The partition listing is a costly operation so instead we are searching for existence of the files instead.
// This is in expected order as properties file based partition metafiles should be the most common.
try {
Option<Path> textFormatPath = textFormatMetaPathIfExists(fs, partitionPath);
if (textFormatPath.isPresent()) {
return textFormatPath;
} else {
return baseFormatMetaPathIfExists(fs, partitionPath);
}
} catch (IOException ioe) {
throw new HoodieException("Error checking Hoodie partition metadata for " + partitionPath, ioe);
}
} | 3.68 |
hadoop_RawErasureEncoder_encode | /**
* Encode with inputs and generates outputs. More see above.
*
* @param inputs input buffers to read data from
* @param outputs output buffers to put the encoded data into, ready to read
* after the call
* @throws IOException if the encoder is closed.
*/
public void encode(ECChunk[] inputs, ECChunk[] outputs) throws IOException {
ByteBuffer[] newInputs = ECChunk.toBuffers(inputs);
ByteBuffer[] newOutputs = ECChunk.toBuffers(outputs);
encode(newInputs, newOutputs);
} | 3.68 |
hbase_MetaTableAccessor_addDaughtersToPut | /**
* Adds split daughters to the Put
*/
public static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB)
throws IOException {
if (splitA != null) {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITA_QUALIFIER)
.setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitA))
.build());
}
if (splitB != null) {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITB_QUALIFIER)
.setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitB))
.build());
}
return put;
} | 3.68 |
framework_VAbstractSplitPanel_makeScrollable | /**
* Ensures the panels are scrollable e.g. after style name changes
* <p>
* For internal use only. May be removed or replaced in the future.
*/
public void makeScrollable() {
if (touchScrollHandler == null) {
touchScrollHandler = TouchScrollDelegate.enableTouchScrolling(this);
}
touchScrollHandler.addElement(firstContainer);
touchScrollHandler.addElement(secondContainer);
} | 3.68 |
morf_FieldReference_equals | /**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
// TODO incorrect - permits other types. Can't change this - need to fix existing misuse in subtypes
if (obj instanceof FieldReference) {
FieldReference other = (FieldReference) obj;
return new EqualsBuilder()
.appendSuper(super.equals(obj))
.append(this.direction, other.direction)
.append(this.name, other.name)
.append(this.nullValueHandling, other.nullValueHandling)
.append(this.table, other.table)
.isEquals();
}
return false;
} | 3.68 |
hadoop_BoundedAppender_length | /**
* Get current length of messages considering truncates
* without header and ellipses.
*
* @return current length
*/
public int length() {
return messages.length();
} | 3.68 |
framework_BasicEvent_getCaption | /*
* (non-Javadoc)
*
* @see com.vaadin.addon.calendar.event.CalendarEvent#getCaption()
*/
@Override
public String getCaption() {
return caption;
} | 3.68 |
hudi_HoodieExampleDataGenerator_generateInserts | /**
* Generates new inserts, uniformly across the partition paths above. It also updates the list of existing keys.
*/
public List<HoodieRecord<T>> generateInserts(String commitTime, Integer n) {
return generateInsertsStream(commitTime, n).collect(Collectors.toList());
} | 3.68 |
hudi_HiveSchemaUtils_toHiveFieldSchema | /**
* Create Hive field schemas from Flink table schema including the hoodie metadata fields.
*/
public static List<FieldSchema> toHiveFieldSchema(TableSchema schema, boolean withOperationField) {
List<FieldSchema> columns = new ArrayList<>();
Collection<String> metaFields = new ArrayList<>(HoodieRecord.HOODIE_META_COLUMNS);
if (withOperationField) {
metaFields.add(HoodieRecord.OPERATION_METADATA_FIELD);
}
for (String metaField : metaFields) {
columns.add(new FieldSchema(metaField, "string", null));
}
columns.addAll(createHiveColumns(schema));
return columns;
} | 3.68 |
flink_FlinkPreparingTableBase_getAllowedAccess | /**
* Obtains the access type of the table.
*
* @return all access types including SELECT/UPDATE/INSERT/DELETE
*/
public SqlAccessType getAllowedAccess() {
return SqlAccessType.ALL;
} | 3.68 |
framework_TableScrollAfterAddRow_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.UI#init(com.vaadin.server.VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final int totalRows = 100;
final VerticalLayout layout = new VerticalLayout();
final IndexedContainer datasource = new IndexedContainer();
datasource.addContainerProperty("value", Integer.class, -1);
for (int i = 0; i < totalRows; i++) {
addRow(datasource);
}
final Table table = new Table();
table.setContainerDataSource(datasource);
layout.addComponent(table);
addComponent(layout);
final Label label = new Label("");
layout.addComponent(label);
NativeButton addRowButton = new NativeButton("Add row",
event -> addRow(datasource));
NativeButton jumpToLastRowButton = new NativeButton("Jump to last row",
event -> jumpToLastRow(table));
NativeButton jumpTo15thRowButton = new NativeButton("Jump to 15th row",
event -> jumpToFifteenthRow(table));
NativeButton jumpToFirstRowButton = new NativeButton(
"Jump to first row", event -> jumpToFirstRow(table));
NativeButton updateLabelButton = new NativeButton("UpdateLabel",
event -> label.setValue(Integer
.toString(table.getCurrentPageFirstItemIndex())));
layout.addComponent(addRowButton);
layout.addComponent(jumpToLastRowButton);
layout.addComponent(jumpTo15thRowButton);
layout.addComponent(jumpToFirstRowButton);
layout.addComponent(updateLabelButton);
} | 3.68 |
morf_ChangeIndex_getFromIndex | /**
* Gets the index prior to the change
*
* @return the index prior to the change
*/
public Index getFromIndex() {
return fromIndex;
} | 3.68 |
flink_ResultInfo_getFieldGetters | /**
* Create the {@link FieldGetter} to get column value in the results.
*
* <p>With {@code JSON} format, it uses the {@link ResolvedSchema} to build the getters.
* However, it uses {@link StringData}'s {@link FieldGetter} to get the column values.
*/
public List<FieldGetter> getFieldGetters() {
if (rowFormat == RowFormat.JSON) {
List<LogicalType> columnTypes =
columnInfos.stream()
.map(ColumnInfo::getLogicalType)
.collect(Collectors.toList());
return IntStream.range(0, columnTypes.size())
.mapToObj(i -> RowData.createFieldGetter(columnTypes.get(i), i))
.collect(Collectors.toList());
} else {
return IntStream.range(0, columnInfos.size())
.mapToObj(i -> RowData.createFieldGetter(STRING_TYPE, i))
.collect(Collectors.toList());
}
} | 3.68 |
hadoop_BatchedRequests_iterator | /**
* Exposes SchedulingRequest Iterator interface which can be used
* to traverse requests using different heuristics i.e. Tag Popularity
* @return SchedulingRequest Iterator.
*/
@Override
public Iterator<SchedulingRequest> iterator() {
switch (this.iteratorType) {
case SERIAL:
return new SerialIterator(requests);
case POPULAR_TAGS:
return new PopularTagsIterator(requests);
default:
return null;
}
} | 3.68 |
hadoop_AllocateRequest_updateRequests | /**
* Set the <code>updateRequests</code> of the request.
* @see AllocateRequest#setUpdateRequests(List)
* @param updateRequests <code>updateRequests</code> of the request
* @return {@link AllocateRequestBuilder}
*/
@Public
@Unstable
public AllocateRequestBuilder updateRequests(
List<UpdateContainerRequest> updateRequests) {
allocateRequest.setUpdateRequests(updateRequests);
return this;
} | 3.68 |
flink_RocksDBNativeMetricOptions_isEnabled | /**
* {{@link RocksDBNativeMetricMonitor}} is enabled if any property or ticker type is set.
*
* @return true if {{RocksDBNativeMetricMonitor}} should be enabled, false otherwise.
*/
public boolean isEnabled() {
return !properties.isEmpty() || isStatisticsEnabled();
} | 3.68 |
flink_SqlLikeUtils_sqlToRegexSimilar | /** Translates SQL SIMILAR pattern to Java regex pattern. */
public static String sqlToRegexSimilar(String sqlPattern, char escapeChar) {
similarEscapeRuleChecking(sqlPattern, escapeChar);
boolean insideCharacterEnumeration = false;
final StringBuilder javaPattern = new StringBuilder(sqlPattern.length() * 2);
final int len = sqlPattern.length();
for (int i = 0; i < len; i++) {
char c = sqlPattern.charAt(i);
if (c == escapeChar) {
if (i == (len - 1)) {
// It should never reach here after the escape rule
// checking.
throw invalidEscapeSequence(sqlPattern, i);
}
char nextChar = sqlPattern.charAt(i + 1);
if (SQL_SIMILAR_SPECIALS.indexOf(nextChar) >= 0) {
// special character, use \ to replace the escape char.
if (JAVA_REGEX_SPECIALS.indexOf(nextChar) >= 0) {
javaPattern.append('\\');
}
javaPattern.append(nextChar);
} else if (nextChar == escapeChar) {
javaPattern.append(nextChar);
} else {
// It should never reach here after the escape rule
// checking.
throw invalidEscapeSequence(sqlPattern, i);
}
i++; // we already process the next char.
} else {
switch (c) {
case '_':
javaPattern.append('.');
break;
case '%':
javaPattern.append("(?s:.*)");
break;
case '[':
javaPattern.append('[');
insideCharacterEnumeration = true;
i =
sqlSimilarRewriteCharEnumeration(
sqlPattern, javaPattern, i, escapeChar);
break;
case ']':
if (!insideCharacterEnumeration) {
throw invalidRegularExpression(sqlPattern, i);
}
insideCharacterEnumeration = false;
javaPattern.append(']');
break;
case '\\':
javaPattern.append("\\\\");
break;
case '$':
// $ is special character in java regex, but regular in
// SQL regex.
javaPattern.append("\\$");
break;
default:
javaPattern.append(c);
}
}
}
if (insideCharacterEnumeration) {
throw invalidRegularExpression(sqlPattern, len);
}
return javaPattern.toString();
} | 3.68 |
framework_VLayoutSlot_setAlignment | /**
* Sets the alignment data for this slot.
*
* @param alignment
* the alignment data, can be {@code null}
*/
public void setAlignment(AlignmentInfo alignment) {
this.alignment = alignment;
// if alignment is something other than topLeft then we need to align
// the component inside this slot
if (alignment != null && (!alignment.isLeft() || !alignment.isTop())) {
widget.getElement().getStyle().setPosition(Position.ABSOLUTE);
}
} | 3.68 |
flink_HiveTableUtil_maskFlinkProperties | /**
* Add a prefix to Flink-created properties to distinguish them from Hive-created properties.
*/
private static Map<String, String> maskFlinkProperties(Map<String, String> properties) {
return properties.entrySet().stream()
.filter(e -> e.getKey() != null && e.getValue() != null)
.map(e -> new Tuple2<>(FLINK_PROPERTY_PREFIX + e.getKey(), e.getValue()))
.collect(Collectors.toMap(t -> t.f0, t -> t.f1));
} | 3.68 |
hadoop_DataNodeVolumeMetrics_getFlushIoSampleCount | // Based on flushIoRate
public long getFlushIoSampleCount() {
return flushIoRate.lastStat().numSamples();
} | 3.68 |
hbase_ZKTableArchiveClient_disableHFileBackup | /**
* Disable hfile backups for all tables.
* <p>
* Previously backed up files are still retained (if present).
* <p>
   * Asynchronous operation - some extra HFiles may be retained in the archive directory after
   * disable is called, depending on the latency of the zookeeper notification reaching the servers.
* @throws IOException if an unexpected exception occurs
* @throws KeeperException if zookeeper can't be reached
*/
public void disableHFileBackup() throws IOException, KeeperException {
createHFileArchiveManager().disableHFileBackup().stop();
} | 3.68 |
hudi_HoodieAvroUtils_getFieldVal | /**
   * Obtains the value of the provided key. When returnNullIfNotFound is set to false,
   * the behavior is consistent with Avro 1.10 and later.
*/
public static Object getFieldVal(GenericRecord record, String key, boolean returnNullIfNotFound) {
Schema.Field field = record.getSchema().getField(key);
if (field == null) {
if (returnNullIfNotFound) {
return null;
} else {
        // Since Avro 1.10, Avro throws AvroRuntimeException("Not a valid schema field: " + key)
        // rather than returning null, as earlier versions did when the record doesn't contain the key.
// Here we simulate this behavior.
throw new AvroRuntimeException("Not a valid schema field: " + key);
}
} else {
return record.get(field.pos());
}
} | 3.68 |
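A hedged usage sketch of the two lookup modes; the schema and record are hypothetical, and the HoodieAvroUtils import path is assumed to be on the classpath:

    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"R\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"}]}");
    GenericRecord rec = new GenericData.Record(schema);
    rec.put("name", "hudi");
    Object v1 = HoodieAvroUtils.getFieldVal(rec, "name", true);     // "hudi"
    Object v2 = HoodieAvroUtils.getFieldVal(rec, "missing", true);  // null
    Object v3 = HoodieAvroUtils.getFieldVal(rec, "missing", false); // throws AvroRuntimeException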
hudi_BucketStreamWriteFunction_isBucketToLoad | /**
   * Determine whether the current fileID belongs to the current task: it does when
   * (partitionIndex + bucketNumber) % parallelism == taskID, as computed below.
*/
public boolean isBucketToLoad(int bucketNumber, String partition) {
final int partitionIndex = (partition.hashCode() & Integer.MAX_VALUE) % parallelism;
int globalIndex = partitionIndex + bucketNumber;
return BucketIdentifier.mod(globalIndex, parallelism) == taskID;
} | 3.68 |
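A worked example of the assignment arithmetic; the hash value is hypothetical and only the modular arithmetic is taken from the method:

    // Suppose parallelism = 4, (partition.hashCode() & Integer.MAX_VALUE) % 4 == 2, and bucketNumber = 5.
    // Then globalIndex = 2 + 5 = 7, and 7 mod 4 = 3, so only the subtask with taskID == 3 loads this bucket.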
hadoop_ManifestCommitter_executeCleanup | /**
* Perform the cleanup operation for job cleanup or abort.
* @param statisticName statistic/stage name
* @param jobContext job context
* @param committerConfig committer config
   * @return the outcome
   * @throws IOException failure
*/
private CleanupJobStage.Result executeCleanup(
final String statisticName,
final JobContext jobContext,
final ManifestCommitterConfig committerConfig) throws IOException {
try (CloseableTaskPoolSubmitter ioProcs =
committerConfig.createSubmitter()) {
return new CleanupJobStage(
committerConfig.createStageConfig()
.withOperations(createManifestStoreOperations())
.withIOProcessors(ioProcs)
.build())
.apply(cleanupStageOptionsFromConfig(
statisticName,
jobContext.getConfiguration()));
}
} | 3.68 |
querydsl_SQLExpressions_left | /**
* Get the rhs leftmost characters of lhs
*
* @param lhs string
* @param rhs character amount
* @return rhs leftmost characters
*/
public static StringExpression left(Expression<String> lhs, Expression<Integer> rhs) {
return Expressions.stringOperation(Ops.StringOps.LEFT, lhs, rhs);
} | 3.68 |
morf_AbstractSelectStatementBuilder_from | /**
* Selects fields from one or more inner selects:
*
* <blockquote><pre>
* SelectStatement statement = select().from(select().from("Foo"));
* </pre></blockquote>
*
* @param fromSelect the select statements to select from
* @return this, for method chaining.
*/
public T from(SelectStatement... fromSelect) {
fromSelects.addAll(Arrays.asList(fromSelect));
return castToChild(this);
} | 3.68 |
pulsar_ProducerImpl_run | /**
* Process sendTimeout events.
*/
@Override
public void run(Timeout timeout) throws Exception {
if (timeout.isCancelled()) {
return;
}
long timeToWaitMs;
synchronized (this) {
// If it's closing/closed we need to ignore this timeout and not schedule next timeout.
if (getState() == State.Closing || getState() == State.Closed) {
return;
}
OpSendMsg firstMsg = pendingMessages.peek();
if (firstMsg == null && (batchMessageContainer == null || batchMessageContainer.isEmpty())) {
// If there are no pending messages, reset the timeout to the configured value.
timeToWaitMs = conf.getSendTimeoutMs();
} else {
long createdAt;
if (firstMsg != null) {
createdAt = firstMsg.createdAt;
} else {
// Because we don't flush batch messages while disconnected, we consider them "createdAt" when
// they would have otherwise been flushed.
createdAt = lastBatchSendNanoTime
+ TimeUnit.MICROSECONDS.toNanos(conf.getBatchingMaxPublishDelayMicros());
}
// If there is at least one message, calculate the diff between the message timeout and the elapsed
                // time since the first message was created.
long diff = conf.getSendTimeoutMs()
- TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - createdAt);
if (diff <= 0) {
// The diff is less than or equal to zero, meaning that the message has been timed out.
// Set the callback to timeout on every message, then clear the pending queue.
log.info("[{}] [{}] Message send timed out. Failing {} messages", topic, producerName,
getPendingQueueSize());
String msg = format("The producer %s can not send message to the topic %s within given timeout",
producerName, topic);
if (firstMsg != null) {
PulsarClientException te = new PulsarClientException.TimeoutException(msg, firstMsg.sequenceId);
failPendingMessages(cnx(), te);
} else {
failPendingBatchMessages(new PulsarClientException.TimeoutException(msg));
}
// Since the pending queue is cleared now, set timer to expire after configured value.
timeToWaitMs = conf.getSendTimeoutMs();
} else {
                    // The diff is greater than zero; set the timeout to the diff value.
timeToWaitMs = diff;
}
}
sendTimeout = client.timer().newTimeout(this, timeToWaitMs, TimeUnit.MILLISECONDS);
}
} | 3.68 |
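A worked example of the re-arm arithmetic above; the numbers are illustrative only:

    // With sendTimeoutMs = 30_000 and the oldest pending message created 12 s ago,
    // diff = 30_000 - 12_000 = 18_000 > 0, so nothing is failed and the timer is re-armed for 18 s.
    // If the oldest message were 31 s old, diff <= 0: all pending messages are failed with a
    // TimeoutException and the timer is re-armed with the full 30 s.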
flink_SummaryAggregatorFactory_create | /**
* Create a SummaryAggregator for the supplied type.
*
* @param <T> the type to aggregate
* @param <R> the result type of the aggregation
*/
@SuppressWarnings("unchecked")
public static <T, R> Aggregator<T, R> create(Class<T> type) {
if (type == Long.class) {
return (Aggregator<T, R>) new LongSummaryAggregator();
} else if (type == LongValue.class) {
return (Aggregator<T, R>) new ValueSummaryAggregator.LongValueSummaryAggregator();
} else if (type == Integer.class) {
return (Aggregator<T, R>) new IntegerSummaryAggregator();
} else if (type == IntValue.class) {
return (Aggregator<T, R>) new ValueSummaryAggregator.IntegerValueSummaryAggregator();
} else if (type == Double.class) {
return (Aggregator<T, R>) new DoubleSummaryAggregator();
} else if (type == DoubleValue.class) {
return (Aggregator<T, R>) new ValueSummaryAggregator.DoubleValueSummaryAggregator();
} else if (type == Float.class) {
return (Aggregator<T, R>) new FloatSummaryAggregator();
} else if (type == FloatValue.class) {
return (Aggregator<T, R>) new ValueSummaryAggregator.FloatValueSummaryAggregator();
} else if (type == Short.class) {
return (Aggregator<T, R>) new ShortSummaryAggregator();
} else if (type == ShortValue.class) {
return (Aggregator<T, R>) new ValueSummaryAggregator.ShortValueSummaryAggregator();
} else if (type == Boolean.class) {
return (Aggregator<T, R>) new BooleanSummaryAggregator();
} else if (type == BooleanValue.class) {
return (Aggregator<T, R>) new ValueSummaryAggregator.BooleanValueSummaryAggregator();
} else if (type == String.class) {
return (Aggregator<T, R>) new StringSummaryAggregator();
} else if (type == StringValue.class) {
return (Aggregator<T, R>) new ValueSummaryAggregator.StringValueSummaryAggregator();
} else {
            // rather than erroring for unsupported types, fall back to something very generic
return (Aggregator<T, R>) new ObjectSummaryAggregator();
}
} | 3.68 |
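A minimal hedged usage sketch; the Aggregator import path is assumed, and the result type parameter is left as a wildcard since the cast inside the factory is unchecked anyway:

    Aggregator<Double, ?> doubles = SummaryAggregatorFactory.create(Double.class);  // DoubleSummaryAggregator
    Aggregator<String, ?> strings = SummaryAggregatorFactory.create(String.class);  // StringSummaryAggregator
    Aggregator<java.util.UUID, ?> other =
        SummaryAggregatorFactory.create(java.util.UUID.class);                      // ObjectSummaryAggregator fallback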