name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hbase_HeterogeneousRegionCountCostFunction_findLimitForRS | /**
* Find the limit for a ServerName. If not found then return the default value
* @param serverName the server we are looking for
* @return the limit
*/
int findLimitForRS(final ServerName serverName) {
boolean matched = false;
int limit = -1;
for (final Map.Entry<Pattern, Integer> entry : this.limitPerRule.entrySet()) {
if (entry.getKey().matcher(serverName.getHostname()).matches()) {
matched = true;
limit = entry.getValue();
break;
}
}
if (!matched) {
limit = this.defaultNumberOfRegions;
}
// Feeding cache
this.limitPerRS.put(serverName, limit);
return limit;
} | 3.68 |
hmily_PropertyName_append | /**
* Append property name.
*
* @param elementValue the element value
* @return the property name
*/
public PropertyName append(final String elementValue) {
if (elementValue == null) {
return this;
}
process(elementValue, (e, indexed) -> {
if (StringUtils.isBlank(e.get()) && LOGGER.isDebugEnabled()) {
LOGGER.debug("{} Did not find the corresponding property.", elementValue);
}
});
if (!isIndexed(elementValue)) {
List<Character> invalidChars = ElementValidator.getInvalidChars(elementValue);
if (!invalidChars.isEmpty()) {
throw new ConfigException("config property name " + elementValue + " is not valid");
}
}
int length = this.elements.length;
String[] elements = new String[length + 1];
System.arraycopy(this.elements, 0, elements, 0, length);
elements[length] = elementValue;
return new PropertyName(elements);
} | 3.68 |
hibernate-validator_ReflectionHelper_typeOf | /**
* @param member The {@code Member} instance for which to retrieve the type.
*
* @return Returns the {@code Type} of the given {@code Field} or {@code Method}.
*
* @throws IllegalArgumentException in case {@code member} is not a {@code Field} or {@code Method}.
*/
public static Type typeOf(Member member) {
Type type;
if ( member instanceof Field ) {
type = ( (Field) member ).getGenericType();
}
else if ( member instanceof Method ) {
type = ( (Method) member ).getGenericReturnType();
}
else if ( member instanceof Constructor<?> ) {
type = member.getDeclaringClass();
}
//TODO HV-571 change log method name
else {
throw LOG.getMemberIsNeitherAFieldNorAMethodException( member );
}
if ( type instanceof TypeVariable ) {
type = TypeHelper.getErasedType( type );
}
return type;
} | 3.68 |
flink_BinaryHashPartition_spillPartition | /**
* Spills this partition to disk and sets it up such that it continues spilling records that are
* added to it. The spilling process must free at least one buffer, either in the partition's
* record buffers, or in the memory segments for overflow buckets. The partition immediately
* takes back one buffer to use it for further spilling.
*
* @param ioAccess The I/O manager to be used to create a writer to disk.
* @param targetChannel The id of the target channel for this partition.
* @return The number of buffers that were freed by spilling this partition.
* @throws IOException Thrown, if the writing failed.
*/
int spillPartition(
IOManager ioAccess,
FileIOChannel.ID targetChannel,
LinkedBlockingQueue<MemorySegment> bufferReturnQueue)
throws IOException {
// sanity checks
if (!isInMemory()) {
throw new RuntimeException(
"Bug in Hybrid Hash Join: "
+ "Request to spill a partition that has already been spilled.");
}
if (getNumOccupiedMemorySegments() < 2) {
throw new RuntimeException(
"Bug in Hybrid Hash Join: "
+ "Request to spill a partition with less than two buffers.");
}
// create the channel block writer and spill the current buffers
// that keep the build side buffers current block, as it is most likely not full, yet
// we return the number of blocks that become available
this.buildSideChannel =
FileChannelUtil.createBlockChannelWriter(
ioAccess,
targetChannel,
bufferReturnQueue,
compressionEnable,
compressionCodecFactory,
compressionBlockSize,
memorySegmentSize);
return this.buildSideWriteBuffer.spill(this.buildSideChannel);
} | 3.68 |
flink_StreamTableSourceFactory_createStreamTableSource | /**
* Creates and configures a {@link StreamTableSource} using the given properties.
*
* @param properties normalized properties describing a stream table source.
* @return the configured stream table source.
* @deprecated {@link Context} contains more information, and already contains table schema too.
* Please use {@link #createTableSource(Context)} instead.
*/
@Deprecated
default StreamTableSource<T> createStreamTableSource(Map<String, String> properties) {
return null;
} | 3.68 |
hudi_FlinkConcatAndReplaceHandle_write | /**
* Write old record as is w/o merging with incoming record.
*/
@Override
public void write(HoodieRecord oldRecord) {
Schema oldSchema = config.populateMetaFields() ? writeSchemaWithMetaFields : writeSchema;
String key = oldRecord.getRecordKey(oldSchema, keyGeneratorOpt);
try {
fileWriter.write(key, oldRecord, writeSchema);
} catch (IOException | RuntimeException e) {
String errMsg = String.format("Failed to write old record into new file for key %s from old file %s to new file %s with writerSchema %s",
key, getOldFilePath(), newFilePath, writeSchemaWithMetaFields.toString(true));
LOG.debug("Old record is " + oldRecord);
throw new HoodieUpsertException(errMsg, e);
}
recordsWritten++;
} | 3.68 |
flink_AbstractStreamOperatorV2_snapshotState | /**
* Stream operators with state, which want to participate in a snapshot need to override this
* hook method.
*
* @param context context that provides information and means required for taking a snapshot
*/
@Override
public void snapshotState(StateSnapshotContext context) throws Exception {} | 3.68 |
hbase_ReplicationPeerConfigUtil_getPeerClusterConfiguration | /**
* Returns the configuration needed to talk to the remote slave cluster.
* @param conf the base configuration
* @param peer the description of replication peer
* @return the configuration for the peer cluster, null if it was unable to get the configuration
* @throws IOException when create peer cluster configuration failed
*/
public static Configuration getPeerClusterConfiguration(Configuration conf,
ReplicationPeerDescription peer) throws IOException {
ReplicationPeerConfig peerConfig = peer.getPeerConfig();
Configuration otherConf;
try {
otherConf = HBaseConfiguration.createClusterConf(conf, peerConfig.getClusterKey());
} catch (IOException e) {
throw new IOException("Can't get peer configuration for peerId=" + peer.getPeerId(), e);
}
if (!peerConfig.getConfiguration().isEmpty()) {
CompoundConfiguration compound = new CompoundConfiguration();
compound.add(otherConf);
compound.addStringMap(peerConfig.getConfiguration());
return compound;
}
return otherConf;
} | 3.68 |
morf_SqlDialect_appendOrderBy | /**
* Appends the ORDER BY clause to the result.
*
* @param result order by clause will be appended here
* @param stmt statement with order by clause
* @param <T> The type of AbstractSelectStatement
*/
protected <T extends AbstractSelectStatement<T>> void appendOrderBy(StringBuilder result, AbstractSelectStatement<T> stmt) {
if (!stmt.getOrderBys().isEmpty()) {
result.append(" ORDER BY ");
boolean firstOrderByField = true;
for (AliasedField currentOrderByField : stmt.getOrderBys()) {
if (!firstOrderByField) {
result.append(", ");
}
result.append(getSqlForOrderByField(currentOrderByField));
firstOrderByField = false;
}
}
} | 3.68 |
hudi_HoodieLogBlock_inflate | /**
* When lazyReading of blocks is turned on, inflate the content of a log block from disk.
*/
protected void inflate() throws HoodieIOException {
checkState(!content.isPresent(), "Block has already been inflated");
checkState(inputStream != null, "Block should have input-stream provided");
try {
content = Option.of(new byte[(int) this.getBlockContentLocation().get().getBlockSize()]);
inputStream.seek(this.getBlockContentLocation().get().getContentPositionInLogFile());
inputStream.readFully(content.get(), 0, content.get().length);
inputStream.seek(this.getBlockContentLocation().get().getBlockEndPos());
} catch (IOException e) {
// TODO : fs.open() and return inputstream again, need to pass FS configuration
// because the inputstream might close/timeout for large number of log blocks to be merged
inflate();
}
} | 3.68 |
framework_FieldGroup_isValid | /**
* Checks the validity of the bound fields.
* <p>
* Call the {@link Field#validate()} for the fields to get the individual
* error messages.
*
* @return true if all bound fields are valid, false otherwise.
*/
public boolean isValid() {
try {
for (Field<?> field : getFields()) {
field.validate();
}
return true;
} catch (InvalidValueException e) {
return false;
}
} | 3.68 |
hadoop_RollingWindow_safeReset | /**
* Safely reset the bucket state considering concurrent updates (inc) and
* resets.
*
* @param time the current time
*/
void safeReset(long time) {
// At any point in time, only one thread is allowed to reset the
// bucket
synchronized (this) {
if (isStaleNow(time)) {
// reset the value before setting the time, it allows other
// threads to safely assume that the value is updated if the
// time is not stale
value.set(0);
updateTime.set(time);
}
// else a concurrent thread has already reset it: do nothing
}
} | 3.68 |
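The ordering inside safeReset (zero the value, then publish the new timestamp, all under the lock) is what lets readers treat a non-stale timestamp as proof that the value has already been reset. A minimal self-contained sketch of the same pattern; the class name and window length are illustrative, not the Hadoop implementation:

```java
import java.util.concurrent.atomic.AtomicLong;

// Minimal sketch of the reset-before-timestamp pattern (hypothetical class,
// not the Hadoop RollingWindow bucket).
class SketchBucket {
    private final AtomicLong value = new AtomicLong();
    private final AtomicLong updateTime = new AtomicLong();
    private final long windowLenMs = 60_000L;

    boolean isStaleNow(long time) {
        return time - updateTime.get() > windowLenMs;
    }

    void inc(long time, long delta) {
        if (isStaleNow(time)) {
            safeReset(time);
        }
        value.addAndGet(delta);
    }

    synchronized void safeReset(long time) {
        // Only one thread resets; concurrent readers see either the old (stale)
        // timestamp or a fresh timestamp together with the already-zeroed value.
        if (isStaleNow(time)) {
            value.set(0);          // reset the value first...
            updateTime.set(time);  // ...then publish the new timestamp
        }
    }

    public static void main(String[] args) {
        SketchBucket b = new SketchBucket();
        b.inc(System.currentTimeMillis(), 5);
        System.out.println(b.value.get()); // 5
    }
}
```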
hbase_Bytes_binaryIncrementNeg | /* increment/decrement for negative values */
private static byte[] binaryIncrementNeg(byte[] value, long amount) {
long amo = amount;
int sign = 1;
if (amount < 0) {
amo = -amount;
sign = -1;
}
for (int i = 0; i < value.length; i++) {
int cur = ((int) amo % 256) * sign;
amo = (amo >> 8);
int val = (~value[value.length - i - 1] & 0x0ff) + 1;
int total = cur - val;
if (total >= 0) {
amo += sign;
} else if (total < -256) {
amo -= sign;
total %= 256;
}
value[value.length - i - 1] = (byte) total;
if (amo == 0) return value;
}
return value;
} | 3.68 |
morf_Function_monthsBetween | /**
* The number of whole months between two dates. The logic used is equivalent to
* {@link Months#monthsBetween(org.joda.time.ReadableInstant, org.joda.time.ReadableInstant)}.
*
* <p>As an example, assuming two dates are in the same year and the {@code fromDate} is from two months prior to
 * the {@code toDate} (i.e. {@code MONTH(toDate) - MONTH(fromDate) = 2}) then:</p>
* <ul>
* <li> If the {@code toDate} day of the month is greater than or equal to the {@code fromDate}
* day of the month, then the difference is two months;
* <li> If the {@code toDate} day of the month lies on the end of the month, then the difference is
* two months, to account for month length differences (e.g. 31 Jan > 28 Feb = 1; 30 Jan > 27 Feb = 0);
* <li> Otherwise, the difference is one (e.g. if the day of {@code fromDate} > day of {@code toDate}).
* </ul>
*
* @param fromDate Lower bound.
* @param toDate Upper bound.
* @return function An instance of the "months between" function.
*/
public static Function monthsBetween(AliasedField fromDate, AliasedField toDate) {
return new Function(FunctionType.MONTHS_BETWEEN, toDate, fromDate);
} | 3.68 |
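The whole-month convention above defers to Joda-Time's Months.monthsBetween. A small standalone check, assuming joda-time is on the classpath; the expectations in the comments follow the rules stated in the javadoc rather than independent verification:

```java
import org.joda.time.LocalDate;
import org.joda.time.Months;

public class MonthsBetweenConvention {
    public static void main(String[] args) {
        // Same day of month, two calendar months apart -> 2 whole months.
        System.out.println(Months.monthsBetween(
                new LocalDate(2023, 1, 15), new LocalDate(2023, 3, 15)).getMonths());
        // toDate falls on the last day of its month -> counts as a whole month
        // per the end-of-month rule described in the javadoc above.
        System.out.println(Months.monthsBetween(
                new LocalDate(2023, 1, 31), new LocalDate(2023, 2, 28)).getMonths());
    }
}
```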
flink_StreamingSemiAntiJoinOperator_processElement1 | /**
* Process an input element and output incremental joined records, retraction messages will be
* sent in some scenarios.
*
* <p>Following is the pseudo code to describe the core logic of this method.
*
* <pre>
* if there is no matched rows on the other side
* if anti join, send input record
* if there are matched rows on the other side
* if semi join, send input record
* if the input record is accumulate, state.add(record, matched size)
* if the input record is retract, state.retract(record)
* </pre>
*/
@Override
public void processElement1(StreamRecord<RowData> element) throws Exception {
RowData input = element.getValue();
AssociatedRecords associatedRecords =
AssociatedRecords.of(input, true, rightRecordStateView, joinCondition);
if (associatedRecords.isEmpty()) {
if (isAntiJoin) {
collector.collect(input);
}
} else { // there are matched rows on the other side
if (!isAntiJoin) {
collector.collect(input);
}
}
if (RowDataUtil.isAccumulateMsg(input)) {
// erase RowKind for state updating
input.setRowKind(RowKind.INSERT);
leftRecordStateView.addRecord(input, associatedRecords.size());
} else { // input is retract
// erase RowKind for state updating
input.setRowKind(RowKind.INSERT);
leftRecordStateView.retractRecord(input);
}
} | 3.68 |
morf_Criterion_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser
.dispatch(field)
.dispatch(value)
.dispatch(selectStatement)
.dispatch(criteria);
} | 3.68 |
hadoop_ZStandardDecompressor_needsDictionary | // dictionary is not supported.
@Override
public boolean needsDictionary() {
return false;
} | 3.68 |
framework_CustomizedSystemMessages_setAuthenticationErrorURL | /**
 * Sets the URL to go to when there is an authentication error.
*
* @param authenticationErrorURL
* the URL to go to, or null to reload current
*/
public void setAuthenticationErrorURL(String authenticationErrorURL) {
this.authenticationErrorURL = authenticationErrorURL;
} | 3.68 |
hadoop_JobTokenSecretManager_createPassword | /**
* Create a new password/secret for the given job token identifier.
* @param identifier the job token identifier
* @return token password/secret
*/
@Override
public byte[] createPassword(JobTokenIdentifier identifier) {
byte[] result = createPassword(identifier.getBytes(), masterKey);
return result;
} | 3.68 |
hadoop_FlowRunRowKey_getRowKeyAsString | /**
* Constructs a row key for the flow run table as follows:
* {@code clusterId!userId!flowName!Flow Run Id}.
* @return String representation of row key
*/
public String getRowKeyAsString() {
return flowRunRowKeyConverter.encodeAsString(this);
} | 3.68 |
framework_Label_removeListener | /**
* @deprecated As of 7.0, replaced by
* {@link #removeValueChangeListener(Property.ValueChangeListener)}
*/
@Override
@Deprecated
public void removeListener(Property.ValueChangeListener listener) {
removeValueChangeListener(listener);
} | 3.68 |
druid_SpringIbatisBeanTypeAutoProxyCreator_getAdvicesAndAdvisorsForBean | /**
* Identify as bean to proxy if the bean name is in the configured list of names.
*/
@SuppressWarnings("rawtypes")
protected Object[] getAdvicesAndAdvisorsForBean(Class beanClass, String beanName, TargetSource targetSource) {
for (String mappedName : this.beanNames) {
if (FactoryBean.class.isAssignableFrom(beanClass)) {
if (!mappedName.startsWith(BeanFactory.FACTORY_BEAN_PREFIX)) {
continue;
}
mappedName = mappedName.substring(BeanFactory.FACTORY_BEAN_PREFIX.length());
}
if (isMatch(beanName, mappedName)) {
return PROXY_WITHOUT_ADDITIONAL_INTERCEPTORS;
}
}
return DO_NOT_PROXY;
} | 3.68 |
morf_GraphBasedUpgrade_getNumberOfNodes | /**
* @return number of upgrade nodes in this graph, without the no-op root
*/
public int getNumberOfNodes() {
return numberOfNodes;
} | 3.68 |
flink_AbstractMultipleInputTransformation_getInputTypes | /** Returns the {@code TypeInformation} for the elements from the inputs. */
public List<TypeInformation<?>> getInputTypes() {
return inputs.stream().map(Transformation::getOutputType).collect(Collectors.toList());
} | 3.68 |
dubbo_NacosNamingServiceUtils_createNamingService | /**
* Create an instance of {@link NamingService} from specified {@link URL connection url}
*
* @param connectionURL {@link URL connection url}
* @return {@link NamingService}
* @since 2.7.5
*/
public static NacosNamingServiceWrapper createNamingService(URL connectionURL) {
boolean check = connectionURL.getParameter(NACOS_CHECK_KEY, true);
int retryTimes = connectionURL.getPositiveParameter(NACOS_RETRY_KEY, 10);
int sleepMsBetweenRetries = connectionURL.getPositiveParameter(NACOS_RETRY_WAIT_KEY, 10);
NacosConnectionManager nacosConnectionManager =
new NacosConnectionManager(connectionURL, check, retryTimes, sleepMsBetweenRetries);
return new NacosNamingServiceWrapper(nacosConnectionManager, retryTimes, sleepMsBetweenRetries);
} | 3.68 |
morf_MySqlDialect_fetchSizeForBulkSelects | /**
* MySQL defaults to <a href="http://stackoverflow.com/questions/20496616/fetchsize-in-resultset-set-to-0-by-default">fetching
* <em>all</em> records</a> into memory when a JDBC query is executed, which causes OOM
* errors when used with large data sets (Cryo and ETLs being prime offenders). Ideally
* we would use a nice big paging size here (like 200 as used in {@link OracleDialect})
* but as noted in the link above, MySQL only supports one record at a time or all at
* once, with nothing in between. As a result, we default to one record for bulk loads
* as the only safe choice.
*
* @see org.alfasoftware.morf.jdbc.SqlDialect#fetchSizeForBulkSelects()
*/
@Override
public int fetchSizeForBulkSelects() {
return Integer.MIN_VALUE;
} | 3.68 |
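For readers unfamiliar with the MySQL Connector/J quirk referenced above: row-by-row streaming is requested by passing Integer.MIN_VALUE to setFetchSize on a forward-only, read-only statement. A hedged usage sketch with placeholder connection details and table name:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class StreamingSelectSketch {
    public static void main(String[] args) throws SQLException {
        // Placeholder URL/credentials; the point is the statement configuration below.
        try (Connection conn = DriverManager.getConnection("jdbc:mysql://localhost/db", "user", "pass");
             Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) {
            // Integer.MIN_VALUE asks MySQL Connector/J to stream rows one at a time
            // instead of buffering the whole result set in memory.
            stmt.setFetchSize(Integer.MIN_VALUE);
            try (ResultSet rs = stmt.executeQuery("SELECT id FROM some_large_table")) {
                while (rs.next()) {
                    // process rs.getLong(1)
                }
            }
        }
    }
}
```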
framework_AbstractContainer_fireItemSetChange | /**
* Sends an Item set change event to all registered interested listeners.
*
* @param event
* the item set change event to send, optionally with additional
* information
*/
protected void fireItemSetChange(ItemSetChangeEvent event) {
if (getItemSetChangeListeners() != null) {
for (Object l : getItemSetChangeListeners().toArray()) {
((Container.ItemSetChangeListener) l)
.containerItemSetChange(event);
}
}
} | 3.68 |
graphhopper_Service_activeOn | /**
* Is this service active on the specified date?
*/
public boolean activeOn (LocalDate date) {
// first check for exceptions
CalendarDate exception = calendar_dates.get(date);
if (exception != null)
return exception.exception_type == 1;
else if (calendar == null)
return false;
else {
int gtfsDate = date.getYear() * 10000 + date.getMonthValue() * 100 + date.getDayOfMonth();
boolean withinValidityRange = calendar.end_date >= gtfsDate && calendar.start_date <= gtfsDate;
if (!withinValidityRange) return false;
switch (date.getDayOfWeek()) {
case MONDAY:
return calendar.monday == 1;
case TUESDAY:
return calendar.tuesday == 1;
case WEDNESDAY:
return calendar.wednesday == 1;
case THURSDAY:
return calendar.thursday == 1;
case FRIDAY:
return calendar.friday == 1;
case SATURDAY:
return calendar.saturday == 1;
case SUNDAY:
return calendar.sunday == 1;
default:
throw new IllegalArgumentException("unknown day of week constant!");
}
}
} | 3.68 |
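The validity-range comparison works because the date is packed into the numeric GTFS yyyymmdd form, which orders the same way as the calendar. A quick standalone illustration (plain JDK, not GraphHopper code):

```java
import java.time.LocalDate;

public class GtfsDateEncoding {
    public static void main(String[] args) {
        LocalDate date = LocalDate.of(2023, 5, 7);
        // Same packing as in activeOn(): year * 10000 + month * 100 + day.
        int gtfsDate = date.getYear() * 10000 + date.getMonthValue() * 100 + date.getDayOfMonth();
        System.out.println(gtfsDate); // 20230507 -- numeric order matches calendar order
    }
}
```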
hbase_DisableTableProcedure_preDisable | /**
* Action before disabling table.
* @param env MasterProcedureEnv
* @param state the procedure state
*/
protected void preDisable(final MasterProcedureEnv env, final DisableTableState state)
throws IOException, InterruptedException {
runCoprocessorAction(env, state);
} | 3.68 |
flink_HiveInspectors_toFlinkObject | /** Converts a Hive object to Flink object with an ObjectInspector. */
public static Object toFlinkObject(ObjectInspector inspector, Object data, HiveShim hiveShim) {
if (data == null || inspector instanceof VoidObjectInspector) {
return null;
}
if (inspector instanceof PrimitiveObjectInspector) {
if (inspector instanceof BooleanObjectInspector
|| inspector instanceof StringObjectInspector
|| inspector instanceof ByteObjectInspector
|| inspector instanceof ShortObjectInspector
|| inspector instanceof IntObjectInspector
|| inspector instanceof LongObjectInspector
|| inspector instanceof FloatObjectInspector
|| inspector instanceof DoubleObjectInspector
|| inspector instanceof BinaryObjectInspector) {
PrimitiveObjectInspector poi = (PrimitiveObjectInspector) inspector;
return poi.getPrimitiveJavaObject(data);
} else if (inspector instanceof DateObjectInspector) {
PrimitiveObjectInspector poi = (PrimitiveObjectInspector) inspector;
return hiveShim.toFlinkDate(poi.getPrimitiveJavaObject(data));
} else if (inspector instanceof TimestampObjectInspector) {
PrimitiveObjectInspector poi = (PrimitiveObjectInspector) inspector;
return hiveShim.toFlinkTimestamp(poi.getPrimitiveJavaObject(data));
} else if (inspector instanceof HiveCharObjectInspector) {
HiveCharObjectInspector oi = (HiveCharObjectInspector) inspector;
return oi.getPrimitiveJavaObject(data).getValue();
} else if (inspector instanceof HiveVarcharObjectInspector) {
HiveVarcharObjectInspector oi = (HiveVarcharObjectInspector) inspector;
return oi.getPrimitiveJavaObject(data).getValue();
} else if (inspector instanceof HiveDecimalObjectInspector) {
HiveDecimalObjectInspector oi = (HiveDecimalObjectInspector) inspector;
return oi.getPrimitiveJavaObject(data).bigDecimalValue();
} else if (inspector instanceof HiveIntervalYearMonthObjectInspector) {
HiveIntervalYearMonthObjectInspector oi =
(HiveIntervalYearMonthObjectInspector) inspector;
HiveIntervalYearMonth hiveIntervalYearMonth = oi.getPrimitiveJavaObject(data);
return Period.of(
hiveIntervalYearMonth.getYears(), hiveIntervalYearMonth.getMonths(), 0);
} else if (inspector instanceof HiveIntervalDayTimeObjectInspector) {
HiveIntervalDayTimeObjectInspector oi =
(HiveIntervalDayTimeObjectInspector) inspector;
HiveIntervalDayTime hiveIntervalDayTime = oi.getPrimitiveJavaObject(data);
return Duration.ofSeconds(
hiveIntervalDayTime.getTotalSeconds(), hiveIntervalDayTime.getNanos());
}
}
if (inspector instanceof ListObjectInspector) {
ListObjectInspector listInspector = (ListObjectInspector) inspector;
List<?> list = listInspector.getList(data);
if (list == null) {
return null;
}
// flink expects a specific array type (e.g. Integer[] instead of Object[]), so we have
// to get the element class
ObjectInspector elementInspector = listInspector.getListElementObjectInspector();
Object[] result =
(Object[])
Array.newInstance(
HiveTypeUtil.toFlinkType(elementInspector).getConversionClass(),
list.size());
for (int i = 0; i < list.size(); i++) {
result[i] = toFlinkObject(elementInspector, list.get(i), hiveShim);
}
return result;
}
if (inspector instanceof MapObjectInspector) {
MapObjectInspector mapInspector = (MapObjectInspector) inspector;
Map<?, ?> map = mapInspector.getMap(data);
if (map == null) {
return null;
}
Map<Object, Object> result = CollectionUtil.newHashMapWithExpectedSize(map.size());
for (Map.Entry<?, ?> entry : map.entrySet()) {
result.put(
toFlinkObject(
mapInspector.getMapKeyObjectInspector(), entry.getKey(), hiveShim),
toFlinkObject(
mapInspector.getMapValueObjectInspector(),
entry.getValue(),
hiveShim));
}
return result;
}
if (inspector instanceof StructObjectInspector) {
StructObjectInspector structInspector = (StructObjectInspector) inspector;
List<? extends StructField> fields = structInspector.getAllStructFieldRefs();
Row row = new Row(fields.size());
// StandardStructObjectInspector.getStructFieldData in Hive-1.2.1 only accepts array or
// list as data
if (!data.getClass().isArray()
&& !(data instanceof List)
&& (inspector instanceof StandardStructObjectInspector)) {
data = new Object[] {data};
}
for (int i = 0; i < row.getArity(); i++) {
row.setField(
i,
toFlinkObject(
fields.get(i).getFieldObjectInspector(),
structInspector.getStructFieldData(data, fields.get(i)),
hiveShim));
}
return row;
}
throw new FlinkHiveUDFException(
String.format("Unwrap does not support ObjectInspector '%s' yet", inspector));
} | 3.68 |
hudi_HoodieTablePreCommitFileSystemView_getLatestBaseFiles | /**
* Combine committed base files + new files created/replaced for given partition.
*/
public final Stream<HoodieBaseFile> getLatestBaseFiles(String partitionStr) {
// get fileIds replaced by current inflight commit
List<String> replacedFileIdsForPartition = partitionToReplaceFileIds.getOrDefault(partitionStr, Collections.emptyList());
// get new files written by current inflight commit
Map<String, HoodieBaseFile> newFilesWrittenForPartition = filesWritten.stream()
.filter(file -> partitionStr.equals(file.getPartitionPath()))
.collect(Collectors.toMap(HoodieWriteStat::getFileId, writeStat ->
new HoodieBaseFile(new CachingPath(tableMetaClient.getBasePath(), writeStat.getPath()).toString(), writeStat.getFileId(), preCommitInstantTime, null)));
Stream<HoodieBaseFile> committedBaseFiles = this.completedCommitsFileSystemView.getLatestBaseFiles(partitionStr);
Map<String, HoodieBaseFile> allFileIds = committedBaseFiles
// Remove files replaced by current inflight commit
.filter(baseFile -> !replacedFileIdsForPartition.contains(baseFile.getFileId()))
.collect(Collectors.toMap(HoodieBaseFile::getFileId, baseFile -> baseFile));
allFileIds.putAll(newFilesWrittenForPartition);
return allFileIds.values().stream();
} | 3.68 |
pulsar_SinkContext_getSubscriptionType | /**
* Get subscription type used by the source providing data for the sink.
*
* @return subscription type
*/
default SubscriptionType getSubscriptionType() {
throw new UnsupportedOperationException("Context does not provide SubscriptionType");
} | 3.68 |
framework_VTabsheet_getNearestShownTabIndex | /**
* After removing a tab, find a new scroll position. In most cases the
* scroll position does not change, but if the tab at the scroll
* position was removed, we need to find a nearby tab that is visible.
* The search is performed first to the right from the original tab
* (less need to scroll), then to the left.
*
* @param oldPosition
* the index to start the search from
* @return the index of the nearest shown tab, or {@code -1} if there
* are none
*/
private int getNearestShownTabIndex(int oldPosition) {
for (int i = oldPosition; i < getTabCount(); i++) {
Tab tab = getTab(i);
if (!tab.isHiddenOnServer()) {
return i;
}
}
for (int i = oldPosition - 1; i >= 0; i--) {
Tab tab = getTab(i);
if (tab != null && !tab.isHiddenOnServer()) {
return i;
}
}
return -1;
} | 3.68 |
framework_LayoutManager_getOuterHeightDouble | /**
* Gets the outer height (including margins, paddings and borders) of the
* given element, provided that it has been measured. These elements are
* guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* -1 is returned if the element has not been measured. If 0 is returned, it
* might indicate that the element is not attached to the DOM.
*
* @since 7.5.1
* @param element
* the element to get the measured size for
* @return the measured outer height (including margins, paddings and
* borders) of the element in pixels.
*/
public final double getOuterHeightDouble(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getOuterHeight();
} | 3.68 |
framework_VCalendar_getWeekGrid | /**
 * Get the week grid component.
 *
 * @return the week grid component
*/
public WeekGrid getWeekGrid() {
return weekGrid;
} | 3.68 |
hbase_FileCleanerDelegate_isEmptyDirDeletable | /**
 * Check if an empty directory with no subdirectories or files can be deleted
* @param dir Path of the directory
* @return True if the directory can be deleted, otherwise false
*/
default boolean isEmptyDirDeletable(Path dir) {
return true;
} | 3.68 |
hadoop_ResourceUsage_getCachedUsed | // Cache Used
public Resource getCachedUsed() {
return _get(NL, ResourceType.CACHED_USED);
} | 3.68 |
hadoop_TypedBytesOutput_writeDouble | /**
* Writes a double as a typed bytes sequence.
*
* @param d the double to be written
* @throws IOException
*/
public void writeDouble(double d) throws IOException {
out.write(Type.DOUBLE.code);
out.writeDouble(d);
} | 3.68 |
flink_AbstractPagedOutputView_getSegmentSize | /**
* Gets the size of the segments used by this view.
*
* @return The memory segment size.
*/
public int getSegmentSize() {
return this.segmentSize;
} | 3.68 |
flink_DriverUtils_checkNotNull | /**
* Ensures that the given object reference is not null. Upon violation, a {@code
* NullPointerException} with the given message is thrown.
*
* @param reference The object reference
* @param errorMessage The message for the {@code NullPointerException} that is thrown if the
* check fails.
* @return The object reference itself (generically typed).
* @throws NullPointerException Thrown, if the passed reference was null.
*/
public static <T> T checkNotNull(@Nullable T reference, @Nullable String errorMessage) {
if (reference == null) {
throw new NullPointerException(String.valueOf(errorMessage));
}
return reference;
} | 3.68 |
hmily_PropertyName_isEmpty | /**
 * Checks whether this property name has no elements.
 *
 * @return true if the property name is empty
*/
public boolean isEmpty() {
return this.getElementSize() == 0;
} | 3.68 |
hbase_RowResource_checkAndDelete | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes
* checkAndDelete on HTable.
* @param model instance of CellSetModel
* @return Response 200 OK, 304 Not modified, 400 Bad request
*/
Response checkAndDelete(final CellSetModel model) {
Table table = null;
Delete delete = null;
try {
table = servlet.getTable(tableResource.getName());
if (model.getRows().size() != 1) {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Number of rows specified is not 1." + CRLF).build();
}
RowModel rowModel = model.getRows().get(0);
byte[] key = rowModel.getKey();
if (key == null) {
key = rowspec.getRow();
}
if (key == null) {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Row key found to be null." + CRLF).build();
}
List<CellModel> cellModels = rowModel.getCells();
int cellModelCount = cellModels.size();
delete = new Delete(key);
boolean retValue;
CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount - 1);
byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
if (valueToDeleteColumn == null) {
try {
valueToDeleteColumn = rowspec.getColumns()[0];
} catch (final ArrayIndexOutOfBoundsException e) {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Column not specified for check." + CRLF).build();
}
}
byte[][] parts;
// Copy all the cells to the Delete request if extra cells are sent
if (cellModelCount > 1) {
for (int i = 0, n = cellModelCount - 1; i < n; i++) {
CellModel cell = cellModels.get(i);
byte[] col = cell.getColumn();
if (col == null) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Column found to be null." + CRLF).build();
}
parts = CellUtil.parseColumn(col);
if (parts.length == 1) {
// Only Column Family is specified
delete.addFamily(parts[0], cell.getTimestamp());
} else if (parts.length == 2) {
delete.addColumn(parts[0], parts[1], cell.getTimestamp());
} else {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Column to delete incorrectly specified." + CRLF).build();
}
}
}
parts = CellUtil.parseColumn(valueToDeleteColumn);
if (parts.length == 2) {
if (parts[1].length != 0) {
// To support backcompat of deleting a cell
// if that is the only cell passed to the rest api
if (cellModelCount == 1) {
delete.addColumns(parts[0], parts[1]);
}
retValue = table.checkAndMutate(key, parts[0]).qualifier(parts[1])
.ifEquals(valueToDeleteCell.getValue()).thenDelete(delete);
} else {
// The case of empty qualifier.
if (cellModelCount == 1) {
delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
}
retValue = table.checkAndMutate(key, parts[0]).ifEquals(valueToDeleteCell.getValue())
.thenDelete(delete);
}
} else {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
.entity("Bad request: Column to check incorrectly specified." + CRLF).build();
}
if (LOG.isTraceEnabled()) {
LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " + retValue);
}
if (!retValue) {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT)
.entity(" Delete check failed." + CRLF).build();
}
ResponseBuilder response = Response.ok();
servlet.getMetrics().incrementSucessfulDeleteRequests(1);
return response.build();
} catch (Exception e) {
servlet.getMetrics().incrementFailedDeleteRequests(1);
return processException(e);
} finally {
if (table != null) {
try {
table.close();
} catch (IOException ioe) {
LOG.debug("Exception received while closing the table", ioe);
}
}
}
} | 3.68 |
flink_StateMetaInfoSnapshotReadersWriters_getReader | /**
* Returns a reader for {@link StateMetaInfoSnapshot} with the requested state type and version
* number.
*
* @param readVersion the format version to read.
* @return the requested reader.
*/
@Nonnull
public static StateMetaInfoReader getReader(int readVersion) {
checkArgument(
readVersion <= CURRENT_STATE_META_INFO_SNAPSHOT_VERSION,
"Unsupported read version for state meta info [%s]",
readVersion);
if (readVersion < 6) {
// versions before 5 still had different state meta info formats between keyed /
// operator state
throw new UnsupportedOperationException(
String.format(
"No longer supported version [%d]. Please upgrade first to Flink 1.16. ",
readVersion));
}
return CurrentReaderImpl.INSTANCE;
} | 3.68 |
pulsar_ReaderConfiguration_getReceiverQueueSize | /**
 * @return the configured receiver queue size
*/
public int getReceiverQueueSize() {
return conf.getReceiverQueueSize();
} | 3.68 |
flink_DefaultLookupCache_cacheMissingKey | /**
* Specifies whether to cache empty value into the cache.
*
* <p>Please note that "empty" means a collection without any rows in it instead of null.
* The cache will not accept any null key or value.
*/
public Builder cacheMissingKey(boolean cacheMissingKey) {
this.cacheMissingKey = cacheMissingKey;
return this;
} | 3.68 |
hbase_HQuorumPeer_main | /**
* Parse ZooKeeper configuration from HBase XML config and run a QuorumPeer.
* @param args String[] of command line arguments. Not used.
*/
public static void main(String[] args) {
Configuration conf = HBaseConfiguration.create();
try {
Properties zkProperties = ZKConfig.makeZKProps(conf);
writeMyID(zkProperties);
QuorumPeerConfig zkConfig = new QuorumPeerConfig();
zkConfig.parseProperties(zkProperties);
// login the zookeeper server principal (if using security)
ZKAuthentication.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE,
HConstants.ZK_SERVER_KERBEROS_PRINCIPAL, zkConfig.getClientPortAddress().getHostName());
runZKServer(zkConfig);
} catch (Exception e) {
LOG.error("Failed to start ZKServer", e);
System.exit(-1);
}
} | 3.68 |
hbase_Compactor_createWriter | /**
* Creates a writer for a new file.
* @param fd The file details.
* @return Writer for a new StoreFile
* @throws IOException if creation failed
*/
protected final StoreFileWriter createWriter(FileDetails fd, boolean shouldDropBehind,
boolean major, Consumer<Path> writerCreationTracker) throws IOException {
// When all MVCC readpoints are 0, don't write them.
// See HBASE-8166, HBASE-12600, and HBASE-13389.
return store.getStoreEngine()
.createWriter(createParams(fd, shouldDropBehind, major, writerCreationTracker));
} | 3.68 |
streampipes_EpProperties_booleanEp | /**
* Creates a new primitive property of type boolean and the provided domain property.
*
* @param runtimeName The field identifier of the event property at runtime.
 * @param domainProperty The semantics of the property as a String. The string should correspond to a URI
* provided by a vocabulary. Use one of the vocabularies provided in
* {@link org.apache.streampipes.vocabulary} or create your own domain-specific vocabulary.
* @return {@link org.apache.streampipes.model.schema.EventPropertyPrimitive}
*/
public static EventPropertyPrimitive booleanEp(Label label, String runtimeName, String domainProperty) {
return ep(label, XSD.BOOLEAN.toString(), runtimeName, domainProperty);
} | 3.68 |
rocketmq-connect_KafkaConnectAdaptorSource_processSourceRecord | /**
* process source record
*
* @param record
* @return
*/
@Override
public ConnectRecord processSourceRecord(SourceRecord record) {
record = this.transforms(record);
ConnectRecord connectRecord = Converters.fromSourceRecord(record);
return connectRecord;
} | 3.68 |
dubbo_RpcContextAttachment_getAttachment | /**
* also see {@link #getObjectAttachment(String)}.
*
* @param key
* @return attachment
*/
@Override
public String getAttachment(String key) {
Object value = attachments.get(key);
if (value instanceof String) {
return (String) value;
}
return null; // or JSON.toString(value);
} | 3.68 |
hbase_Strings_padFront | /**
 * Push the input string to the right by prepending a padding character, usually a space.
* @param input the string to pad
* @param padding the character to repeat to the left of the input string
* @param length the desired total length including the padding
* @return padding characters + input
*/
public static String padFront(String input, char padding, int length) {
if (input.length() > length) {
throw new IllegalArgumentException("input \"" + input + "\" longer than maxLength=" + length);
}
int numPaddingCharacters = length - input.length();
return StringUtils.repeat(padding, numPaddingCharacters) + input;
} | 3.68 |
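A short usage sketch of the padding behaviour described above; the logic is inlined so the example runs without the HBase dependency:

```java
public class PadFrontExample {
    public static void main(String[] args) {
        System.out.println(padFront("7", '0', 3)); // 007
    }

    // Same logic as the snippet above, inlined so the example is self-contained.
    static String padFront(String input, char padding, int length) {
        if (input.length() > length) {
            throw new IllegalArgumentException("input longer than " + length);
        }
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < length - input.length(); i++) {
            sb.append(padding);
        }
        return sb.append(input).toString();
    }
}
```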
hadoop_AbstractRESTRequestInterceptor_shutdown | /**
* Disposes the {@link RESTRequestInterceptor}.
*/
@Override
public void shutdown() {
if (this.nextInterceptor != null) {
this.nextInterceptor.shutdown();
}
} | 3.68 |
framework_ScrollbarBundle_getHandlerManager | /**
* Returns the handler manager for this scrollbar bundle.
*
* @return the handler manager
*/
protected HandlerManager getHandlerManager() {
if (handlerManager == null) {
handlerManager = new HandlerManager(this);
}
return handlerManager;
} | 3.68 |
flink_StreamConfig_serializeAllConfigs | /**
 * Serializes all object configs synchronously. Only used by operators that need to reconstruct
 * the StreamConfig internally, or by tests.
*/
public void serializeAllConfigs() {
toBeSerializedConfigObjects.forEach(
(key, object) -> {
try {
InstantiationUtil.writeObjectToConfig(object, this.config, key);
} catch (IOException e) {
throw new StreamTaskException(
String.format("Could not serialize object for key %s.", key), e);
}
});
} | 3.68 |
morf_SchemaValidator_validateView | /**
* Validates a {@link View} meets the rules.
*
* @param view The {@link View} to validate.
*/
private void validateView(View view) {
validateName(view.getName());
if (view.knowsSelectStatement()) {
validateColumnNames(FluentIterable.from(view.getSelectStatement().getFields()).transform(FIELD_TO_NAME).toList(), view.getName());
}
} | 3.68 |
pulsar_LinuxInfoUtils_isCGroupEnabled | /**
 * Determine whether the OS has cgroups enabled.
*/
public static boolean isCGroupEnabled() {
try {
if (metrics == null) {
return Files.exists(Paths.get(CGROUPS_CPU_USAGE_PATH));
}
String provider = (String) getMetricsProviderMethod.invoke(metrics);
log.info("[LinuxInfo] The system metrics provider is: {}", provider);
return provider.contains("cgroup");
} catch (Exception e) {
log.warn("[LinuxInfo] Failed to check cgroup CPU: {}", e.getMessage());
return false;
}
} | 3.68 |
hadoop_ConverterUtils_toString | /*
* This method is deprecated, use {@link ContainerId#toString()} instead.
*/
@Public
@Deprecated
public static String toString(ContainerId cId) {
return cId == null ? null : cId.toString();
} | 3.68 |
flink_MapStateDescriptor_getKeySerializer | /**
* Gets the serializer for the keys in the state.
*
* @return The serializer for the keys in the state.
*/
public TypeSerializer<UK> getKeySerializer() {
final TypeSerializer<Map<UK, UV>> rawSerializer = getSerializer();
if (!(rawSerializer instanceof MapSerializer)) {
throw new IllegalStateException("Unexpected serializer type.");
}
return ((MapSerializer<UK, UV>) rawSerializer).getKeySerializer();
} | 3.68 |
hbase_BalancerClusterState_computeRegionServerRegionCacheRatio | /**
* Populate the maps containing information about how much a region is cached on a region server.
*/
private void computeRegionServerRegionCacheRatio() {
regionIndexServerIndexRegionCachedRatio = new HashMap<>();
regionServerIndexWithBestRegionCachedRatio = new int[numRegions];
for (int region = 0; region < numRegions; region++) {
float bestRegionCacheRatio = 0.0f;
int serverWithBestRegionCacheRatio = 0;
for (int server = 0; server < numServers; server++) {
float regionCacheRatio = getRegionCacheRatioOnRegionServer(region, server);
if (regionCacheRatio > 0.0f || server == regionIndexToServerIndex[region]) {
// A region with cache ratio 0 on a server means nothing. Hence, just make a note of
// cache ratio only if the cache ratio is greater than 0.
Pair<Integer, Integer> regionServerPair = new Pair<>(region, server);
regionIndexServerIndexRegionCachedRatio.put(regionServerPair, regionCacheRatio);
}
if (regionCacheRatio > bestRegionCacheRatio) {
serverWithBestRegionCacheRatio = server;
// If the server currently hosting the region has equal cache ratio to a historical
// server, consider the current server to keep hosting the region
bestRegionCacheRatio = regionCacheRatio;
} else if (
regionCacheRatio == bestRegionCacheRatio && server == regionIndexToServerIndex[region]
) {
// If two servers have same region cache ratio, then the server currently hosting the
// region
// should retain the region
serverWithBestRegionCacheRatio = server;
}
}
regionServerIndexWithBestRegionCachedRatio[region] = serverWithBestRegionCacheRatio;
Pair<Integer, Integer> regionServerPair =
new Pair<>(region, regionIndexToServerIndex[region]);
float tempRegionCacheRatio = regionIndexServerIndexRegionCachedRatio.get(regionServerPair);
if (tempRegionCacheRatio > bestRegionCacheRatio) {
LOG.warn(
"INVALID CONDITION: region {} on server {} cache ratio {} is greater than the "
+ "best region cache ratio {} on server {}",
regions[region].getEncodedName(), servers[regionIndexToServerIndex[region]],
tempRegionCacheRatio, bestRegionCacheRatio, servers[serverWithBestRegionCacheRatio]);
}
}
} | 3.68 |
framework_ColorPickerPopup_checkIfTabsNeeded | /**
* Checks if tabs are needed and hides them if not
*/
private void checkIfTabsNeeded() {
tabs.setTabsVisible(tabs.getComponentCount() > 1);
} | 3.68 |
dubbo_LoadingStrategy_includedPackagesInCompatibleType | /**
 * Restricts which classes may be loaded as SPI implementations for `org.alibaba.dubbo`
 * package type SPI classes (kept for compatibility).
 * For example, this can restrict loading so that only implementation classes under the
 * package `org.xxx.xxx` can be loaded as SPI implementations.
 *
 * @return packages that can be loaded in `org.alibaba.dubbo`'s SPI
*/
default String[] includedPackagesInCompatibleType() {
// default match all
return null;
} | 3.68 |
flink_TableEnvironment_fromValues | /**
* Creates a Table from given collection of objects with a given row type.
*
* <p>The difference between this method and {@link #fromValues(Object...)} is that the schema
* can be manually adjusted. It might be helpful for assigning more generic types like e.g.
* DECIMAL or naming the columns.
*
* <p>Examples:
*
* <pre>{@code
* tEnv.fromValues(
* DataTypes.ROW(
* DataTypes.FIELD("id", DataTypes.DECIMAL(10, 2)),
* DataTypes.FIELD("name", DataTypes.STRING())
* ),
* row(1, "ABC"),
* row(2L, "ABCDE")
* )
* }</pre>
*
* <p>will produce a Table with a schema as follows:
*
* <pre>{@code
* root
* |-- id: DECIMAL(10, 2)
* |-- name: STRING
* }</pre>
*
* <p>For more examples see {@link #fromValues(Object...)}.
*
* @param rowType Expected row type for the values.
* @param values Expressions for constructing rows of the VALUES table.
* @see #fromValues(Object...)
*/
default Table fromValues(AbstractDataType<?> rowType, Object... values) {
// It is necessary here to implement TableEnvironment#fromValues(Object...) for
// BatchTableEnvImpl.
// In scala varargs are translated to Seq. Due to the type erasure Seq<Expression> and
// Seq<Object>
// are the same. It is not a problem in java as varargs in java are translated to an array.
return fromValues(rowType, Arrays.asList(values));
} | 3.68 |
framework_LogSection_setScrollLock | /**
* Activates or deactivates scroll lock
*
* @param locked
*/
void setScrollLock(boolean locked) {
if (locked && scrollTimer != null) {
scrollTimer.cancel();
scrollTimer = null;
} else if (!locked && scrollTimer == null) {
scrollTimer = new Timer() {
@Override
public void run() {
Element el = (Element) contentElement.getLastChild();
if (el != null) {
el = el.getFirstChildElement();
if (el != null) {
el.scrollIntoView();
}
}
}
};
}
scroll.setStyleDependentName(VDebugWindow.STYLENAME_ACTIVE, locked);
} | 3.68 |
hbase_ScanWildcardColumnTracker_done | /**
* We can never know a-priori if we are done, so always return false.
*/
@Override
public boolean done() {
return false;
} | 3.68 |
framework_VAbstractSplitPanel_getFirstContainer | /**
* Gets the first region's container element.
*
* @since 7.5.1
* @return the container element
*/
protected Element getFirstContainer() {
return firstContainer;
} | 3.68 |
framework_GridRowDragger_getSourceDataProviderUpdater | /**
* Returns the source grid data provider updater.
* <p>
* Default is {@code null} and the items are just removed from the source
* grid, which only works for {@link ListDataProvider}.
*
* @return the source grid drop handler
*/
public SourceDataProviderUpdater<T> getSourceDataProviderUpdater() {
return sourceDataProviderUpdater;
} | 3.68 |
hbase_Tag_getValueAsString | /**
* Converts the value bytes of the given tag into a String value
* @param tag The Tag
* @return value as String
*/
public static String getValueAsString(Tag tag) {
if (tag.hasArray()) {
return Bytes.toString(tag.getValueArray(), tag.getValueOffset(), tag.getValueLength());
}
return Bytes.toString(cloneValue(tag));
} | 3.68 |
hudi_LazyIterableIterator_start | /**
* Called once, before any elements are processed.
*/
protected void start() {} | 3.68 |
flink_HiveParserUtils_getGenericUDAFEvaluator | // Returns the GenericUDAFEvaluator for the aggregation. This is called once for each GroupBy
// aggregation.
// TODO: Requiring a GenericUDAFEvaluator means we only support hive UDAFs. Need to avoid this
// to support flink UDAFs.
public static GenericUDAFEvaluator getGenericUDAFEvaluator(
String aggName,
ArrayList<ExprNodeDesc> aggParameters,
HiveParserASTNode aggTree,
boolean isDistinct,
boolean isAllColumns,
SqlOperatorTable opTable)
throws SemanticException {
ArrayList<ObjectInspector> originalParameterTypeInfos =
getWritableObjectInspector(aggParameters);
GenericUDAFEvaluator result =
FunctionRegistry.getGenericUDAFEvaluator(
aggName, originalParameterTypeInfos, isDistinct, isAllColumns);
if (result == null) {
// this happens for temp functions
SqlOperator sqlOperator =
getSqlOperator(aggName, opTable, SqlFunctionCategory.USER_DEFINED_FUNCTION);
if (isBridgingSqlAggFunction(sqlOperator)
&& getBridgingSqlFunctionDefinition(sqlOperator) instanceof HiveGenericUDAF) {
HiveGenericUDAF hiveGenericUDAF =
(HiveGenericUDAF) getBridgingSqlFunctionDefinition(sqlOperator);
result =
hiveGenericUDAF.createEvaluator(
originalParameterTypeInfos.toArray(new ObjectInspector[0]));
}
}
if (null == result) {
String reason =
"Looking for UDAF Evaluator\""
+ aggName
+ "\" with parameters "
+ originalParameterTypeInfos;
throw new SemanticException(
HiveParserErrorMsg.getMsg(
ErrorMsg.INVALID_FUNCTION_SIGNATURE, aggTree.getChild(0), reason));
}
return result;
} | 3.68 |
MagicPlugin_MapController_forceReloadPlayerPortrait | /**
* Force reload of a player headshot.
*/
@Override
public void forceReloadPlayerPortrait(String worldName, String playerName) {
String url = CompatibilityLib.getSkinUtils().getOnlineSkinURL(playerName);
if (url != null) {
forceReload(worldName, url, 8, 8, 8, 8);
}
} | 3.68 |
hudi_CompactionAdminClient_validateCompactionPlan | /**
* Validate all compaction operations in a compaction plan. Verifies the file-slices are consistent with corresponding
* compaction operations.
*
* @param metaClient Hoodie Table Meta Client
* @param compactionInstant Compaction Instant
*/
public List<ValidationOpResult> validateCompactionPlan(HoodieTableMetaClient metaClient, String compactionInstant,
int parallelism) throws IOException {
HoodieCompactionPlan plan = getCompactionPlan(metaClient, compactionInstant);
HoodieTableFileSystemView fsView =
new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline());
if (plan.getOperations() != null) {
List<CompactionOperation> ops = plan.getOperations().stream()
.map(CompactionOperation::convertFromAvroRecordInstance).collect(Collectors.toList());
context.setJobStatus(this.getClass().getSimpleName(), "Validate compaction operations: " + config.getTableName());
return context.map(ops, op -> {
try {
return validateCompactionOperation(metaClient, compactionInstant, op, Option.of(fsView));
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}, parallelism);
}
return new ArrayList<>();
} | 3.68 |
hudi_HoodieMergeHandle_init | /**
 * Load the new incoming records into a map keyed by record key.
*/
protected void init(String fileId, Iterator<HoodieRecord<T>> newRecordsItr) {
initializeIncomingRecordsMap();
while (newRecordsItr.hasNext()) {
HoodieRecord<T> record = newRecordsItr.next();
// update the new location of the record, so we know where to find it next
if (needsUpdateLocation()) {
record.unseal();
record.setNewLocation(new HoodieRecordLocation(instantTime, fileId));
record.seal();
}
// NOTE: Once Records are added to map (spillable-map), DO NOT change it as they won't persist
keyToNewRecords.put(record.getRecordKey(), record);
}
LOG.info("Number of entries in MemoryBasedMap => "
+ ((ExternalSpillableMap) keyToNewRecords).getInMemoryMapNumEntries()
+ ", Total size in bytes of MemoryBasedMap => "
+ ((ExternalSpillableMap) keyToNewRecords).getCurrentInMemoryMapSize() + ", Number of entries in BitCaskDiskMap => "
+ ((ExternalSpillableMap) keyToNewRecords).getDiskBasedMapNumEntries() + ", Size of file spilled to disk => "
+ ((ExternalSpillableMap) keyToNewRecords).getSizeOfFileOnDiskInBytes());
} | 3.68 |
framework_CurrentInstance_set | /**
* Sets the current instance of the given type.
*
* @see ThreadLocal
*
* @param type
* the class that should be used when getting the current
* instance back
* @param instance
* the actual instance
*/
public static <T> CurrentInstance set(Class<T> type, T instance) {
Map<Class<?>, CurrentInstance> map = INSTANCES.get();
CurrentInstance previousInstance = null;
if (instance == null) {
// remove the instance
if (map != null) {
previousInstance = map.remove(type);
if (map.isEmpty()) {
INSTANCES.remove();
map = null;
}
}
} else {
 assert type.isInstance(instance) : "Invalid instance type";
if (map == null) {
map = new HashMap<>();
INSTANCES.set(map);
}
previousInstance = map.put(type, new CurrentInstance(instance));
}
if (previousInstance == null) {
previousInstance = CURRENT_INSTANCE_NULL;
}
return previousInstance;
} | 3.68 |
framework_NestedMethodProperty_readObject | /* Special serialization to handle method references */
private void readObject(ObjectInputStream in)
throws IOException, ClassNotFoundException {
in.defaultReadObject();
initialize(instance.getClass(), propertyName);
} | 3.68 |
flink_LocalityAwareSplitAssigner_getNextUnassignedMinLocalCountSplit | /**
* Retrieves a LocatableInputSplit with minimum local count. InputSplits which have already
* been assigned (i.e., which are not contained in the provided set) are filtered out. The
* returned input split is NOT removed from the provided set.
*
* @param unassignedSplits Set of unassigned input splits.
* @return An input split with minimum local count or null if all splits have been assigned.
*/
@Nullable
SplitWithInfo getNextUnassignedMinLocalCountSplit(Set<SplitWithInfo> unassignedSplits) {
if (splits.size() == 0) {
return null;
}
do {
elementCycleCount--;
// take first split of the list
SplitWithInfo split = splits.pollFirst();
if (unassignedSplits.contains(split)) {
int localCount = split.getLocalCount();
// still unassigned, check local count
if (localCount > minLocalCount) {
// re-insert at end of the list and continue to look for split with smaller
// local count
splits.offerLast(split);
// check and update second smallest local count
if (nextMinLocalCount == -1 || split.getLocalCount() < nextMinLocalCount) {
nextMinLocalCount = split.getLocalCount();
}
split = null;
}
} else {
// split was already assigned
split = null;
}
if (elementCycleCount == 0) {
// one full cycle, but no split with min local count found
// update minLocalCnt and element cycle count for next pass over the splits
minLocalCount = nextMinLocalCount;
nextMinLocalCount = -1;
elementCycleCount = splits.size();
}
if (split != null) {
// found a split to assign
return split;
}
} while (elementCycleCount > 0);
// no split left
return null;
} | 3.68 |
flink_DecimalData_toBigDecimal | /** Converts this {@link DecimalData} into an instance of {@link BigDecimal}. */
public BigDecimal toBigDecimal() {
BigDecimal bd = decimalVal;
if (bd == null) {
decimalVal = bd = BigDecimal.valueOf(longVal, scale);
}
return bd;
} | 3.68 |
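The lazy conversion relies on BigDecimal.valueOf(unscaledValue, scale). A quick standalone illustration of that factory method (plain JDK, not Flink code):

```java
import java.math.BigDecimal;

public class UnscaledValueExample {
    public static void main(String[] args) {
        // An unscaled long of 12345 with scale 2 represents 123.45.
        BigDecimal bd = BigDecimal.valueOf(12345L, 2);
        System.out.println(bd); // 123.45
    }
}
```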
streampipes_StatementHandler_fillPreparedStatement | /**
* Fills a prepared statement with the actual values base on {@link StatementHandler#eventParameterMap}. If
* {@link StatementHandler#eventParameterMap} is empty or not complete (which should only happen once in the
* beginning), it calls
 * {@link StatementHandler#generatePreparedStatement(DbDescription, TableDescription, Connection, Map)} to generate a new one.
*
* @param event
* @param pre
* @throws SQLException
* @throws SpRuntimeException
*/
private void fillPreparedStatement(DbDescription dbDescription, TableDescription tableDescription,
Connection connection, final Map<String, Object> event, String pre)
throws SQLException, SpRuntimeException {
//TODO: Possible error: when the event does not contain all objects of the parameter list
for (Map.Entry<String, Object> pair : event.entrySet()) {
String newKey = pre + pair.getKey();
if (pair.getValue() instanceof Map) {
// recursively extracts nested values
fillPreparedStatement(dbDescription, tableDescription, connection, (Map<String, Object>) pair.getValue(),
newKey + "_");
} else {
if (!eventParameterMap.containsKey(newKey)) {
//TODO: start the for loop all over again
generatePreparedStatement(dbDescription, tableDescription, connection, event);
}
ParameterInformation p = eventParameterMap.get(newKey);
StatementUtils.setValue(p, pair.getValue(), this.getPreparedStatement());
}
}
} | 3.68 |
hbase_UnsafeAccess_toInt | /**
 * Reads an int value at the given Object's offset, assuming it was written in big-endian format.
* @return int value at offset
*/
public static int toInt(Object ref, long offset) {
if (LITTLE_ENDIAN) {
return Integer.reverseBytes(HBasePlatformDependent.getInt(ref, offset));
}
return HBasePlatformDependent.getInt(ref, offset);
} | 3.68 |
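On little-endian hardware the raw memory load yields the bytes in reversed order, so Integer.reverseBytes restores the big-endian interpretation. A quick standalone check (plain JDK, independent of the HBase internals above):

```java
public class ReverseBytesExample {
    public static void main(String[] args) {
        // The bytes 01 02 03 04 read natively on a little-endian machine appear as 0x04030201.
        int littleEndianView = 0x04030201;
        System.out.printf("0x%08X%n", Integer.reverseBytes(littleEndianView)); // 0x01020304
    }
}
```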
framework_VCheckBoxGroup_focus | /**
* Set focus to the first check box.
*/
@Override
public void focus() {
// If focus is set on creation, need to wait until options are populated
Scheduler.get().scheduleDeferred(() -> {
getWidget().focusFirstEnabledChild();
});
} | 3.68 |
hbase_RpcServer_getCurrentCall | /**
* Needed for features such as delayed calls. We need to be able to store the current call so that
* we can complete it later or ask questions of what is supported by the current ongoing call.
* @return An RpcCallContext backed by the currently ongoing call (gotten from a thread local)
*/
public static Optional<RpcCall> getCurrentCall() {
return Optional.ofNullable(CurCall.get());
} | 3.68 |
dubbo_UrlUtils_allSerializations | /**
 * Get all serializations, preserving insertion order
*
* @param url url
* @return {@link List}<{@link String}>
*/
@SuppressWarnings("unchecked")
public static Collection<String> allSerializations(URL url) {
// preferSerialization -> serialization -> default serialization
Set<String> serializations = new LinkedHashSet<>(preferSerialization(url));
Optional.ofNullable(url.getParameter(SERIALIZATION_KEY))
.filter(StringUtils::isNotBlank)
.ifPresent(serializations::add);
serializations.add(DefaultSerializationSelector.getDefaultRemotingSerialization());
return Collections.unmodifiableSet(serializations);
} | 3.68 |
hbase_HRegion_setClosing | /**
* Exposed for some very specific unit tests.
*/
public void setClosing(boolean closing) {
this.closing.set(closing);
} | 3.68 |
dubbo_ApplicationModel_getEnvironment | /**
* @deprecated Replace to {@link ScopeModel#modelEnvironment()}
*/
@Deprecated
public static Environment getEnvironment() {
return defaultModel().modelEnvironment();
} | 3.68 |
hadoop_LightWeightLinkedSet_resetBookmark | /**
* Resets the bookmark to the beginning of the list.
*/
public void resetBookmark() {
this.bookmark.next = this.head;
} | 3.68 |
flink_DynamicTableFactory_forwardOptions | /**
* Returns a set of {@link ConfigOption} that are directly forwarded to the runtime
* implementation but don't affect the final execution topology.
*
* <p>Options declared here can override options of the persisted plan during an enrichment
* phase. Since a restored topology is static, an implementer has to ensure that the declared
* options don't affect fundamental abilities such as {@link SupportsProjectionPushDown} or
* {@link SupportsFilterPushDown}.
*
* <p>For example, given a database connector, if an option defines the connection timeout,
* changing this value does not affect the pipeline topology and can be allowed. However, an
* option that defines whether the connector supports {@link SupportsReadingMetadata} or not is
* not allowed. The planner might not react to changed abilities anymore.
*
* @see DynamicTableFactory.Context#getEnrichmentOptions()
* @see TableFactoryHelper#getOptions()
* @see FormatFactory#forwardOptions()
*/
default Set<ConfigOption<?>> forwardOptions() {
return Collections.emptySet();
} | 3.68 |
hadoop_SnappyCompressor_setInputFromSavedData | /**
* If a write would exceed the capacity of the direct buffers, it is set
* aside to be loaded by this function while the compressed data are
* consumed.
*/
void setInputFromSavedData() {
if (0 >= userBufLen) {
return;
}
finished = false;
uncompressedDirectBufLen = Math.min(userBufLen, directBufferSize);
((ByteBuffer) uncompressedDirectBuf).put(userBuf, userBufOff,
uncompressedDirectBufLen);
// Note how much data is being fed to snappy
userBufOff += uncompressedDirectBufLen;
userBufLen -= uncompressedDirectBufLen;
} | 3.68 |
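A rough, dependency-free sketch of the same bookkeeping: copy at most one staging buffer's worth of the saved user input per call and advance the offset/length counters. Field and method names mirror the snippet, but the class itself is hypothetical and does no actual compression.

import java.nio.ByteBuffer;

public class ChunkedFeeder {
    private final ByteBuffer stagingBuf; // stands in for uncompressedDirectBuf
    private byte[] userBuf;
    private int userBufOff;
    private int userBufLen;

    ChunkedFeeder(int stagingSize) {
        this.stagingBuf = ByteBuffer.allocate(stagingSize);
    }

    void setInput(byte[] buf, int off, int len) {
        this.userBuf = buf;
        this.userBufOff = off;
        this.userBufLen = len;
    }

    // Moves at most one staging buffer's worth of saved input, like setInputFromSavedData().
    int feedFromSavedData() {
        if (userBufLen <= 0) {
            return 0;
        }
        stagingBuf.clear();
        int chunk = Math.min(userBufLen, stagingBuf.capacity());
        stagingBuf.put(userBuf, userBufOff, chunk);
        userBufOff += chunk;
        userBufLen -= chunk;
        return chunk;
    }

    public static void main(String[] args) {
        ChunkedFeeder f = new ChunkedFeeder(4);
        f.setInput(new byte[10], 0, 10);
        System.out.println(f.feedFromSavedData()); // 4
        System.out.println(f.feedFromSavedData()); // 4
        System.out.println(f.feedFromSavedData()); // 2
    }
}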
pulsar_InMemoryDelayedDeliveryTracker_checkAndUpdateHighest | /**
 * Check that the new delivery time comes after the current highest tracked time, or at
 * least within one tick time interval of it; otherwise messages no longer have a fixed delay.
*/
private void checkAndUpdateHighest(long deliverAt) {
if (deliverAt < (highestDeliveryTimeTracked - tickTimeMillis)) {
messagesHaveFixedDelay = false;
}
highestDeliveryTimeTracked = Math.max(highestDeliveryTimeTracked, deliverAt);
} | 3.68 |
flink_ResourceGuard_getLeaseCount | /** Returns the current count of open leases. */
public int getLeaseCount() {
return leaseCount;
} | 3.68 |
hbase_RSGroupInfoManagerImpl_refresh | /**
* Read rsgroup info from the source of truth, the hbase:rsgroup table. Update zk cache. Called on
* startup of the manager.
*/
private synchronized void refresh(boolean forceOnline) throws IOException {
List<RSGroupInfo> groupList = new ArrayList<>();
// Overwrite anything read from zk, group table is source of truth
// if online read from GROUP table
if (forceOnline || isOnline()) {
LOG.debug("Refreshing in Online mode.");
groupList.addAll(retrieveGroupListFromGroupTable());
} else {
LOG.debug("Refreshing in Offline mode.");
groupList.addAll(retrieveGroupListFromZookeeper());
}
    // This is added to the end of the list so it overwrites the 'default' rsgroup loaded
    // from the rsgroup table or zk
groupList.add(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, getDefaultServers(groupList)));
// populate the data
HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap();
for (RSGroupInfo group : groupList) {
newGroupMap.put(group.getName(), group);
}
resetRSGroupMap(newGroupMap);
updateCacheOfRSGroups(newGroupMap.keySet());
} | 3.68 |
framework_VComboBox_selectItem | /*
* Sets the selected item in the popup menu.
*/
private void selectItem(final MenuItem newSelectedItem) {
menu.selectItem(newSelectedItem);
// Set the icon.
ComboBoxSuggestion suggestion = (ComboBoxSuggestion) newSelectedItem
.getCommand();
setSelectedItemIcon(suggestion.getIconUri());
// Set the text.
setText(suggestion.getReplacementString());
} | 3.68 |
flink_BinaryStringData_compareMultiSegments | /** Finds the segment boundaries, then compares the strings MemorySegment by MemorySegment. */
private int compareMultiSegments(BinaryStringData other) {
if (binarySection.sizeInBytes == 0 || other.binarySection.sizeInBytes == 0) {
return binarySection.sizeInBytes - other.binarySection.sizeInBytes;
}
int len = Math.min(binarySection.sizeInBytes, other.binarySection.sizeInBytes);
MemorySegment seg1 = binarySection.segments[0];
MemorySegment seg2 = other.binarySection.segments[0];
int segmentSize = binarySection.segments[0].size();
int otherSegmentSize = other.binarySection.segments[0].size();
int sizeOfFirst1 = segmentSize - binarySection.offset;
int sizeOfFirst2 = otherSegmentSize - other.binarySection.offset;
int varSegIndex1 = 1;
int varSegIndex2 = 1;
// find the first segment of this string.
while (sizeOfFirst1 <= 0) {
sizeOfFirst1 += segmentSize;
seg1 = binarySection.segments[varSegIndex1++];
}
while (sizeOfFirst2 <= 0) {
sizeOfFirst2 += otherSegmentSize;
seg2 = other.binarySection.segments[varSegIndex2++];
}
int offset1 = segmentSize - sizeOfFirst1;
int offset2 = otherSegmentSize - sizeOfFirst2;
int needCompare = Math.min(Math.min(sizeOfFirst1, sizeOfFirst2), len);
while (needCompare > 0) {
// compare in one segment.
for (int i = 0; i < needCompare; i++) {
int res = (seg1.get(offset1 + i) & 0xFF) - (seg2.get(offset2 + i) & 0xFF);
if (res != 0) {
return res;
}
}
if (needCompare == len) {
break;
}
len -= needCompare;
// next segment
if (sizeOfFirst1 < sizeOfFirst2) { // I am smaller
seg1 = binarySection.segments[varSegIndex1++];
offset1 = 0;
offset2 += needCompare;
sizeOfFirst1 = segmentSize;
sizeOfFirst2 -= needCompare;
} else if (sizeOfFirst1 > sizeOfFirst2) { // other is smaller
seg2 = other.binarySection.segments[varSegIndex2++];
offset2 = 0;
offset1 += needCompare;
sizeOfFirst2 = otherSegmentSize;
sizeOfFirst1 -= needCompare;
} else { // same, should go ahead both.
seg1 = binarySection.segments[varSegIndex1++];
seg2 = other.binarySection.segments[varSegIndex2++];
offset1 = 0;
offset2 = 0;
sizeOfFirst1 = segmentSize;
sizeOfFirst2 = otherSegmentSize;
}
needCompare = Math.min(Math.min(sizeOfFirst1, sizeOfFirst2), len);
}
checkArgument(needCompare == len);
return binarySection.sizeInBytes - other.binarySection.sizeInBytes;
} | 3.68 |
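A simplified stand-alone version of the comparison above: unsigned byte-wise lexicographic order over chunked storage, with ties between prefixes broken by total length. Plain byte arrays stand in for Flink's MemorySegments, and the class is made up for the sketch.

import java.util.Arrays;
import java.util.List;

public class ChunkedCompare {
    // Compares two byte sequences, each stored as a list of chunks, lexicographically and
    // unsigned; if one is a prefix of the other, the shorter one sorts first.
    static int compare(List<byte[]> a, int lenA, List<byte[]> b, int lenB) {
        int i = 0, j = 0, ai = 0, bi = 0;
        int compared = 0, len = Math.min(lenA, lenB);
        while (compared < len) {
            if (ai == a.get(i).length) { i++; ai = 0; } // move to next chunk of a
            if (bi == b.get(j).length) { j++; bi = 0; } // move to next chunk of b
            int res = (a.get(i)[ai++] & 0xFF) - (b.get(j)[bi++] & 0xFF);
            if (res != 0) {
                return res;
            }
            compared++;
        }
        return lenA - lenB;
    }

    public static void main(String[] args) {
        List<byte[]> x = Arrays.asList(new byte[]{1, 2}, new byte[]{3});
        List<byte[]> y = Arrays.asList(new byte[]{1}, new byte[]{2, 3, 4});
        System.out.println(compare(x, 3, y, 4)); // negative: x is a prefix of y
    }
}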
dubbo_RegistrySpecListener_onPost | /**
 * Performs auto-increment on the monitored key.
 * A custom listener can be used instead of this generic operation.
*/
public static AbstractMetricsKeyListener onPost(MetricsKey metricsKey, CombMetricsCollector<?> collector) {
return AbstractMetricsKeyListener.onEvent(
metricsKey, event -> ((RegistryMetricsCollector) collector).incrMetricsNum(metricsKey, getRgs(event)));
} | 3.68 |
hudi_HoodieAvroHFileReader_readRecords | /**
* NOTE: THIS SHOULD ONLY BE USED FOR TESTING, RECORDS ARE MATERIALIZED EAGERLY
* <p>
* Reads all the records with given schema and filtering keys.
*/
public static List<IndexedRecord> readRecords(HoodieAvroHFileReader reader,
List<String> keys,
Schema schema) throws IOException {
Collections.sort(keys);
return toStream(reader.getIndexedRecordsByKeysIterator(keys, schema))
.collect(Collectors.toList());
} | 3.68 |
hbase_ByteBufferUtils_putCompressedInt | /**
 * Writes an integer to the output stream using 7-bit encoding. In each written byte, 7 bits
 * store part of the value and 1 bit indicates whether another byte follows.
 * @param out   Where to put compressed data
 * @param value Int to be compressed.
* @return Number of bytes written.
* @throws IOException on stream error
*/
public static int putCompressedInt(OutputStream out, final int value) throws IOException {
int i = 0;
int tmpvalue = value;
do {
byte b = (byte) (tmpvalue & VALUE_MASK);
tmpvalue >>>= NEXT_BIT_SHIFT;
if (tmpvalue != 0) {
b |= (byte) NEXT_BIT_MASK;
}
out.write(b);
i++;
} while (tmpvalue != 0);
return i;
} | 3.68 |
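A self-contained round trip of the 7-bit encoding described above, with the masks written out as 0x7F and 0x80 instead of HBase's named constants; the reader is an illustrative counterpart, not HBase code.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class VarIntDemo {
    // Writes 'value' 7 bits at a time, setting the high bit when more bytes follow.
    static int putCompressedInt(OutputStream out, int value) throws IOException {
        int written = 0;
        int v = value;
        do {
            byte b = (byte) (v & 0x7F);
            v >>>= 7;
            if (v != 0) {
                b |= (byte) 0x80;
            }
            out.write(b);
            written++;
        } while (v != 0);
        return written;
    }

    // Inverse operation: accumulates 7-bit groups until the continuation bit is clear.
    static int readCompressedInt(InputStream in) throws IOException {
        int value = 0, shift = 0, b;
        do {
            b = in.read();
            value |= (b & 0x7F) << shift;
            shift += 7;
        } while ((b & 0x80) != 0);
        return value;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        System.out.println(putCompressedInt(bos, 300));        // 2 bytes written
        System.out.println(readCompressedInt(
                new ByteArrayInputStream(bos.toByteArray()))); // 300
    }
}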
flink_AbstractBinaryWriter_writeString | /** See {@link BinarySegmentUtils#readStringData(MemorySegment[], int, int, long)}. */
@Override
public void writeString(int pos, StringData input) {
BinaryStringData string = (BinaryStringData) input;
if (string.getSegments() == null) {
String javaObject = string.toString();
writeBytes(pos, javaObject.getBytes(StandardCharsets.UTF_8));
} else {
int len = string.getSizeInBytes();
if (len <= 7) {
byte[] bytes = BinarySegmentUtils.allocateReuseBytes(len);
BinarySegmentUtils.copyToBytes(
string.getSegments(), string.getOffset(), bytes, 0, len);
writeBytesToFixLenPart(segment, getFieldOffset(pos), bytes, len);
} else {
writeSegmentsToVarLenPart(pos, string.getSegments(), string.getOffset(), len);
}
}
} | 3.68 |
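The branch above distinguishes strings of at most 7 bytes, which fit into the fixed-length slot, from longer ones that go to the variable-length region. Below is a simplified illustration of packing a short string into a single 8-byte slot; the bit layout is made up for the example and is not Flink's actual encoding.

import java.nio.charset.StandardCharsets;

public class CompactStringSlot {
    // Packs a string of at most 7 UTF-8 bytes into one long: length in the highest byte,
    // bytes in the remaining 7 (a simplified take on a fixed-length 8-byte slot).
    static long packSmall(String s) {
        byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
        if (bytes.length > 7) {
            throw new IllegalArgumentException("use the variable-length region instead");
        }
        long slot = ((long) bytes.length) << 56;
        for (int i = 0; i < bytes.length; i++) {
            slot |= (bytes[i] & 0xFFL) << (8 * (6 - i));
        }
        return slot;
    }

    static String unpackSmall(long slot) {
        int len = (int) (slot >>> 56);
        byte[] bytes = new byte[len];
        for (int i = 0; i < len; i++) {
            bytes[i] = (byte) (slot >>> (8 * (6 - i)));
        }
        return new String(bytes, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        System.out.println(unpackSmall(packSmall("flink"))); // flink
    }
}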
hudi_HoodieTableConfig_isMetadataPartitionAvailable | /**
* Checks if metadata table is enabled and the specified partition has been initialized.
*
* @param partition The partition to check
 * @return true if the specific partition has been initialized, else returns false.
*/
public boolean isMetadataPartitionAvailable(MetadataPartitionType partition) {
return getMetadataPartitions().contains(partition.getPartitionPath());
} | 3.68 |
framework_AbstractDateField_isPreventInvalidInput | /**
 * Checks whether a value change is prevented when the user-entered value does not pass the
 * integrated range validator. The default is false.
 *
 * @return {@code true} if invalid input is prevented, {@code false} otherwise
*
* @since 8.13
*/
public boolean isPreventInvalidInput() {
return preventInvalidInput;
} | 3.68 |
hadoop_AMRMProxyService_finishApplicationMaster | /**
* This is called by the AMs started on this node to unregister from the RM.
* This method does the initial authorization and then forwards the request to
* the application instance specific interceptor chain.
*/
@Override
public FinishApplicationMasterResponse finishApplicationMaster(
FinishApplicationMasterRequest request) throws YarnException,
IOException {
this.metrics.incrRequestCount();
long startTime = clock.getTime();
try {
RequestInterceptorChainWrapper pipeline =
authorizeAndGetInterceptorChain();
LOG.info("Finishing application master for {}. Tracking Url: {}.",
pipeline.getApplicationAttemptId(), request.getTrackingUrl());
FinishApplicationMasterResponse response =
pipeline.getRootInterceptor().finishApplicationMaster(request);
long endTime = clock.getTime();
this.metrics.succeededFinishAMRequests(endTime - startTime);
LOG.info("FinishAM finished with isUnregistered = {} in {} ms for {}.",
response.getIsUnregistered(), endTime - startTime,
pipeline.getApplicationAttemptId());
return response;
} catch (Throwable t) {
this.metrics.incrFailedFinishAMRequests();
throw t;
}
} | 3.68 |
framework_ComboBox_setScrollToSelectedItem | /**
* Sets whether to scroll the selected item visible (directly open the page
* on which it is) when opening the combo box popup or not. Only applies to
* single select mode.
*
* This requires finding the index of the item, which can be expensive in
* many large lazy loading containers.
*
* @param scrollToSelectedItem
* true to find the page with the selected item when opening the
* selection popup
*/
public void setScrollToSelectedItem(boolean scrollToSelectedItem) {
this.scrollToSelectedItem = scrollToSelectedItem;
} | 3.68 |
hadoop_UserDefinedValueAggregatorDescriptor_createInstance | /**
* Create an instance of the given class
* @param className the name of the class
* @return a dynamically created instance of the given class
*/
public static Object createInstance(String className) {
Object retv = null;
try {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
Class<?> theFilterClass = Class.forName(className, true, classLoader);
Constructor<?> meth = theFilterClass.getDeclaredConstructor(argArray);
meth.setAccessible(true);
retv = meth.newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
return retv;
} | 3.68 |
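A minimal variant of the same reflective-construction pattern, restricted to the no-argument constructor; the class and method names here are illustrative.

import java.lang.reflect.Constructor;

public class ReflectiveFactory {
    // Instantiates 'className' via its no-arg constructor using the context class loader.
    static Object createInstance(String className) {
        try {
            ClassLoader cl = Thread.currentThread().getContextClassLoader();
            Class<?> clazz = Class.forName(className, true, cl);
            Constructor<?> ctor = clazz.getDeclaredConstructor();
            ctor.setAccessible(true);
            return ctor.newInstance();
        } catch (ReflectiveOperationException e) {
            throw new RuntimeException("Could not instantiate " + className, e);
        }
    }

    public static void main(String[] args) {
        Object list = createInstance("java.util.ArrayList");
        System.out.println(list.getClass().getName()); // java.util.ArrayList
    }
}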