name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68)
---|---|---|
flink_StreamExecutionEnvironment_getStreamGraph | /**
* Getter of the {@link StreamGraph} of the streaming job, with the option to clear previously
* registered {@link Transformation transformations}. Clearing the transformations makes it
* possible, for example, to avoid re-executing the same operations when calling {@link #execute()}
* multiple times.
*
* @param clearTransformations Whether or not to clear previously registered transformations
* @return The stream graph representing the transformations
*/
@Internal
public StreamGraph getStreamGraph(boolean clearTransformations) {
final StreamGraph streamGraph = getStreamGraph(transformations);
if (clearTransformations) {
transformations.clear();
}
return streamGraph;
} | 3.68 |
MagicPlugin_BaseMagicProperties_clearProperty | /**
* This is used in some very specific cases where properties coming from a config file should not
* really be part of the config itself and are better treated as meta configuration.
*/
protected void clearProperty(String key) {
configuration.set(key, null);
} | 3.68 |
hadoop_ResourceVector_of | /**
* Creates a new {@code ResourceVector} with the values set in a
* {@code Resource} object.
* @param resource resource object the resource vector will be based on
* @return uniform resource vector
*/
public static ResourceVector of(Resource resource) {
ResourceVector resourceVector = new ResourceVector();
for (ResourceInformation resourceInformation : resource.getResources()) {
resourceVector.setValue(resourceInformation.getName(),
resourceInformation.getValue());
}
return resourceVector;
} | 3.68 |
morf_AllowParallelDmlHint_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return getClass().getSimpleName();
} | 3.68 |
framework_AutomaticImmediate_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 8029;
} | 3.68 |
querydsl_MetaDataExporter_setCreateScalaSources | /**
* Set true to create Scala sources instead of Java sources
*
* @param createScalaSources whether to create Scala sources (default: false)
*/
public void setCreateScalaSources(boolean createScalaSources) {
this.createScalaSources = createScalaSources;
} | 3.68 |
hbase_RestoreSnapshotHelper_getRegionsToRemove | /**
* Returns the list of regions removed during the on-disk restore. The caller is responsible for
* removing the regions from META, e.g. MetaTableAccessor.deleteRegions(...)
* @return the list of regions to remove from META
*/
public List<RegionInfo> getRegionsToRemove() {
return this.regionsToRemove;
} | 3.68 |
flink_HiveParserTypeCheckCtx_setSubqueryToRelNode | /** @param subqueryToRelNode the subqueryToRelNode to set */
public void setSubqueryToRelNode(Map<HiveParserASTNode, RelNode> subqueryToRelNode) {
this.subqueryToRelNode = subqueryToRelNode;
} | 3.68 |
flink_AbstractAutoCloseableRegistry_unregisterCloseable | /**
* Removes a {@link Closeable} from the registry.
*
* @param closeable instance to remove from the registry.
* @return true if the closeable was previously registered and became unregistered through this
* call.
*/
public final boolean unregisterCloseable(C closeable) {
if (null == closeable) {
return false;
}
synchronized (getSynchronizationLock()) {
return doUnRegister(closeable, closeableToRef);
}
} | 3.68 |
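A minimal usage sketch for the registry above, using Flink's concrete `CloseableRegistry` subclass; the stream and the ownership handling shown here are illustrative assumptions, not part of the snippet.

```java
import org.apache.flink.core.fs.CloseableRegistry;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class CloseableRegistryExample {
    public static void main(String[] args) throws IOException {
        CloseableRegistry registry = new CloseableRegistry();
        InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3});
        registry.registerCloseable(in);          // closed automatically when the registry closes
        // ... use the stream ...
        if (registry.unregisterCloseable(in)) {  // true: it was registered and is now removed
            in.close();                          // the caller takes back responsibility for closing
        }
        registry.close();
    }
}
```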
hadoop_SchedulerHealth_getLastReservationDetails | /**
* Get the details of last reservation.
*
* @return last reservation details
*/
public DetailedInformation getLastReservationDetails() {
return getDetailedInformation(Operation.RESERVATION);
} | 3.68 |
hadoop_FileIoProvider_list | /**
* Get a listing of the given directory using
* {@link FileUtil#list(File)}.
*
* @param volume target volume. null if unavailable.
* @param dir directory to be listed.
* @return array of strings representing the directory entries.
* @throws IOException
*/
public String[] list(
@Nullable FsVolumeSpi volume, File dir) throws IOException {
final long begin = profilingEventHook.beforeMetadataOp(volume, LIST);
try {
faultInjectorEventHook.beforeMetadataOp(volume, LIST);
String[] children = FileUtil.list(dir);
profilingEventHook.afterMetadataOp(volume, LIST, begin);
return children;
} catch(Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
flink_SerializedValue_fromBytes | /**
* Constructs serialized value from serialized data.
*
* @param serializedData serialized data
* @param <T> type
* @return serialized value
* @throws NullPointerException if serialized data is null
* @throws IllegalArgumentException if serialized data is empty
*/
public static <T> SerializedValue<T> fromBytes(byte[] serializedData) {
return new SerializedValue<>(serializedData);
} | 3.68 |
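A short round-trip sketch for the factory above, assuming Flink's `SerializedValue` constructor and its `getByteArray`/`deserializeValue` accessors behave as in the mainline API.

```java
import org.apache.flink.util.SerializedValue;

public class SerializedValueExample {
    public static void main(String[] args) throws Exception {
        SerializedValue<String> original = new SerializedValue<>("hello");
        byte[] bytes = original.getByteArray();                            // serialized form
        SerializedValue<String> copy = SerializedValue.fromBytes(bytes);   // wraps the raw bytes
        String value = copy.deserializeValue(SerializedValueExample.class.getClassLoader());
        System.out.println(value);                                         // prints "hello"
    }
}
```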
flink_HiveParserUtils_genValuesRelNode | // creates LogicalValues node
public static RelNode genValuesRelNode(
RelOptCluster cluster, RelDataType rowType, List<List<RexLiteral>> rows) {
List<Object> immutableRows =
rows.stream().map(HiveParserUtils::toImmutableList).collect(Collectors.toList());
Class[] argTypes = new Class[] {RelOptCluster.class, RelDataType.class, null};
if (useShadedImmutableList) {
argTypes[2] = HiveParserUtils.shadedImmutableListClz;
} else {
argTypes[2] = HiveParserUtils.immutableListClz;
}
Method method = HiveReflectionUtils.tryGetMethod(LogicalValues.class, "create", argTypes);
Preconditions.checkState(method != null, "Cannot get the method to create LogicalValues");
try {
return (RelNode)
method.invoke(
null, cluster, rowType, HiveParserUtils.toImmutableList(immutableRows));
} catch (IllegalAccessException | InvocationTargetException e) {
throw new FlinkHiveException("Failed to create LogicalValues", e);
}
} | 3.68 |
hadoop_ConverterUtils_convertFromYarn | /**
* Convert a protobuf token into a rpc token and set its service.
*
* @param protoToken the yarn token
* @param service the service for the token
* @param <T> Generic Type T.
* @return rpc token
*/
public static <T extends TokenIdentifier> Token<T> convertFromYarn(
org.apache.hadoop.yarn.api.records.Token protoToken,
Text service) {
Token<T> token = new Token<T>(protoToken.getIdentifier().array(),
protoToken.getPassword().array(),
new Text(protoToken.getKind()),
new Text(protoToken.getService()));
if (service != null) {
token.setService(service);
}
return token;
} | 3.68 |
hudi_HoodieRealtimeInputFormatUtils_addProjectionField | /**
* Add a field to the existing fields projected.
*/
private static Configuration addProjectionField(Configuration conf, String fieldName, int fieldIndex) {
String readColNames = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, "");
String readColIds = conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "");
String readColNamesPrefix = readColNames + ",";
if (readColNames == null || readColNames.isEmpty()) {
readColNamesPrefix = "";
}
String readColIdsPrefix = readColIds + ",";
if (readColIds == null || readColIds.isEmpty()) {
readColIdsPrefix = "";
}
if (!Arrays.asList(readColNames.split(",")).contains(fieldName)) {
// If not already in the list - then add it
conf.set(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, readColNamesPrefix + fieldName);
conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, readColIdsPrefix + fieldIndex);
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Adding extra column " + fieldName + ", to enable log merging cols (%s) ids (%s) ",
conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR),
conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR)));
}
}
return conf;
} | 3.68 |
querydsl_AbstractSQLUpdateClause_addBatch | /**
* Add the current state of bindings as a batch item
*
* @return the current object
*/
public C addBatch() {
batches.add(new SQLUpdateBatch(metadata, updates));
updates = new LinkedHashMap<>();
metadata = new DefaultQueryMetadata();
metadata.addJoin(JoinType.DEFAULT, entity);
return (C) this;
} | 3.68 |
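A hedged usage sketch for the batch API above with QueryDSL SQL; the `QPerson` metamodel class, its `id` and `name` paths, and the query factory are assumptions for illustration only.

```java
import com.querydsl.sql.SQLQueryFactory;
import com.querydsl.sql.dml.SQLUpdateClause;

public class BatchUpdateExample {
    // QPerson is a hypothetical, code-generated query type with id and name paths.
    static long renameTwoRows(SQLQueryFactory queryFactory, QPerson person) {
        SQLUpdateClause update = queryFactory.update(person);
        update.set(person.name, "Alice").where(person.id.eq(1L)).addBatch(); // first batch item
        update.set(person.name, "Bob").where(person.id.eq(2L)).addBatch();   // second batch item
        return update.execute();                                             // executes both items
    }
}
```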
hbase_HBaseSaslRpcServer_dispose | /** Release resources used by wrapped saslServer */
public void dispose() {
SaslUtil.safeDispose(saslServer);
} | 3.68 |
flink_ExecutionEnvironment_generateSequence | /**
* Creates a new data set that contains a sequence of numbers. The data set will be created in
* parallel, so there is no guarantee about the order of the elements.
*
* @param from The number to start at (inclusive).
* @param to The number to stop at (inclusive).
* @return A DataSet containing all numbers in the {@code [from, to]} interval.
*/
public DataSource<Long> generateSequence(long from, long to) {
return fromParallelCollection(
new NumberSequenceIterator(from, to),
BasicTypeInfo.LONG_TYPE_INFO,
Utils.getCallLocationName());
} | 3.68 |
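A small usage sketch of the sequence source above on the DataSet API; the filter step is illustrative only.

```java
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.DataSource;

public class GenerateSequenceExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSource<Long> numbers = env.generateSequence(1, 100);   // values 1..100, order not guaranteed
        numbers.filter(n -> n % 2 == 0)                            // keep the even values
               .print();                                           // print() triggers execution
    }
}
```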
framework_AbstractJunctionFilter_appliesToProperty | /**
* Returns true if a change in the named property may affect the filtering
* result. If some of the sub-filters are not in-memory filters, true is
* returned.
*
* By default, all sub-filters are iterated to check if any of them applies.
* If there are no sub-filters, false is returned - override in subclasses
* to change this behavior.
*/
@Override
public boolean appliesToProperty(Object propertyId) {
for (Filter filter : getFilters()) {
if (filter.appliesToProperty(propertyId)) {
return true;
}
}
return false;
} | 3.68 |
flink_SqlWindowTableFunction_inferRowType | /** Helper for {@link #ARG0_TABLE_FUNCTION_WINDOWING}. */
private static RelDataType inferRowType(SqlOperatorBinding opBinding) {
final RelDataTypeFactory typeFactory = opBinding.getTypeFactory();
final RelDataType inputRowType = opBinding.getOperandType(0);
final RelDataType descriptorType = opBinding.getOperandType(1);
final RelDataTypeField timeField = descriptorType.getFieldList().get(0);
final RelDataType timeAttributeType;
if (timeField.getType().getSqlTypeName() == SqlTypeName.NULL) {
// the type is not inferred yet, we should infer the type here,
// see org.apache.flink.table.planner.functions.sql.SqlDescriptorOperator.deriveType
RelDataTypeField field = inputRowType.getField(timeField.getName(), false, false);
if (field == null) {
throw new IllegalArgumentException(
String.format(
"Can't find the time attribute field '%s' in the input schema %s.",
timeField.getName(), inputRowType.getFullTypeString()));
}
timeAttributeType = field.getType();
} else {
// the type has been inferred, use it directly
timeAttributeType = timeField.getType();
}
return inferRowType(typeFactory, inputRowType, timeAttributeType);
} | 3.68 |
hbase_FilterList_parseFrom | /**
* Parse a serialized representation of {@link FilterList}
* @param pbBytes A pb serialized {@link FilterList} instance
* @return An instance of {@link FilterList} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static FilterList parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.FilterList proto;
try {
proto = FilterProtos.FilterList.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
List<Filter> rowFilters = new ArrayList<>(proto.getFiltersCount());
try {
List<FilterProtos.Filter> filtersList = proto.getFiltersList();
for (int i = 0, n = filtersList.size(); i < n; i++) {
rowFilters.add(ProtobufUtil.toFilter(filtersList.get(i)));
}
} catch (IOException ioe) {
throw new DeserializationException(ioe);
}
return new FilterList(Operator.valueOf(proto.getOperator().name()), rowFilters);
} | 3.68 |
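A brief round-trip sketch for the deserializer above; the concrete member filters (`KeyOnlyFilter`, `FirstKeyOnlyFilter`) are just convenient examples.

```java
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

import java.io.IOException;

public class FilterListRoundTrip {
    static FilterList roundTrip() throws IOException, DeserializationException {
        FilterList original = new FilterList(FilterList.Operator.MUST_PASS_ONE,
            new KeyOnlyFilter(), new FirstKeyOnlyFilter());
        byte[] bytes = original.toByteArray();   // protobuf-backed serialization
        return FilterList.parseFrom(bytes);      // reconstructs operator and member filters
    }
}
```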
hbase_ZKUtil_getParent | //
// Helper methods
//
/**
* Returns the full path of the immediate parent of the specified node.
* @param node path to get parent of
* @return parent of path, null if passed the root node or an invalid node
*/
public static String getParent(String node) {
int idx = node.lastIndexOf(ZNodePaths.ZNODE_PATH_SEPARATOR);
return idx <= 0 ? null : node.substring(0, idx);
} | 3.68 |
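Two illustrative calls for the helper above; the znode paths are hypothetical.

```java
import org.apache.hadoop.hbase.zookeeper.ZKUtil;

public class GetParentExample {
    public static void main(String[] args) {
        System.out.println(ZKUtil.getParent("/hbase/rs/server1")); // "/hbase/rs"
        System.out.println(ZKUtil.getParent("/hbase"));            // null: the parent would be the root
    }
}
```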
flink_CrossOperator_projectTuple16 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>
ProjectCross<
I1,
I2,
Tuple16<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15>>
projectTuple16() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<
Tuple16<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15>>
tType =
new TupleTypeInfo<
Tuple16<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15>>(fTypes);
return new ProjectCross<
I1,
I2,
Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
hadoop_RMAppKillByClientEvent_getIp | /**
* returns the ip address stored in this event.
* @return remoteIP
*/
public final InetAddress getIp() {
return ip;
} | 3.68 |
flink_RunLengthDecoder_initFromStream | /** Init from input stream. */
void initFromStream(int valueCount, ByteBufferInputStream in) throws IOException {
this.in = in;
if (fixedWidth) {
// initialize for repetition and definition levels
if (readLength) {
int length = readIntLittleEndian();
this.in = in.sliceStream(length);
}
} else {
// initialize for values
if (in.available() > 0) {
initWidthAndPacker(in.read());
}
}
if (bitWidth == 0) {
// 0 bit width, treat this as an RLE run of valueCount number of 0's.
this.mode = MODE.RLE;
this.currentCount = valueCount;
this.currentValue = 0;
} else {
this.currentCount = 0;
}
} | 3.68 |
dubbo_AbstractServiceRestMetadataResolver_postProcessRestMethodMetadata | /**
* Post-Process for {@link RestMethodMetadata}, sub-type could override this method for further works
*
* @param processingEnv {@link ProcessingEnvironment}
* @param serviceType The type that @Service annotated
* @param method The public method of <code>serviceType</code>
* @param metadata {@link RestMethodMetadata} maybe updated
*/
protected void postProcessRestMethodMetadata(
ProcessingEnvironment processingEnv,
TypeElement serviceType,
ExecutableElement method,
RestMethodMetadata metadata) {} | 3.68 |
pulsar_BrokerInterceptor_messageAcked | /**
* Intercept after a message ack is processed.
*
* @param cnx client Connection
* @param ackCmd Command object
*/
default void messageAcked(ServerCnx cnx, Consumer consumer,
CommandAck ackCmd) {
} | 3.68 |
hibernate-validator_NodeImpl_createPropertyNode | //TODO It would be nicer if we could return PropertyNode
public static NodeImpl createPropertyNode(String name, NodeImpl parent) {
return new NodeImpl(
name,
parent,
false,
null,
null,
ElementKind.PROPERTY,
EMPTY_CLASS_ARRAY,
null,
null,
null,
null
);
} | 3.68 |
pulsar_LocalBrokerData_updateSystemResourceUsage | // Update resource usage given each individual usage.
private void updateSystemResourceUsage(final ResourceUsage cpu, final ResourceUsage memory,
final ResourceUsage directMemory, final ResourceUsage bandwidthIn, final ResourceUsage bandwidthOut) {
this.cpu = cpu;
this.memory = memory;
this.directMemory = directMemory;
this.bandwidthIn = bandwidthIn;
this.bandwidthOut = bandwidthOut;
} | 3.68 |
hbase_KeyOnlyFilter_parseFrom | /**
* Parse a serialized representation of {@link KeyOnlyFilter}
* @param pbBytes A pb serialized {@link KeyOnlyFilter} instance
* @return An instance of {@link KeyOnlyFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static KeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.KeyOnlyFilter proto;
try {
proto = FilterProtos.KeyOnlyFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new KeyOnlyFilter(proto.getLenAsVal());
} | 3.68 |
hudi_HoodieMergedLogRecordScanner_scanByKeyPrefixes | /**
* Provides incremental scanning capability where only keys matching provided key-prefixes
* will be looked up in the delta-log files, scanned and subsequently materialized into
* the internal cache
*
* @param keyPrefixes to be looked up
*/
public void scanByKeyPrefixes(List<String> keyPrefixes) {
// We can skip scanning in case reader is in full-scan mode, in which case all blocks
// are processed upfront (no additional scanning is necessary)
if (forceFullScan) {
return;
}
List<String> missingKeyPrefixes = keyPrefixes.stream()
.filter(keyPrefix ->
// NOTE: We can skip scanning the prefixes that have already
// been covered by the previous scans
scannedPrefixes.stream().noneMatch(keyPrefix::startsWith))
.collect(Collectors.toList());
if (missingKeyPrefixes.isEmpty()) {
// All the required records are already fetched, no-op
return;
}
// NOTE: When looking up by key-prefixes unfortunately we can't short-circuit
// and will have to scan every time as we can't know (based on just
// the records cached) whether particular prefix was scanned or just records
// matching the prefix looked up (by [[scanByFullKeys]] API)
scanInternal(Option.of(KeySpec.prefixKeySpec(missingKeyPrefixes)), false);
scannedPrefixes.addAll(missingKeyPrefixes);
} | 3.68 |
hadoop_DataNodeFaultInjector_delayWriteToOsCache | /**
* Used as a hook to delay writing a packet to the OS cache.
*/
public void delayWriteToOsCache() {} | 3.68 |
zxing_GlobalHistogramBinarizer_getBlackMatrix | // Does not sharpen the data, as this call is intended to only be used by 2D Readers.
@Override
public BitMatrix getBlackMatrix() throws NotFoundException {
LuminanceSource source = getLuminanceSource();
int width = source.getWidth();
int height = source.getHeight();
BitMatrix matrix = new BitMatrix(width, height);
// Quickly calculates the histogram by sampling four rows from the image. This proved to be
// more robust on the blackbox tests than sampling a diagonal as we used to do.
initArrays(width);
int[] localBuckets = buckets;
for (int y = 1; y < 5; y++) {
int row = height * y / 5;
byte[] localLuminances = source.getRow(row, luminances);
int right = (width * 4) / 5;
for (int x = width / 5; x < right; x++) {
int pixel = localLuminances[x] & 0xff;
localBuckets[pixel >> LUMINANCE_SHIFT]++;
}
}
int blackPoint = estimateBlackPoint(localBuckets);
// We delay reading the entire image luminance until the black point estimation succeeds.
// Although we end up reading four rows twice, it is consistent with our motto of
// "fail quickly" which is necessary for continuous scanning.
byte[] localLuminances = source.getMatrix();
for (int y = 0; y < height; y++) {
int offset = y * width;
for (int x = 0; x < width; x++) {
int pixel = localLuminances[offset + x] & 0xff;
if (pixel < blackPoint) {
matrix.set(x, y);
}
}
}
return matrix;
} | 3.68 |
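A usage sketch of the binarizer above via ZXing's Java SE helpers; the `BufferedImageLuminanceSource` wrapper and the image loading are assumptions for illustration.

```java
import com.google.zxing.LuminanceSource;
import com.google.zxing.NotFoundException;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import com.google.zxing.common.BitMatrix;
import com.google.zxing.common.GlobalHistogramBinarizer;

import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;

public class BinarizerExample {
    static BitMatrix binarize(File imageFile) throws IOException, NotFoundException {
        BufferedImage image = ImageIO.read(imageFile);
        LuminanceSource source = new BufferedImageLuminanceSource(image);
        // Estimates a global black point from a few sampled rows, then thresholds every pixel.
        return new GlobalHistogramBinarizer(source).getBlackMatrix();
    }
}
```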
hbase_HBaseZKTestingUtility_getClusterTestDir | /**
* @return Where the cluster will write data on the local subsystem. Creates it if it does not
* exist already. A subdir of {@code HBaseCommonTestingUtility#getBaseTestDir()}
*/
Path getClusterTestDir() {
if (clusterTestDir == null) {
setupClusterTestDir();
}
return new Path(clusterTestDir.getAbsolutePath());
} | 3.68 |
pulsar_LinuxInfoUtils_getCpuUsageForCGroup | /**
* Get CGroup cpu usage.
* @return Cpu usage
*/
public static long getCpuUsageForCGroup() {
try {
if (metrics != null && getCpuUsageMethod != null) {
return (long) getCpuUsageMethod.invoke(metrics);
}
return readLongFromFile(Paths.get(CGROUPS_CPU_USAGE_PATH));
} catch (Exception e) {
log.error("[LinuxInfo] Failed to read CPU usage from cgroup", e);
return -1;
}
} | 3.68 |
hadoop_SchedulerHealth_getLastPreemptionDetails | /**
* Get the details of last preemption.
*
* @return last preemption details
*/
public DetailedInformation getLastPreemptionDetails() {
return getDetailedInformation(Operation.PREEMPTION);
} | 3.68 |
morf_RenameIndex_getFromIndexName | /**
* Gets the name of the index prior to the change
*
* @return the name of the index prior to the change
*/
public String getFromIndexName() {
return fromIndexName;
} | 3.68 |
hadoop_CommitUtilsWithMR_jobIdString | /**
* Get a string value of a job ID; returns meaningful text if there is no ID.
* @param context job context
* @return a string for logs
*/
public static String jobIdString(JobContext context) {
JobID jobID = context.getJobID();
return jobID != null ? jobID.toString() : "(no job ID)";
} | 3.68 |
streampipes_AbstractMigrationManager_updateDescriptions | /**
* Update all descriptions of entities in the Core that are affected by migrations.
*
* @param migrationConfigs List of migrations to take in account
* @param serviceUrl Url of the extension service that provides the migrations.
*/
protected void updateDescriptions(List<ModelMigratorConfig> migrationConfigs, String serviceUrl) {
migrationConfigs
.stream()
.collect(
// We only need to update the description once per appId,
// because this is done directly with the newest version of the description and
// no iterative migration is required.
// To avoid unnecessary, multiple updates,
// we filter the migration configs such that every appId is unique.
// This ensures that every description is only updated once.
Collectors.toMap(
ModelMigratorConfig::targetAppId,
Function.identity(),
(existing, replacement) -> existing
)
)
.values()
.stream()
.peek(config -> {
var requestUrl = getRequestUrl(config.modelType(), config.targetAppId(), serviceUrl);
performUpdate(requestUrl);
})
.toList();
} | 3.68 |
hbase_MultiByteBuff_getInt | /**
* Returns the int value at the current position. Also advances the position by the size of int
* @return the int value at the current position
*/
@Override
public int getInt() {
checkRefCount();
int remaining = this.curItem.remaining();
if (remaining >= Bytes.SIZEOF_INT) {
return this.curItem.getInt();
}
int n = 0;
for (int i = 0; i < Bytes.SIZEOF_INT; i++) {
n <<= 8;
n ^= get() & 0xFF;
}
return n;
} | 3.68 |
framework_EventRouter_hasListeners | /**
* Checks if the given Event type is listened by a listener registered to
* this router.
*
* @param eventType
* the event type to be checked
* @return true if a listener is registered for the given event type
*/
public boolean hasListeners(Class<?> eventType) {
if (listenerList != null) {
for (ListenerMethod lm : listenerList) {
if (lm.isType(eventType)) {
return true;
}
}
}
return false;
} | 3.68 |
hudi_BaseHoodieWriteClient_restoreToSavepoint | /**
* Restore the data to the savepoint.
*
* WARNING: This rolls back recent commits, deletes the associated data files, and also removes pending compactions
* after the savepoint time. Queries accessing those files will mostly fail. This is expected to be a manual operation,
* and no concurrent write or compaction is expected to be running.
*
* @param savepointTime Savepoint time to rollback to
*/
public void restoreToSavepoint(String savepointTime) {
boolean initializeMetadataTableIfNecessary = config.isMetadataTableEnabled();
if (initializeMetadataTableIfNecessary) {
try {
// Delete metadata table directly when users trigger savepoint rollback if mdt existed and if the savePointTime is beforeTimelineStarts
// or before the oldest compaction on MDT.
// We cannot restore to before the oldest compaction on MDT as we don't have the basefiles before that time.
HoodieTableMetaClient mdtMetaClient = HoodieTableMetaClient.builder()
.setConf(hadoopConf)
.setBasePath(getMetadataTableBasePath(config.getBasePath())).build();
Option<HoodieInstant> oldestMdtCompaction = mdtMetaClient.getCommitTimeline().filterCompletedInstants().firstInstant();
boolean deleteMDT = false;
if (oldestMdtCompaction.isPresent()) {
if (HoodieTimeline.LESSER_THAN_OR_EQUALS.test(savepointTime, oldestMdtCompaction.get().getTimestamp())) {
LOG.warn(String.format("Deleting MDT during restore to %s as the savepoint is older than oldest compaction %s on MDT",
savepointTime, oldestMdtCompaction.get().getTimestamp()));
deleteMDT = true;
}
}
// The instant required to sync the rollback to the MDT has been archived, so the MDT sync would fail.
// Therefore we need to delete the whole MDT here.
if (!deleteMDT) {
HoodieInstant syncedInstant = new HoodieInstant(false, HoodieTimeline.DELTA_COMMIT_ACTION, savepointTime);
if (mdtMetaClient.getCommitsTimeline().isBeforeTimelineStarts(syncedInstant.getTimestamp())) {
LOG.warn(String.format("Deleting MDT during restore to %s as the savepoint is older than the MDT timeline %s",
savepointTime, mdtMetaClient.getCommitsTimeline().firstInstant().get().getTimestamp()));
deleteMDT = true;
}
}
if (deleteMDT) {
HoodieTableMetadataUtil.deleteMetadataTable(config.getBasePath(), context);
// rollbackToSavepoint will try to bootstrap the MDT first, but the sync to the MDT would fail in this scenario,
// so we need to disable metadata initialization here.
initializeMetadataTableIfNecessary = false;
}
} catch (Exception e) {
// Metadata directory does not exist
}
}
HoodieTable<T, I, K, O> table = initTable(WriteOperationType.UNKNOWN, Option.empty(), initializeMetadataTableIfNecessary);
SavepointHelpers.validateSavepointPresence(table, savepointTime);
ValidationUtils.checkArgument(!config.shouldArchiveBeyondSavepoint(), "Restore is not supported when " + HoodieArchivalConfig.ARCHIVE_BEYOND_SAVEPOINT.key()
+ " is enabled");
restoreToInstant(savepointTime, initializeMetadataTableIfNecessary);
SavepointHelpers.validateSavepointRestore(table, savepointTime);
} | 3.68 |
flink_TableColumn_physical | /** Creates a regular table column that represents physical data. */
public static PhysicalColumn physical(String name, DataType type) {
Preconditions.checkNotNull(name, "Column name can not be null.");
Preconditions.checkNotNull(type, "Column type can not be null.");
return new PhysicalColumn(name, type);
} | 3.68 |
framework_TextArea_writeDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractTextField#writeDesign(org.jsoup.nodes.Element
* , com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void writeDesign(Element design, DesignContext designContext) {
super.writeDesign(design, designContext);
design.html(DesignFormatter.encodeForTextNode(getValue()));
} | 3.68 |
querydsl_Expressions_stringPath | /**
* Create a new Path expression
*
* @param metadata path metadata
* @return path expression
*/
public static StringPath stringPath(PathMetadata metadata) {
return new StringPath(metadata);
} | 3.68 |
hudi_HoodieAvroReadSupport_checkLegacyMode | /**
* Check whether write map/list with legacy mode.
* legacy:
* list:
* optional group obj_ids (LIST) {
* repeated binary array (UTF8);
* }
* map:
* optional group obj_ids (MAP) {
* repeated group map (MAP_KEY_VALUE) {
* required binary key (UTF8);
* required binary value (UTF8);
* }
* }
* non-legacy:
* optional group obj_ids (LIST) {
* repeated group list {
* optional binary element (UTF8);
* }
* }
* optional group obj_maps (MAP) {
* repeated group key_value {
* required binary key (UTF8);
* optional binary value (UTF8);
* }
* }
*/
private boolean checkLegacyMode(List<Type> parquetFields) {
for (Type type : parquetFields) {
if (!type.isPrimitive()) {
GroupType groupType = type.asGroupType();
OriginalType originalType = groupType.getOriginalType();
if (originalType == OriginalType.MAP
&& groupType.getFields().get(0).getOriginalType() != OriginalType.MAP_KEY_VALUE) {
return false;
}
if (originalType == OriginalType.LIST
&& !groupType.getType(0).getName().equals("array")) {
return false;
}
if (!checkLegacyMode(groupType.getFields())) {
return false;
}
}
}
return true;
} | 3.68 |
flink_ChannelWriterOutputView_getBlockCount | /**
* Gets the number of blocks used by this view.
*
* @return The number of blocks used.
*/
public int getBlockCount() {
return this.blockCount;
} | 3.68 |
flink_StreamExecutionEnvironment_readFileStream | /**
* Creates a data stream that contains the contents of files created while the system watches the
* given path. The files will be read with the system's default character set.
*
* @param filePath The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path/")
* @param intervalMillis The interval of file watching in milliseconds
* @param watchType The watch type of file stream. When watchType is {@link
* org.apache.flink.streaming.api.functions.source.FileMonitoringFunction.WatchType#ONLY_NEW_FILES},
* the system processes only new files. {@link
* org.apache.flink.streaming.api.functions.source.FileMonitoringFunction.WatchType#REPROCESS_WITH_APPENDED}
* means that the system re-processes all contents of appended file. {@link
* org.apache.flink.streaming.api.functions.source.FileMonitoringFunction.WatchType#PROCESS_ONLY_APPENDED}
* means that the system processes only appended contents of files.
* @return The DataStream containing the given directory.
* @deprecated Use {@link #readFile(FileInputFormat, String, FileProcessingMode, long)} instead.
*/
@Deprecated
@SuppressWarnings("deprecation")
public DataStream<String> readFileStream(
String filePath, long intervalMillis, FileMonitoringFunction.WatchType watchType) {
DataStream<Tuple3<String, Long, Long>> source =
addSource(
new FileMonitoringFunction(filePath, intervalMillis, watchType),
"Read File Stream source");
return source.flatMap(new FileReadFunction());
} | 3.68 |
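Since the method above is deprecated in favor of `readFile(...)`, here is a hedged sketch of the suggested replacement; the path and polling interval are placeholders.

```java
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.FileProcessingMode;

public class ReadFileExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        String path = "file:///tmp/watched-dir";                       // placeholder path
        TextInputFormat format = new TextInputFormat(new Path(path));
        DataStream<String> lines = env.readFile(
            format, path, FileProcessingMode.PROCESS_CONTINUOUSLY, 1_000L); // re-scan every second
        lines.print();
        env.execute("read-file-example");
    }
}
```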
framework_AbsoluteLayout_setRightUnits | /**
* Sets the unit for the 'right' attribute.
*
* @param rightUnits
* See {@link Sizeable} UNIT_SYMBOLS for a description of the
* available units.
*/
public void setRightUnits(Unit rightUnits) {
this.rightUnits = rightUnits;
markAsDirty();
} | 3.68 |
hudi_RequestHandler_registerFileSlicesAPI | /**
* Register File Slices API calls.
*/
private void registerFileSlicesAPI() {
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_SLICES_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_SLICES", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestFileSlices(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_SLICES_STATELESS_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_SLICES_STATELESS", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestFileSlicesStateless(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_SLICE_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_SLICE", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestFileSlice(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.FILEID_PARAM, String.class).getOrThrow(e -> new HoodieException("FILEID is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_PARTITION_UNCOMPACTED_SLICES_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_PARTITION_UNCOMPACTED_SLICES", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestUnCompactedFileSlices(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_SLICES_URL, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_SLICES", 1);
List<FileSliceDTO> dtos = sliceHandler.getAllFileSlices(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_SLICES_RANGE_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_SLICE_RANGE_INSTANT", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestFileSliceInRange(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
Arrays.asList(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.INSTANTS_PARAM, String.class).getOrThrow(e -> new HoodieException("INSTANTS_PARAM is invalid")).split(",")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_SLICES_MERGED_BEFORE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_SLICES_MERGED_BEFORE_ON_INSTANT", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestMergedFileSlicesBeforeOrOn(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrThrow(e -> new HoodieException("MAX_INSTANT_PARAM is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.LATEST_SLICES_BEFORE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LATEST_SLICES_BEFORE_ON_INSTANT", 1);
List<FileSliceDTO> dtos = sliceHandler.getLatestFileSlicesBeforeOrOn(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrThrow(e -> new HoodieException("MAX_INSTANT_PARAM is invalid")),
Boolean.parseBoolean(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.INCLUDE_FILES_IN_PENDING_COMPACTION_PARAM, String.class)
.getOrThrow(e -> new HoodieException("INCLUDE_FILES_IN_PENDING_COMPACTION_PARAM is invalid"))));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_LATEST_SLICES_BEFORE_ON_INSTANT_URL, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_LATEST_SLICES_BEFORE_ON_INSTANT", 1);
Map<String, List<FileSliceDTO>> dtos = sliceHandler.getAllLatestFileSlicesBeforeOrOn(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrThrow(e -> new HoodieException("MAX_INSTANT_PARAM is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.PENDING_COMPACTION_OPS, new ViewHandler(ctx -> {
metricsRegistry.add("PEDING_COMPACTION_OPS", 1);
List<CompactionOpDTO> dtos = sliceHandler.getPendingCompactionOperations(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.PENDING_LOG_COMPACTION_OPS, new ViewHandler(ctx -> {
metricsRegistry.add("PEDING_LOG_COMPACTION_OPS", 1);
List<CompactionOpDTO> dtos = sliceHandler.getPendingLogCompactionOperations(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_FILEGROUPS_FOR_PARTITION_URL, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_FILEGROUPS_FOR_PARTITION", 1);
List<FileGroupDTO> dtos = sliceHandler.getAllFileGroups(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_FILEGROUPS_FOR_PARTITION_STATELESS_URL, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_FILEGROUPS_FOR_PARTITION_STATELESS", 1);
List<FileGroupDTO> dtos = sliceHandler.getAllFileGroupsStateless(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.post(RemoteHoodieTableFileSystemView.REFRESH_TABLE, new ViewHandler(ctx -> {
metricsRegistry.add("REFRESH_TABLE", 1);
boolean success = sliceHandler
.refreshTable(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, success);
}, false));
app.post(RemoteHoodieTableFileSystemView.LOAD_ALL_PARTITIONS_URL, new ViewHandler(ctx -> {
metricsRegistry.add("LOAD_ALL_PARTITIONS", 1);
boolean success = sliceHandler
.loadAllPartitions(ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, success);
}, false));
app.get(RemoteHoodieTableFileSystemView.ALL_REPLACED_FILEGROUPS_BEFORE_OR_ON, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_REPLACED_FILEGROUPS_BEFORE_OR_ON", 1);
List<FileGroupDTO> dtos = sliceHandler.getReplacedFileGroupsBeforeOrOn(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrDefault(""),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_REPLACED_FILEGROUPS_BEFORE, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_REPLACED_FILEGROUPS_BEFORE", 1);
List<FileGroupDTO> dtos = sliceHandler.getReplacedFileGroupsBefore(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MAX_INSTANT_PARAM, String.class).getOrDefault(""),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_REPLACED_FILEGROUPS_AFTER_OR_ON, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_REPLACED_FILEGROUPS_AFTER_OR_ON", 1);
List<FileGroupDTO> dtos = sliceHandler.getReplacedFileGroupsAfterOrOn(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.MIN_INSTANT_PARAM, String.class).getOrDefault(""),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.ALL_REPLACED_FILEGROUPS_PARTITION, new ViewHandler(ctx -> {
metricsRegistry.add("ALL_REPLACED_FILEGROUPS_PARTITION", 1);
List<FileGroupDTO> dtos = sliceHandler.getAllReplacedFileGroups(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")),
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.PARTITION_PARAM, String.class).getOrDefault(""));
writeValueAsString(ctx, dtos);
}, true));
app.get(RemoteHoodieTableFileSystemView.PENDING_CLUSTERING_FILEGROUPS, new ViewHandler(ctx -> {
metricsRegistry.add("PENDING_CLUSTERING_FILEGROUPS", 1);
List<ClusteringOpDTO> dtos = sliceHandler.getFileGroupsInPendingClustering(
ctx.queryParamAsClass(RemoteHoodieTableFileSystemView.BASEPATH_PARAM, String.class).getOrThrow(e -> new HoodieException("Basepath is invalid")));
writeValueAsString(ctx, dtos);
}, true));
} | 3.68 |
framework_ServerRpcHandler_handleInvocation | /**
* Handles the given Legacy variable change RPC method invocation for the
* given connector.
*
* @since 7.7
* @param ui
* the UI containing the connector
* @param connector
* the connector the RPC is targeted to
* @param legacyInvocation
* information about the rpc to invoke
*/
protected void handleInvocation(UI ui, ClientConnector connector,
LegacyChangeVariablesInvocation legacyInvocation) {
Map<String, Object> changes = legacyInvocation.getVariableChanges();
try {
if (connector instanceof VariableOwner) {
// The source parameter is never used anywhere
changeVariables(null, (VariableOwner) connector, changes);
} else {
throw new IllegalStateException(
"Received a legacy variable change for "
+ connector.getClass().getName() + " ("
+ connector.getConnectorId()
+ ") which is not a VariableOwner. The client-side connector sent these legacy variables: "
+ changes.keySet());
}
} catch (Exception e) {
ui.getSession().getCommunicationManager()
.handleConnectorRelatedException(connector, e);
}
} | 3.68 |
morf_DatabaseMetaDataProvider_isEmptyDatabase | /**
* @see org.alfasoftware.morf.metadata.Schema#isEmptyDatabase()
*/
@Override
public boolean isEmptyDatabase() {
return tableNames.get().isEmpty();
} | 3.68 |
hudi_HoodieTableMetadata_isMetadataTable | /**
* Returns {@code True} if the given path contains a metadata table.
*
* @param basePath The base path to check
*/
static boolean isMetadataTable(String basePath) {
if (basePath == null || basePath.isEmpty()) {
return false;
}
if (basePath.endsWith(Path.SEPARATOR)) {
basePath = basePath.substring(0, basePath.length() - 1);
}
return basePath.endsWith(HoodieTableMetaClient.METADATA_TABLE_FOLDER_PATH);
} | 3.68 |
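Two illustrative checks for the helper above; the bucket paths are hypothetical and the metadata folder suffix is assumed to be `.hoodie/metadata`.

```java
import org.apache.hudi.metadata.HoodieTableMetadata;

public class IsMetadataTableExample {
    public static void main(String[] args) {
        // Hypothetical base paths, for illustration only.
        System.out.println(HoodieTableMetadata.isMetadataTable("s3://bucket/table/.hoodie/metadata")); // true
        System.out.println(HoodieTableMetadata.isMetadataTable("s3://bucket/table"));                   // false
    }
}
```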
flink_DecimalData_toUnscaledLong | /**
* Returns a long describing the <i>unscaled value</i> of this {@link DecimalData}.
*
* @throws ArithmeticException if this {@link DecimalData} does not exactly fit in a long.
*/
public long toUnscaledLong() {
if (isCompact()) {
return longVal;
} else {
return toBigDecimal().unscaledValue().longValueExact();
}
} | 3.68 |
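A small sketch for the accessor above, assuming Flink's `DecimalData.fromBigDecimal(BigDecimal, precision, scale)` factory; the precision and scale values are illustrative.

```java
import org.apache.flink.table.data.DecimalData;

import java.math.BigDecimal;

public class UnscaledLongExample {
    public static void main(String[] args) {
        DecimalData d = DecimalData.fromBigDecimal(new BigDecimal("12.34"), 4, 2); // compact representation
        System.out.println(d.toUnscaledLong()); // 1234, i.e. 12.34 scaled by 10^2
    }
}
```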
morf_ExistingViewStateLoader_viewChanges | /**
* Loads the set of view changes which need to be made to match the target schema.
*
* @param sourceSchema the existing schema.
* @param targetSchema the target schema.
* @return the views which should not be present, and the views which need deploying.
*/
public Result viewChanges(Schema sourceSchema, Schema targetSchema) {
// Default to dropping all the views in the source schema unless we decide otherwise
Map<String, View> viewsToDrop = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
for (View view : sourceSchema.views()) {
viewsToDrop.put(view.getName().toUpperCase(), view);
}
// And creating all the ones in the target schema.
Set<View> viewsToDeploy = new HashSet<>(targetSchema.views());
// Work out if existing views which are deployed are OK, because we really
// don't want to refresh the views on every startup.
Optional<Map<String, String>> deployedViews = existingViewHashLoader.loadViewHashes(sourceSchema);
if (deployedViews.isPresent()) {
for (View view : targetSchema.views()) {
String targetViewName = view.getName().toUpperCase();
if (viewsToDrop.containsKey(targetViewName)) {
String existingHash = deployedViews.get().get(targetViewName);
String newHash = dialect.convertStatementToHash(view.getSelectStatement());
if (existingHash == null) {
log.info(String.format("View [%s] exists but hash not present in %s", targetViewName, DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME));
} else if (!newHash.equals(existingHash)) {
log.info(String.format("View [%s] exists in %s, but hash [%s] does not match target schema [%s]", targetViewName, DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME, existingHash, newHash));
} else if (!viewDeploymentValidator.validateExistingView(view, new UpgradeSchemas(sourceSchema, targetSchema))) {
log.info(String.format("View [%s] exists in %s, but was rejected by %s", targetViewName, DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME, viewDeploymentValidator.getClass()));
} else {
log.debug(String.format("View [%s] exists in %s and was validated", targetViewName, DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME));
// All good - leave it in place.
viewsToDrop.remove(targetViewName);
viewsToDeploy.remove(view);
}
} else {
if (deployedViews.get().containsKey(targetViewName)) {
log.info(String.format("View [%s] is missing, but %s entry exists; the view may have been deleted", targetViewName, DatabaseUpgradeTableContribution.DEPLOYED_VIEWS_NAME));
viewsToDrop.put(targetViewName, view);
} else if (!viewDeploymentValidator.validateMissingView(view, new UpgradeSchemas(sourceSchema, targetSchema))) {
log.info(String.format("View [%s] is missing, but was recognized by %s; the view may have been deleted", targetViewName, viewDeploymentValidator.getClass()));
viewsToDrop.put(targetViewName, view);
} else {
log.info(String.format("View [%s] is missing", targetViewName));
}
}
}
}
return new Result(viewsToDrop.values(), viewsToDeploy);
} | 3.68 |
morf_ArchiveDataSetWriter_openOutputStreamForTable | /**
* @see org.alfasoftware.morf.xml.XmlStreamProvider.XmlOutputStreamProvider#openOutputStreamForTable(java.lang.String)
*/
@Override
public OutputStream openOutputStreamForTable(String tableName) {
if (zipOutput == null) {
throw new IllegalStateException("Archive data set has not been opened");
}
try {
ZipEntry entry = new ZipEntry(tableName + ".xml");
zipOutput.putNextEntry(entry);
// Make sure the caller can't actually close the underlying stream
return zipOutput;
} catch (IOException e) {
throw new RuntimeException("Error creating new zip entry in archive [" + file + "]", e);
}
} | 3.68 |
graphhopper_MiniPerfTest_getMax | /**
* @return maximum time across all calls, in ms
*/
public double getMax() {
return max / NS_PER_MS;
} | 3.68 |
flink_BinaryInMemorySortBuffer_reset | /**
* Resets the sort buffer back to the state where it is empty. All contained data is discarded.
*/
public void reset() {
// reset all offsets
this.numRecords = 0;
this.currentSortIndexOffset = 0;
this.currentDataBufferOffset = 0;
this.sortIndexBytes = 0;
// return all memory
returnToSegmentPool();
// grab first buffers
this.currentSortIndexSegment = nextMemorySegment();
this.sortIndex.add(this.currentSortIndexSegment);
this.recordCollector.reset();
} | 3.68 |
flink_LookupCacheManager_registerCacheIfAbsent | /**
* Register a cache instance with identifier to the manager.
*
* <p>If the cache with the given identifier is already registered in the manager, this method
* will return the registered one; otherwise this method will register the given cache in the
* manager and then return it.
*
* @param cacheIdentifier identifier of the cache
* @param cache instance of cache trying to register
* @return instance of the shared cache
*/
public synchronized LookupCache registerCacheIfAbsent(
String cacheIdentifier, LookupCache cache) {
checkNotNull(cache, "Could not register null cache in the manager");
RefCountedCache refCountedCache =
managedCaches.computeIfAbsent(
cacheIdentifier, identifier -> new RefCountedCache(cache));
refCountedCache.retain();
return refCountedCache.cache;
} | 3.68 |
hadoop_AuditContextUpdater_updateCurrentAuditContext | /**
* Add job/task info to current audit context.
*/
public void updateCurrentAuditContext() {
final CommonAuditContext auditCtx = currentAuditContext();
if (jobId != null) {
auditCtx.put(AuditConstants.PARAM_JOB_ID, jobId);
} else {
currentAuditContext().remove(AuditConstants.PARAM_JOB_ID);
}
if (taskAttemptId != null) {
auditCtx.put(AuditConstants.PARAM_TASK_ATTEMPT_ID, taskAttemptId);
} else {
currentAuditContext().remove(CommitConstants.PARAM_TASK_ATTEMPT_ID);
}
} | 3.68 |
morf_AbstractSqlDialectTest_testAlterBigIntegerColumn | /**
* Test altering a big integer column.
*/
@Test
public void testAlterBigIntegerColumn() {
testAlterTableColumn(TEST_TABLE, AlterationType.ALTER, getColumn(TEST_TABLE, BIG_INTEGER_FIELD), column(BIG_INTEGER_FIELD, DataType.BIG_INTEGER).nullable(), expectedAlterTableAlterBigIntegerColumnStatement());
} | 3.68 |
flink_AdaptiveScheduler_shouldRescale | /**
* In regular mode, rescale the job if added resource meets {@link
* JobManagerOptions#MIN_PARALLELISM_INCREASE}. In force mode rescale if the parallelism has
* changed.
*/
@Override
public boolean shouldRescale(ExecutionGraph executionGraph, boolean forceRescale) {
final Optional<VertexParallelism> maybeNewParallelism =
slotAllocator.determineParallelism(
jobInformation, declarativeSlotPool.getAllSlotsInformation());
return maybeNewParallelism
.filter(
vertexParallelism -> {
RescalingController rescalingControllerToUse =
forceRescale ? forceRescalingController : rescalingController;
return rescalingControllerToUse.shouldRescale(
getCurrentParallelism(executionGraph), vertexParallelism);
})
.isPresent();
} | 3.68 |
hbase_RegionScannerImpl_filterRow | /**
* This function is to maintain backward compatibility for 0.94 filters. HBASE-6429 combines both
* the filterRow() and filterRow({@code List<KeyValue> kvs}) functions. Code written for 0.94 or
* older may not implement hasFilterRow as HBASE-6429 expects, because in 0.94 hasFilterRow() only
* returns true when filterRow({@code List<KeyValue> kvs}) is overridden, not filterRow().
* Therefore, the filterRow() will be skipped.
*/
private boolean filterRow() throws IOException {
// when hasFilterRow returns true, filter.filterRow() will be called automatically inside
// filterRowCells(List<Cell> kvs) so we skip that scenario here.
return filter != null && (!filter.hasFilterRow()) && filter.filterRow();
} | 3.68 |
hbase_Import_flushRegionsIfNecessary | /**
* If the durability is set to {@link Durability#SKIP_WAL} and the data is imported to hbase, we
* need to flush all the regions of the table as the data is held in memory and is also not
* present in the Write Ahead Log to replay in scenarios of a crash. This method flushes all the
* regions of the table in the scenarios of import data to hbase with {@link Durability#SKIP_WAL}
*/
public static void flushRegionsIfNecessary(Configuration conf)
throws IOException, InterruptedException {
String tableName = conf.get(TABLE_NAME);
Admin hAdmin = null;
Connection connection = null;
String durability = conf.get(WAL_DURABILITY);
// Need to flush if the data is written to hbase and skip wal is enabled.
if (
conf.get(BULK_OUTPUT_CONF_KEY) == null && durability != null
&& Durability.SKIP_WAL.name().equalsIgnoreCase(durability)
) {
LOG.info("Flushing all data that skipped the WAL.");
try {
connection = ConnectionFactory.createConnection(conf);
hAdmin = connection.getAdmin();
hAdmin.flush(TableName.valueOf(tableName));
} finally {
if (hAdmin != null) {
hAdmin.close();
}
if (connection != null) {
connection.close();
}
}
}
} | 3.68 |
hbase_Table_increment | /**
* Increments one or more columns within a single row.
* <p>
* This operation ensures atomicity to readers. Increments are done under a single row lock, so
* write operations to a row are synchronized, and readers are guaranteed to see this operation
* fully completed.
* @param increment object that specifies the columns and amounts to be used for the increment
* operations
* @throws IOException e
* @return values of columns after the increment
*/
default Result increment(final Increment increment) throws IOException {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
graphhopper_VectorTile_hasSintValue | /**
* <code>optional sint64 sint_value = 6;</code>
*/
public boolean hasSintValue() {
return ((bitField0_ & 0x00000020) == 0x00000020);
} | 3.68 |
rocketmq-connect_MemoryStateManagementServiceImpl_get | /**
* Get the current state of the connector.
*
* @param connector the connector name
* @return the state or null if there is none
*/
@Override
public synchronized ConnectorStatus get(String connector) {
return connectors.get(connector);
} | 3.68 |
hbase_Coprocessor_getServices | /**
* Coprocessor endpoints providing protobuf services should override this method.
* @return Iterable of {@link Service}s or empty collection. Implementations should never return
* null.
*/
default Iterable<Service> getServices() {
return Collections.EMPTY_SET;
} | 3.68 |
hbase_RegionSplitter_convertToBytes | /**
* Returns an array of bytes corresponding to an array of BigIntegers
* @param bigIntegers numbers to convert
* @return bytes corresponding to the bigIntegers
*/
public byte[][] convertToBytes(BigInteger[] bigIntegers) {
byte[][] returnBytes = new byte[bigIntegers.length][];
for (int i = 0; i < bigIntegers.length; i++) {
returnBytes[i] = convertToByte(bigIntegers[i]);
}
return returnBytes;
} | 3.68 |
flink_ConfigOptions_longType | /** Defines that the value of the option should be of {@link Long} type. */
public TypedConfigOptionBuilder<Long> longType() {
return new TypedConfigOptionBuilder<>(key, Long.class);
} | 3.68 |
zxing_MaskUtil_applyMaskPenaltyRule1 | /**
* Apply mask penalty rule 1 and return the penalty. Find repetitive cells with the same color and
* give penalty to them. Example: 00000 or 11111.
*/
static int applyMaskPenaltyRule1(ByteMatrix matrix) {
return applyMaskPenaltyRule1Internal(matrix, true) + applyMaskPenaltyRule1Internal(matrix, false);
} | 3.68 |
hadoop_ContainerServiceRecordProcessor_createAInfo | /**
* Creates a container A (IPv4) record descriptor.
* @param record service record.
* @throws Exception if the descriptor creation yields an issue.
*/
protected void createAInfo(ServiceRecord record) throws Exception {
AContainerRecordDescriptor recordInfo = new AContainerRecordDescriptor(
getPath(), record);
registerRecordDescriptor(Type.A, recordInfo);
} | 3.68 |
morf_SqlScriptExecutor_processWith | /**
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.QueryBuilder#processWith(org.alfasoftware.morf.jdbc.SqlScriptExecutor.ResultSetProcessor)
*/
@Override
public <T> T processWith(final ResultSetProcessor<T> resultSetProcessor) {
try {
final Holder<T> holder = new Holder<>();
Work work = new Work() {
@Override
public void execute(Connection innerConnection) throws SQLException {
holder.set(executeQuery(query, parameterMetadata, parameterData, innerConnection, resultSetProcessor, maxRows, queryTimeout, standalone));
}
};
if (connection == null) {
// Get a new connection, and use that...
doWork(work);
} else {
// Get out own connection, and use that...
work.execute(connection);
}
return holder.get();
} catch (SQLException e) {
throw reclassifiedRuntimeException(e, "Error with statement");
}
} | 3.68 |
framework_VScrollTable_handleCaptionEvent | /**
* Handles a event on the captions.
*
* @param event
* The event to handle
*/
protected void handleCaptionEvent(Event event) {
if (event.getTypeInt() == Event.ONMOUSEUP
|| event.getTypeInt() == Event.ONDBLCLICK) {
fireFooterClickedEvent(event);
}
} | 3.68 |
hudi_AvroConvertor_withKafkaFieldsAppended | /**
* this.schema is required to have kafka offsets for this to work
*/
public GenericRecord withKafkaFieldsAppended(ConsumerRecord consumerRecord) {
initSchema();
GenericRecord recordValue = (GenericRecord) consumerRecord.value();
GenericRecordBuilder recordBuilder = new GenericRecordBuilder(this.schema);
for (Schema.Field field : recordValue.getSchema().getFields()) {
recordBuilder.set(field, recordValue.get(field.name()));
}
String recordKey = StringUtils.objToString(consumerRecord.key());
recordBuilder.set(KAFKA_SOURCE_OFFSET_COLUMN, consumerRecord.offset());
recordBuilder.set(KAFKA_SOURCE_PARTITION_COLUMN, consumerRecord.partition());
recordBuilder.set(KAFKA_SOURCE_TIMESTAMP_COLUMN, consumerRecord.timestamp());
recordBuilder.set(KAFKA_SOURCE_KEY_COLUMN, recordKey);
return recordBuilder.build();
} | 3.68 |
hbase_TimeRange_withinTimeRange | /**
* Check if the specified timestamp is within this TimeRange.
* <p/>
* Returns true if within interval [minStamp, maxStamp), false if not.
* @param timestamp timestamp to check
* @return true if within TimeRange, false if not
*/
public boolean withinTimeRange(long timestamp) {
assert timestamp >= 0;
if (this.allTime) {
return true;
}
// check if >= minStamp
return (minStamp <= timestamp && timestamp < maxStamp);
} | 3.68 |
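A quick sketch of the half-open semantics above, assuming HBase's `TimeRange.between(min, max)` factory.

```java
import org.apache.hadoop.hbase.io.TimeRange;

public class TimeRangeExample {
    public static void main(String[] args) {
        TimeRange range = TimeRange.between(10L, 20L);     // interval [10, 20)
        System.out.println(range.withinTimeRange(10L));    // true: min is inclusive
        System.out.println(range.withinTimeRange(20L));    // false: max is exclusive
    }
}
```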
hudi_FailSafeConsistencyGuard_waitForFileVisibility | /**
* Helper function to wait till file either appears/disappears.
*
* @param filePath File Path
*/
private void waitForFileVisibility(Path filePath, FileVisibility visibility) throws TimeoutException {
long waitMs = consistencyGuardConfig.getInitialConsistencyCheckIntervalMs();
int attempt = 0;
while (attempt < consistencyGuardConfig.getMaxConsistencyChecks()) {
try {
if (checkFileVisibility(filePath, visibility)) {
return;
}
} catch (IOException ioe) {
LOG.warn("Got IOException waiting for file visibility. Retrying", ioe);
}
sleepSafe(waitMs);
      waitMs = waitMs * 2; // double the check interval on every attempt
waitMs = Math.min(waitMs, consistencyGuardConfig.getMaxConsistencyCheckIntervalMs());
attempt++;
}
throw new TimeoutException("Timed-out waiting for the file to " + visibility.name());
} | 3.68 |
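
The retry loop above is a capped exponential backoff: the wait interval doubles after every failed visibility check but is clamped to a configured maximum. A standalone sketch of that schedule, using illustrative values rather than Hudi's configured defaults:

public class CappedBackoffSketch {

    public static void main(String[] args) {
        long waitMs = 400;        // initial check interval (illustrative value)
        long maxWaitMs = 20_000;  // cap on the interval (illustrative value)
        int maxChecks = 7;

        for (int attempt = 0; attempt < maxChecks; attempt++) {
            System.out.println("attempt " + attempt + " -> sleep " + waitMs + " ms");
            // Double the interval, but never let it exceed the cap.
            waitMs = Math.min(waitMs * 2, maxWaitMs);
        }
    }
}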
hudi_FileIOUtils_copy | /**
* Copies the file content from source path to destination path.
*
* @param fileSystem {@link FileSystem} instance.
* @param sourceFilePath Source file path.
* @param destFilePath Destination file path.
*/
public static void copy(
FileSystem fileSystem, org.apache.hadoop.fs.Path sourceFilePath,
org.apache.hadoop.fs.Path destFilePath) {
FSDataInputStream fsDataInputStream = null;
FSDataOutputStream fsDataOutputStream = null;
try {
fsDataInputStream = fileSystem.open(sourceFilePath);
fsDataOutputStream = fileSystem.create(destFilePath, false);
copy(fsDataInputStream, fsDataOutputStream);
} catch (IOException e) {
throw new HoodieIOException(String.format("Cannot copy from %s to %s",
sourceFilePath.toString(), destFilePath.toString()), e);
} finally {
closeQuietly(fsDataInputStream);
closeQuietly(fsDataOutputStream);
}
} | 3.68 |
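
A hedged usage sketch for the helper above. The FileIOUtils import path is an assumption (the package is not shown in the snippet); the Hadoop FileSystem setup is standard. Because the destination is created with overwrite = false, the call throws HoodieIOException if the destination file already exists.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.util.FileIOUtils; // assumed package for the helper above

public class FileCopyDemo {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Copies source.txt to dest.txt; fails if dest.txt already exists.
        FileIOUtils.copy(fs, new Path("/tmp/source.txt"), new Path("/tmp/dest.txt"));
    }
}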
flink_OperationManager_close | /** Closes the {@link OperationManager} and all operations. */
public void close() {
stateLock.writeLock().lock();
Exception closeException = null;
try {
isRunning = false;
IOUtils.closeAll(submittedOperations.values(), Throwable.class);
} catch (Exception e) {
closeException = e;
} finally {
submittedOperations.clear();
stateLock.writeLock().unlock();
}
        // wait until all submitted operations are closed
try {
operationLock.acquire();
} catch (Exception e) {
LOG.error("Failed to wait all operation closed.", e);
} finally {
operationLock.release();
}
LOG.debug("Closes the Operation Manager.");
if (closeException != null) {
throw new SqlExecutionException(
"Failed to close the OperationManager.", closeException);
}
} | 3.68 |
framework_ProgressBar_isIndeterminate | /**
* Gets whether or not this progress indicator is indeterminate. In
* indeterminate mode there is an animation indicating that the task is
* running but without providing any information about the current progress.
*
* @return <code>true</code> if set to indeterminate mode; otherwise
* <code>false</code>
*/
public boolean isIndeterminate() {
return getState(false).indeterminate;
} | 3.68 |
hudi_JenkinsHash_main | /**
* Compute the hash of the specified file
*
* @param args name of file to compute hash of.
* @throws IOException
*/
public static void main(String[] args) throws IOException {
if (args.length != 1) {
System.err.println("Usage: JenkinsHash filename");
System.exit(-1);
}
try (FileInputStream in = new FileInputStream(args[0])) {
byte[] bytes = new byte[512];
int value = 0;
JenkinsHash hash = new JenkinsHash();
for (int length = in.read(bytes); length > 0; length = in.read(bytes)) {
value = hash.hash(bytes, length, value);
}
System.out.println(Math.abs(value));
}
} | 3.68 |
MagicPlugin_BoundingBox_intersectsLine | // Source:
// http://www.gamedev.net/topic/338987-aabb---line-segment-intersection-test/
public boolean intersectsLine(Vector p1, Vector p2)
{
final double epsilon = 0.0001f;
p1 = p1.clone();
p2 = p2.clone();
Vector d = p2.subtract(p1).multiply(0.5);
Vector e = max.clone().subtract(min).multiply(0.5);
Vector c = p1.add(d).subtract(min.clone().add(max).multiply(0.5));
Vector ad = new Vector(Math.abs(d.getX()), Math.abs(d.getY()), Math.abs(d.getZ()));
if (Math.abs(c.getX()) > e.getX() + ad.getX())
return false;
if (Math.abs(c.getY()) > e.getY() + ad.getY())
return false;
if (Math.abs(c.getZ()) > e.getZ() + ad.getZ())
return false;
if (Math.abs(d.getY() * c.getZ() - d.getZ() * c.getY()) > e.getY() * ad.getZ() + e.getZ() * ad.getY() + epsilon)
return false;
if (Math.abs(d.getZ() * c.getX() - d.getX() * c.getZ()) > e.getZ() * ad.getX() + e.getX() * ad.getZ() + epsilon)
return false;
if (Math.abs(d.getX() * c.getY() - d.getY() * c.getX()) > e.getX() * ad.getY() + e.getY() * ad.getX() + epsilon)
return false;
return true;
} | 3.68 |
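
A hedged usage sketch of the separating-axis test above. Only intersectsLine is shown in the snippet, so the (min, max) Vector constructor and the BoundingBox import are assumptions; org.bukkit.util.Vector is the standard Bukkit vector class. The first segment passes through the unit box, the second passes well above it.

import org.bukkit.util.Vector;

public class IntersectsLineDemo {
    public static void main(String[] args) {
        // Assumed constructor taking the min and max corners of the box.
        BoundingBox unitBox = new BoundingBox(new Vector(0, 0, 0), new Vector(1, 1, 1));

        boolean hits = unitBox.intersectsLine(new Vector(-1, 0.5, 0.5), new Vector(2, 0.5, 0.5));
        boolean misses = unitBox.intersectsLine(new Vector(-1, 5, 5), new Vector(2, 5, 5));

        System.out.println(hits);   // expected: true  - segment crosses the box
        System.out.println(misses); // expected: false - segment stays outside
    }
}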
flink_BatchTask_setLastOutputCollector | /**
* Sets the last output {@link Collector} of the collector chain of this {@link BatchTask}.
*
* <p>In case of chained tasks, the output collector of the last {@link ChainedDriver} is set.
* Otherwise it is the single collector of the {@link BatchTask}.
*
* @param newOutputCollector new output collector to set as last collector
*/
protected void setLastOutputCollector(Collector<OT> newOutputCollector) {
int numChained = this.chainedTasks.size();
if (numChained == 0) {
output = newOutputCollector;
return;
}
chainedTasks.get(numChained - 1).setOutputCollector(newOutputCollector);
} | 3.68 |
flink_EmptyFieldsCountAccumulator_add | /** Increases the result vector component at the specified position by 1. */
@Override
public void add(Integer position) {
updateResultVector(position, 1);
} | 3.68 |
hadoop_SchedulerNodeReport_getUtilization | /**
*
* @return utilization of this node
*/
public ResourceUtilization getUtilization() {
return utilization;
} | 3.68 |
hadoop_ApplicationMaster_setupContainerAskForRM | /**
   * Set up the request that will be sent to the RM for the container ask.
*
* @return the setup ResourceRequest to be sent to RM
*/
private ContainerRequest setupContainerAskForRM(int memory, int vcores,
int priority, String nodeLabel) {
Priority pri = Records.newRecord(Priority.class);
pri.setPriority(priority);
// Set up resource type requirements
// For now, memory and CPU are supported so we set memory and cpu
// requirements
Resource capability = Records.newRecord(Resource.class);
capability.setMemorySize(memory);
capability.setVirtualCores(vcores);
return new ContainerRequest(capability, null, null, pri, true, nodeLabel);
} | 3.68 |
hudi_LegacyArchivedMetaEntryReader_readInstant | /**
   * Reads the Avro record for the instant and its details.
*/
private Pair<HoodieInstant, Option<byte[]>> readInstant(GenericRecord record) {
final String instantTime = record.get(HoodiePartitionMetadata.COMMIT_TIME_KEY).toString();
final String action = record.get(ACTION_TYPE_KEY).toString();
final String stateTransitionTime = (String) record.get(STATE_TRANSITION_TIME);
final Option<byte[]> details = getMetadataKey(action).map(key -> {
Object actionData = record.get(key);
if (actionData != null) {
if (action.equals(HoodieTimeline.COMPACTION_ACTION)) {
return HoodieAvroUtils.indexedRecordToBytes((IndexedRecord) actionData);
} else {
return getUTF8Bytes(actionData.toString());
}
}
return null;
});
HoodieInstant instant = new HoodieInstant(HoodieInstant.State.valueOf(record.get(ACTION_STATE).toString()), action,
instantTime, stateTransitionTime);
    return Pair.of(instant, details);
} | 3.68 |
pulsar_LookupData_getNativeUrl | /**
     * Legacy name, but client libraries still use it, so it needs to be included in the JSON output.
*/
@Deprecated
public String getNativeUrl() {
return nativeUrl;
} | 3.68 |
morf_DataValueLookupMetadataRegistry_deduplicate | /**
   * Relatively inefficient interning method for use when deserializing.
   * Interns the metadata or identifies the existing interned object and returns
   * the deduplicated result.
*
* @param potentialDuplicate The potential duplicate of an interned instance,
* to be interned and replaced
* @return The interned and deduplicated instance.
*/
static DataValueLookupMetadata deduplicate(DataValueLookupMetadata potentialDuplicate) {
Iterator<CaseInsensitiveString> columnsIterator = potentialDuplicate.getColumnNames().iterator();
DataValueLookupMetadata result = intern(columnsIterator.next());
while (columnsIterator.hasNext()) {
result = appendAndIntern(result, columnsIterator.next());
}
return result;
} | 3.68 |
flink_StatsSummarySnapshot_getAverage | /**
* Calculates the average over all seen values.
*
* @return Average over all seen values.
*/
public long getAverage() {
if (count == 0) {
return 0;
} else {
return sum / count;
}
} | 3.68 |
hadoop_StringInterner_internStringsInArray | /**
* Interns all the strings in the given array in place,
* returning the same array.
*
   * @param strings the array whose elements should be interned.
   * @return the same array, with each element interned.
*/
public static String[] internStringsInArray(String[] strings) {
for (int i = 0; i < strings.length; i++) {
strings[i] = weakIntern(strings[i]);
}
return strings;
} | 3.68 |
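
A hedged usage sketch showing the effect of interning: after the call, equal strings in the array point at the same canonical instance, which saves memory when the same values recur. The import path is the usual Hadoop location for StringInterner and is an assumption here.

import org.apache.hadoop.util.StringInterner; // assumed package for the class above

public class InternDemo {
    public static void main(String[] args) {
        // Two distinct but equal String objects.
        String[] hosts = {new String("host-1"), new String("host-1")};
        StringInterner.internStringsInArray(hosts);
        // Both slots now reference the same interned instance.
        System.out.println(hosts[0] == hosts[1]); // true
    }
}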
framework_AbstractEmbedded_getAlternateText | /**
* Gets this component's alternate text that can be presented instead of the
* component's normal content for accessibility purposes.
*
   * @return the alternate text
*/
public String getAlternateText() {
return getState(false).alternateText;
} | 3.68 |
hbase_HMaster_getAverageLoad | /**
* Compute the average load across all region servers. Currently, this uses a very naive
* computation - just uses the number of regions being served, ignoring stats about number of
* requests.
* @return the average load
*/
public double getAverageLoad() {
if (this.assignmentManager == null) {
return 0;
}
RegionStates regionStates = this.assignmentManager.getRegionStates();
if (regionStates == null) {
return 0;
}
return regionStates.getAverageLoad();
} | 3.68 |
morf_RecordHelper_copy | /**
* Creates a copy of a {@link Record}. Since {@link Record} does not guarantee immutability, use
* this for safety when holding onto records.
*
* @param record The record to copy.
* @param columns The columns in the record.
* @return The copy.
*/
public static Record copy(Record record, Iterable<Column> columns) {
RecordBuilder result = DataSetUtils.record();
for (Column column : columns) {
result.setObject(column.getName(), record.getObject(column));
}
return result;
} | 3.68 |
flink_DeduplicateFunctionHelper_updateDeduplicateResult | /**
     * Collect the updated result for the duplicate row.
*
* @param generateUpdateBefore flag to generate UPDATE_BEFORE message or not
* @param generateInsert flag to generate INSERT message or not
* @param preRow previous row under the key
* @param currentRow current row under the key which is the duplicate row
* @param out underlying collector
*/
static void updateDeduplicateResult(
boolean generateUpdateBefore,
boolean generateInsert,
RowData preRow,
RowData currentRow,
Collector<RowData> out) {
if (generateUpdateBefore || generateInsert) {
if (preRow == null) {
// the first row, send INSERT message
currentRow.setRowKind(RowKind.INSERT);
out.collect(currentRow);
} else {
if (generateUpdateBefore) {
final RowKind preRowKind = preRow.getRowKind();
preRow.setRowKind(RowKind.UPDATE_BEFORE);
out.collect(preRow);
preRow.setRowKind(preRowKind);
}
currentRow.setRowKind(RowKind.UPDATE_AFTER);
out.collect(currentRow);
}
} else {
currentRow.setRowKind(RowKind.UPDATE_AFTER);
out.collect(currentRow);
}
} | 3.68 |
hbase_HtmlQuoting_unquoteHtmlChars | /**
* Remove HTML quoting from a string.
* @param item the string to unquote
* @return the unquoted string
*/
public static String unquoteHtmlChars(String item) {
if (item == null) {
return null;
}
int next = item.indexOf('&');
// nothing was quoted
if (next == -1) {
return item;
}
int len = item.length();
int posn = 0;
StringBuilder buffer = new StringBuilder();
while (next != -1) {
buffer.append(item.substring(posn, next));
if (item.startsWith("&", next)) {
buffer.append('&');
next += 5;
} else if (item.startsWith("'", next)) {
buffer.append('\'');
next += 6;
} else if (item.startsWith(">", next)) {
buffer.append('>');
next += 4;
} else if (item.startsWith("<", next)) {
buffer.append('<');
next += 4;
} else if (item.startsWith(""", next)) {
buffer.append('"');
next += 6;
} else {
int end = item.indexOf(';', next) + 1;
if (end == 0) {
end = len;
}
throw new IllegalArgumentException("Bad HTML quoting for " + item.substring(next, end));
}
posn = next;
next = item.indexOf('&', posn);
}
buffer.append(item.substring(posn, len));
return buffer.toString();
} | 3.68 |
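
A minimal usage sketch of the unquoting helper above. The import path is the usual HBase location for HtmlQuoting and is an assumption here.

import org.apache.hadoop.hbase.http.HtmlQuoting; // assumed package for the class above

public class UnquoteDemo {
    public static void main(String[] args) {
        String quoted = "&lt;b&gt;&quot;Tom&quot; &amp; &quot;Jerry&quot;&lt;/b&gt;";
        // Prints: <b>"Tom" & "Jerry"</b>
        System.out.println(HtmlQuoting.unquoteHtmlChars(quoted));
    }
}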
querydsl_JTSCurveExpression_endPoint | /**
* The end Point of this Curve.
*
* @return end point
*/
public JTSPointExpression<Point> endPoint() {
if (endPoint == null) {
endPoint = JTSGeometryExpressions.pointOperation(SpatialOps.END_POINT, mixin);
}
return endPoint;
} | 3.68 |
hudi_HoodieTableConfig_getBaseFileFormat | /**
* Get the base file storage format.
*
   * @return HoodieFileFormat for the base file storage format
*/
public HoodieFileFormat getBaseFileFormat() {
return HoodieFileFormat.valueOf(getStringOrDefault(BASE_FILE_FORMAT));
} | 3.68 |
dubbo_RpcServiceContext_isProviderSide | /**
     * Whether this call is on the provider side.
     *
     * @return true if on the provider side.
*/
@Override
public boolean isProviderSide() {
return !isConsumerSide();
} | 3.68 |
hbase_HBaseFsckRepair_waitUntilAssigned | /*
* Should we check all assignments or just not in RIT?
*/
public static void waitUntilAssigned(Admin admin, RegionInfo region)
throws IOException, InterruptedException {
long timeout = admin.getConfiguration().getLong("hbase.hbck.assign.timeout", 120000);
long expiration = timeout + EnvironmentEdgeManager.currentTime();
while (EnvironmentEdgeManager.currentTime() < expiration) {
try {
boolean inTransition = false;
for (RegionState rs : admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION))
.getRegionStatesInTransition()) {
if (RegionInfo.COMPARATOR.compare(rs.getRegion(), region) == 0) {
inTransition = true;
break;
}
}
if (!inTransition) {
// yay! no longer RIT
return;
}
// still in rit
LOG.info("Region still in transition, waiting for " + "it to become assigned: " + region);
} catch (IOException e) {
LOG.warn("Exception when waiting for region to become assigned," + " retrying", e);
}
Thread.sleep(1000);
}
throw new IOException("Region " + region + " failed to move out of "
+ "transition within timeout " + timeout + "ms");
} | 3.68 |