name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
flink_DefaultConfigurableOptionsFactory_setLogFileNum_rdh | /**
* The maximum number of files RocksDB should keep for logging.
*
* @param logFileNum
* number of files to keep
* @return this options factory
*/
public DefaultConfigurableOptionsFactory setLogFileNum(int logFileNum) {
Preconditions.checkArgument(logFileNum > 0, "Invalid configuration: Must keep at least one log file.");
configuredOptions.put(LOG_FILE_NUM.key(), String.valueOf(logFileNum));
return this;
} | 3.26 |
flink_DefaultConfigurableOptionsFactory_getLogLevel_rdh | // --------------------------------------------------------------------------
// Configuring RocksDB's info log.
// --------------------------------------------------------------------------
private InfoLogLevel getLogLevel() {
return InfoLogLevel.valueOf(getInternal(LOG_LEVEL.key()).toUpperCase());
} | 3.26 |
flink_DefaultConfigurableOptionsFactory_getMaxWriteBufferNumber_rdh | // --------------------------------------------------------------------------
// The maximum number of write buffers that are built up in memory.
// --------------------------------------------------------------------------
private int getMaxWriteBufferNumber() {
return Integer.parseInt(getInternal(MAX_WRITE_BUFFER_NUMBER.key()));
} | 3.26 |
flink_DefaultConfigurableOptionsFactory_getMaxSizeLevelBase_rdh | // --------------------------------------------------------------------------
// Maximum total data size for a level, i.e., the max total size for level-1
// --------------------------------------------------------------------------
private long getMaxSizeLevelBase() {
return MemorySize.parseBytes(getInternal(MAX_SIZE_LEVEL_BASE.key()));
} | 3.26 |
flink_DefaultConfigurableOptionsFactory_getBlockCacheSize_rdh | // --------------------------------------------------------------------------
// The amount of the cache for data blocks in RocksDB
// --------------------------------------------------------------------------
private long getBlockCacheSize() {
return MemorySize.parseBytes(getInternal(BLOCK_CACHE_SIZE.key()));
} | 3.26 |
flink_DefaultConfigurableOptionsFactory_configure_rdh | /**
* Creates a {@link DefaultConfigurableOptionsFactory} instance from a {@link ReadableConfig}.
*
* <p>If no options within {@link RocksDBConfigurableOptions} have ever been configured, the
* created RocksDBOptionsFactory will not override anything defined in {@link PredefinedOptions}.
*
* @param configuration
* Configuration to be used for the ConfigurableRocksDBOptionsFactory
* creation
* @return A ConfigurableRocksDBOptionsFactory created from the given configuration
*/
@Override
public DefaultConfigurableOptionsFactory configure(ReadableConfig configuration) {
for (ConfigOption<?> option : CANDIDATE_CONFIGS) {
Optional<?> newValue = configuration.getOptional(option);
if (newValue.isPresent()) {
checkArgumentValid(option, newValue.get());
this.configuredOptions.put(option.key(), newValue.get().toString());
}
}
return this;
} | 3.26 |
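A hedged usage sketch combining the setter and `configure(ReadableConfig)` snippets above. The configuration key string and the `org.apache.flink.contrib.streaming.state` package are assumptions based on older Flink releases; the authoritative constant is `RocksDBConfigurableOptions.LOG_FILE_NUM`.

```java
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.DefaultConfigurableOptionsFactory;

public class RocksDBOptionsFactoryExample {
    public static void main(String[] args) {
        Configuration config = new Configuration();
        // Illustrative key; the real constant is RocksDBConfigurableOptions.LOG_FILE_NUM.
        config.setString("state.backend.rocksdb.log.file-num", "4");

        DefaultConfigurableOptionsFactory factory =
                new DefaultConfigurableOptionsFactory()
                        .setLogFileNum(2)      // programmatic setter from the snippet above
                        .configure(config);    // picks up any CANDIDATE_CONFIGS present in config
    }
}
```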
flink_DefaultConfigurableOptionsFactory_getInternal_rdh | /**
* Returns the value in string format with the given key.
*
* @param key
* The configuration-key to query in string format.
*/
private String getInternal(String key) {
Preconditions.checkArgument(configuredOptions.containsKey(key), ("The configuration " + key) + " has not been configured.");
return configuredOptions.get(key);
} | 3.26 |
flink_DefaultConfigurableOptionsFactory_getMaxBackgroundThreads_rdh | // --------------------------------------------------------------------------
// Maximum number of concurrent background flush and compaction threads
// --------------------------------------------------------------------------
private int getMaxBackgroundThreads() {
return Integer.parseInt(getInternal(MAX_BACKGROUND_THREADS.key()));
} | 3.26 |
flink_ResourceManagerRuntimeServicesConfiguration_fromConfiguration_rdh | // ---------------------------- Static methods ----------------------------------
public static ResourceManagerRuntimeServicesConfiguration fromConfiguration(Configuration configuration, WorkerResourceSpecFactory defaultWorkerResourceSpecFactory) throws ConfigurationException {
final String strJobTimeout = configuration.getString(ResourceManagerOptions.JOB_TIMEOUT);
final Time jobTimeout;
try {
jobTimeout = Time.milliseconds(TimeUtils.parseDuration(strJobTimeout).toMillis());
} catch (IllegalArgumentException e) {
throw new ConfigurationException((("Could not parse the resource manager's job timeout " + "value ") + ResourceManagerOptions.JOB_TIMEOUT) + '.', e);
}
final WorkerResourceSpec defaultWorkerResourceSpec = defaultWorkerResourceSpecFactory.createDefaultWorkerResourceSpec(configuration);
final SlotManagerConfiguration slotManagerConfiguration = SlotManagerConfiguration.fromConfiguration(configuration, defaultWorkerResourceSpec);
final boolean enableFineGrainedResourceManagement = configuration.getBoolean(ClusterOptions.ENABLE_FINE_GRAINED_RESOURCE_MANAGEMENT);
return new ResourceManagerRuntimeServicesConfiguration(jobTimeout, slotManagerConfiguration, enableFineGrainedResourceManagement);
} | 3.26 |
flink_PermanentBlobCache_m0_rdh | /**
* Delete the blob file with the given key.
*
* @param jobId
* ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param blobKey
* The key of the desired BLOB.
*/
private boolean m0(JobID jobId, BlobKey blobKey) {
final File localFile = new File(BlobUtils.getStorageLocationPath(storageDir.deref().getAbsolutePath(), jobId, blobKey));
if ((!localFile.delete()) && localFile.exists()) {
log.warn("Failed to delete locally cached BLOB {} at {}", blobKey, localFile.getAbsolutePath());
return false;
}
return true;
} | 3.26 |
flink_PermanentBlobCache_run_rdh | /**
* Cleans up BLOBs which are not referenced anymore.
*/
@Override
public void run() {
synchronized(jobRefCounters) {
Iterator<Map.Entry<JobID, RefCount>> entryIter = jobRefCounters.entrySet().iterator();
final long currentTimeMillis = System.currentTimeMillis();
while (entryIter.hasNext()) {
Map.Entry<JobID, RefCount> entry = entryIter.next();
RefCount ref = entry.getValue();
if (((ref.references <= 0) && (ref.keepUntil > 0)) && (currentTimeMillis >= ref.keepUntil)) {
JobID jobId = entry.getKey();
final File localFile = new File(BlobUtils.getStorageLocationPath(storageDir.deref().getAbsolutePath(), jobId));
/* NOTE: normally it is not required to acquire the write lock to delete the job's
storage directory since there should be no one accessing it with the ref
counter being 0 - acquire it just in case, to always be on the safe side
*/
readWriteLock.writeLock().lock();
boolean success = false;
try {
blobCacheSizeTracker.untrackAll(jobId);
FileUtils.deleteDirectory(localFile);
success = true;
} catch (Throwable t) {
log.warn("Failed to locally delete job directory " + localFile.getAbsolutePath(), t);
} finally {
readWriteLock.writeLock().unlock();
}
// let's only remove this directory from cleanup if the cleanup was
// successful
// (does not need the write lock)
if (success) {
entryIter.remove();
}
}
}
}
} | 3.26 |
flink_PermanentBlobCache_getStorageLocation_rdh | /**
* Returns a file handle to the file associated with the given blob key on the blob server.
*
* @param jobId
* ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param key
* identifying the file
* @return file handle to the file
* @throws IOException
* if creating the directory fails
*/
@VisibleForTesting
public File getStorageLocation(JobID jobId, BlobKey key) throws IOException {
checkNotNull(jobId);
return BlobUtils.getStorageLocation(storageDir.deref(), jobId, key);
} | 3.26 |
flink_PermanentBlobCache_readFile_rdh | /**
* Returns the content of the file for the BLOB with the provided job ID and blob key.
*
* <p>The method will first attempt to serve the BLOB from the local cache. If the BLOB is not
* in the cache, the method will try to download it from the HA store, or directly from the
* {@link BlobServer}.
*
* <p>Compared to {@code getFile}, {@code readFile} makes sure that the file is fully read in
* the same write lock as the file is accessed. This avoids the scenario that the path is
* returned as the file is deleted concurrently by other threads.
*
* @param jobId
* ID of the job this blob belongs to
* @param blobKey
* BLOB key associated with the requested file
* @return The content of the BLOB.
* @throws java.io.FileNotFoundException
* if the BLOB does not exist;
* @throws IOException
* if any other error occurs when retrieving the file.
*/
@Override
public byte[] readFile(JobID jobId, PermanentBlobKey blobKey) throws IOException {
checkNotNull(jobId);
checkNotNull(blobKey);
final File localFile =
BlobUtils.getStorageLocation(storageDir.deref(), jobId, blobKey);
readWriteLock.readLock().lock();
try {
if (localFile.exists()) {
blobCacheSizeTracker.update(jobId, blobKey);
return FileUtils.readAllBytes(localFile.toPath());
}
} finally {
readWriteLock.readLock().unlock();
}
// first try the distributed blob store (if available)
// use a temporary file (thread-safe without locking)
File incomingFile = createTemporaryFilename();
try {
try {
if (blobView.get(jobId, blobKey, incomingFile))
{
// now move the temp file to our local cache atomically
readWriteLock.writeLock().lock();
try {
checkLimitAndMoveFile(incomingFile, jobId, blobKey, localFile, log, null);
return FileUtils.readAllBytes(localFile.toPath());
} finally {
readWriteLock.writeLock().unlock();
}
}
} catch (Exception e) {
log.info("Failed to copy from blob store. Downloading from BLOB server instead.", e);}
final InetSocketAddress currentServerAddress = serverAddress;
if (currentServerAddress != null) {
// fallback: download from the BlobServer
BlobClient.downloadFromBlobServer(jobId, blobKey, incomingFile, currentServerAddress, blobClientConfig, numFetchRetries);
readWriteLock.writeLock().lock();
try {
checkLimitAndMoveFile(incomingFile, jobId, blobKey, localFile, log, null);
return FileUtils.readAllBytes(localFile.toPath());
} finally {
readWriteLock.writeLock().unlock();
}
} else {
throw new IOException("Cannot download from BlobServer, because the server address is unknown.");
}
} finally {
// delete incomingFile from a failed download
if ((!incomingFile.delete()) && incomingFile.exists()) {
log.warn("Could not delete the staging file {} for blob key {} and job {}.", incomingFile, blobKey, jobId);
}
}
} | 3.26 |
flink_PermanentBlobCache_releaseJob_rdh | /**
* Unregisters use of job-related BLOBs and allows them to be released.
*
* @param jobId
* ID of the job this blob belongs to
* @see #registerJob(JobID)
*/
@Override
public void releaseJob(JobID jobId) {
checkNotNull(jobId);
synchronized(jobRefCounters) {
RefCount ref = jobRefCounters.get(jobId);
if ((ref == null) || (ref.references == 0)) {
log.warn("improper use of releaseJob() without a matching number of registerJob() calls for jobId " + jobId);
return;
}
--ref.references;
if (ref.references == 0) {
ref.keepUntil = System.currentTimeMillis() + cleanupInterval;
}
}
} | 3.26 |
flink_PermanentBlobCache_registerJob_rdh | /**
* Registers use of job-related BLOBs.
*
* <p>Using any other method to access BLOBs, e.g. {@link #getFile}, is only valid within calls
* to <tt>registerJob(JobID)</tt> and {@link #releaseJob(JobID)}.
*
* @param jobId
* ID of the job this blob belongs to
* @see #releaseJob(JobID)
*/
@Override
public void registerJob(JobID jobId) {
checkNotNull(jobId);
synchronized(jobRefCounters) {
RefCount ref = jobRefCounters.get(jobId);
if (ref == null) {
ref = new RefCount();
jobRefCounters.put(jobId, ref);
} else {
// reset cleanup timeout
ref.keepUntil = -1;
}
++ref.references;
}
} | 3.26 |
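A minimal sketch of the calling contract described in the Javadoc above: BLOB accesses must be bracketed by `registerJob`/`releaseJob`. The cache instance is assumed to be provided by the runtime rather than constructed here.

```java
import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.blob.PermanentBlobCache;
import org.apache.flink.runtime.blob.PermanentBlobKey;

import java.io.IOException;

class BlobAccessSketch {
    /** Reads a BLOB while holding a job reference, as the cache contract requires. */
    static byte[] readWithRefCount(PermanentBlobCache cache, JobID jobId, PermanentBlobKey key)
            throws IOException {
        cache.registerJob(jobId);               // pin the job's BLOBs
        try {
            return cache.readFile(jobId, key);  // served locally or downloaded (see readFile above)
        } finally {
            cache.releaseJob(jobId);            // allow cleanup once the ref count drops to zero
        }
    }
}
```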
flink_PermanentBlobCache_getFile_rdh | /**
* Returns the path to a local copy of the file associated with the provided job ID and blob
* key.
*
* <p>We will first attempt to serve the BLOB from the local storage. If the BLOB is not in
* there, we will try to download it from the HA store, or directly from the {@link BlobServer}.
*
* @param jobId
* ID of the job this blob belongs to
* @param key
* blob key associated with the requested file
* @return The path to the file.
* @throws java.io.FileNotFoundException
* if the BLOB does not exist;
* @throws IOException
* if any other error occurs when retrieving the file
*/
@Override
public File getFile(JobID jobId, PermanentBlobKey key) throws IOException {
checkNotNull(jobId);
return getFileInternal(jobId, key);
} | 3.26 |
flink_OverWindowPartitionedOrderedPreceding_following_rdh | /**
* Set the following offset (based on time or row-count intervals) for over window.
*
* @param following
* following offset relative to the current row.
* @return an over window with defined following
*/
public OverWindowPartitionedOrderedPreceding following(Expression following) {
optionalFollowing = Optional.of(following);
return this;
} | 3.26 |
flink_OverWindowPartitionedOrderedPreceding_as_rdh | /**
* Assigns an alias for this window that the following {@code select()} clause can refer to.
*
* @param alias
* alias for this over window
* @return the fully defined over window
*/
public OverWindow as(Expression alias) {
return new OverWindow(alias, partitionBy, orderBy, preceding, optionalFollowing);
} | 3.26 |
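For context, a hedged Table API sketch showing where `following()` and `as()` sit in the fluent over-window definition; the column names are placeholders.

```java
import static org.apache.flink.table.api.Expressions.$;
import static org.apache.flink.table.api.Expressions.CURRENT_RANGE;
import static org.apache.flink.table.api.Expressions.UNBOUNDED_RANGE;

import org.apache.flink.table.api.Over;
import org.apache.flink.table.api.OverWindow;

class OverWindowSketch {
    static OverWindow defineWindow() {
        // preceding() yields the OverWindowPartitionedOrderedPreceding on which
        // following() and as() from the snippets above are invoked
        return Over.partitionBy($("user"))
                .orderBy($("rowtime"))
                .preceding(UNBOUNDED_RANGE)
                .following(CURRENT_RANGE)
                .as("w");
    }
}
```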
flink_LogUrlUtil_getValidLogUrlPattern_rdh | /**
* Validate and normalize log url pattern.
*/
public static Optional<String> getValidLogUrlPattern(final Configuration config, final ConfigOption<String> option) {
String pattern = config.getString(option);
if (StringUtils.isNullOrWhitespaceOnly(pattern)) {
return Optional.empty();
}
pattern = pattern.trim();
String scheme = pattern.substring(0, Math.max(pattern.indexOf(SCHEME_SEPARATOR), 0));
if (scheme.isEmpty()) {
return Optional.of((HTTP_SCHEME + SCHEME_SEPARATOR) + pattern);
} else if (HTTP_SCHEME.equalsIgnoreCase(scheme) || HTTPS_SCHEME.equalsIgnoreCase(scheme)) {
return Optional.of(pattern);
} else {
LOG.warn("Ignore configured value for '{}': unsupported scheme {}", option.key(), scheme);
return Optional.empty();
}
} | 3.26 |
flink_DateTimeUtils_fromTimestamp_rdh | // UNIX TIME
// --------------------------------------------------------------------------------------------
public static long fromTimestamp(long ts) {
return ts;
} | 3.26 |
flink_DateTimeUtils_toSQLDate_rdh | // --------------------------------------------------------------------------------------------
// java.sql Date/Time/Timestamp --> internal data types
// --------------------------------------------------------------------------------------------
/**
* Converts the internal representation of a SQL DATE (int) to the Java type used for UDF
* parameters ({@link java.sql.Date}).
*/
public static Date toSQLDate(int v) {
// note that, in this case, can't handle Daylight Saving Time
final long v0 = v * MILLIS_PER_DAY;
return new Date(v0 - LOCAL_TZ.getOffset(v0));
} | 3.26 |
flink_DateTimeUtils_timestampToTimestampWithLocalZone_rdh | // --------------------------------------------------------------------------------------------
// TIMESTAMP to TIMESTAMP_LTZ conversions
// --------------------------------------------------------------------------------------------
public static TimestampData timestampToTimestampWithLocalZone(TimestampData ts, TimeZone tz) {
return TimestampData.fromInstant(ts.toLocalDateTime().atZone(tz.toZoneId()).toInstant());
} | 3.26 |
flink_DateTimeUtils_monthly_rdh | /**
* Whether this is in the YEAR-TO-MONTH family of intervals.
*/
public boolean monthly() {
return ordinal() <= MONTH.ordinal();
} | 3.26 |
flink_DateTimeUtils_parseDate_rdh | /**
* Returns the epoch days since 1970-01-01.
*/
public static int parseDate(String dateStr, String fromFormat) {
// It is OK to use UTC, we just want get the epoch days
// TODO use offset, better performance
long ts = internalParseTimestampMillis(dateStr, fromFormat, TimeZone.getTimeZone("UTC"));
ZoneId zoneId = ZoneId.of("UTC");
Instant instant = Instant.ofEpochMilli(ts);
ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, zoneId);
return ymdToUnixDate(zdt.getYear(), zdt.getMonthValue(), zdt.getDayOfMonth());
} | 3.26 |
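A small, non-Flink illustration of the epoch-day convention that `parseDate` returns, verified against `java.time`, which uses the same convention.

```java
import java.time.LocalDate;

class EpochDayExample {
    public static void main(String[] args) {
        // Epoch days since 1970-01-01: 1970-01-01 -> 0, 1970-01-02 -> 1, 1969-12-31 -> -1.
        System.out.println(LocalDate.parse("1970-01-02").toEpochDay()); // 1
        System.out.println(LocalDate.parse("1969-12-31").toEpochDay()); // -1
    }
}
```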
flink_DateTimeUtils_toInternal_rdh | /**
* Converts the Java type used for UDF parameters of SQL TIMESTAMP type ({@link java.sql.Timestamp}) to internal representation (long).
*
* <p>Converse of {@link #toSQLTimestamp(long)}.
*/
public static long toInternal(Timestamp ts) {
long time = ts.getTime();
return time + LOCAL_TZ.getOffset(time);
} | 3.26 |
flink_DateTimeUtils_getValue_rdh | /**
* Returns the TimeUnit associated with an ordinal. The value returned is null if the
* ordinal is not a member of the TimeUnit enumeration.
*/
public static TimeUnit getValue(int ordinal) {
return (ordinal < 0) || (ordinal >= CACHED_VALUES.length) ? null : CACHED_VALUES[ordinal];
} | 3.26 |
flink_DateTimeUtils_ymdhms_rdh | /**
* Appends year-month-day and hour:minute:second to a buffer; assumes they are valid.
*/
private static StringBuilder ymdhms(StringBuilder b, int year, int month, int day, int h, int m, int s) {
ymd(b, year, month, day);
b.append(' ');
hms(b, h, m, s);
return b;
} | 3.26 |
flink_DateTimeUtils_addMonths_rdh | /**
* Adds a given number of months to a date, represented as the number of days since the epoch.
*/
public static int addMonths(int date, int m) {
int y0 = ((int) (extractFromDate(TimeUnitRange.YEAR, date)));
int m0 = ((int) (extractFromDate(TimeUnitRange.MONTH, date)));
int d0 = ((int) (extractFromDate(TimeUnitRange.DAY, date)));
m0 += m;
int deltaYear = ((int) (DateTimeUtils.floorDiv(m0, 12)));
y0 += deltaYear;
m0 = ((int) (DateTimeUtils.floorMod(m0, 12)));
if (m0 == 0) {
y0 -= 1;
m0 += 12;
}
int last = lastDay(y0, m0);
if (d0 > last) {
d0 = last;
}
return ymdToUnixDate(y0, m0, d0);
} | 3.26 |
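A worked example of the day-of-month clamping performed by the `lastDay` check above, expressed with `java.time` (which should agree with this arithmetic) rather than the internal epoch-day representation.

```java
import java.time.LocalDate;

class AddMonthsExample {
    public static void main(String[] args) {
        // Adding one month to a month-end date clamps to the last valid day of the target month.
        System.out.println(LocalDate.of(2019, 1, 31).plusMonths(1)); // 2019-02-28
        System.out.println(LocalDate.of(2020, 1, 31).plusMonths(1)); // 2020-02-29 (leap year)
    }
}
```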
flink_DateTimeUtils_isValidValue_rdh | /**
* Returns whether a given value is valid for a field of this time unit.
*
* @param field
* Field value
* @return Whether the value is valid for this time unit
*/
public boolean isValidValue(BigDecimal field) {
return (field.compareTo(BigDecimal.ZERO) >= 0) && ((limit == null) || (field.compareTo(limit) < 0));
} | 3.26 |
flink_DateTimeUtils_toTimestampData_rdh | // --------------------------------------------------------------------------------------------
// Numeric -> Timestamp conversion
// --------------------------------------------------------------------------------------------
public static TimestampData toTimestampData(long v, int precision) {
switch (precision) {
case 0 :
if ((MIN_EPOCH_SECONDS <= v) && (v <= f1)) {
return timestampDataFromEpochMills(v * MILLIS_PER_SECOND);
} else {
return null;
}
case 3 :
return timestampDataFromEpochMills(v);
default :
throw new TableException("The precision value '" + precision + "' for function "
+ "TO_TIMESTAMP_LTZ(numeric, precision) is unsupported,"
+ " the supported value is '0' for second or '3' for millisecond.");
}
} | 3.26 |
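A plain-arithmetic illustration of the two supported precisions (no Flink API involved): precision 0 interprets the numeric argument as epoch seconds, precision 3 as epoch milliseconds.

```java
import java.time.Instant;

class ToTimestampLtzPrecision {
    public static void main(String[] args) {
        long v = 1_600_000_000L;
        System.out.println(Instant.ofEpochSecond(v)); // precision 0: 2020-09-13T12:26:40Z
        System.out.println(Instant.ofEpochMilli(v));  // precision 3: 1970-01-19T12:26:40Z
    }
}
```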
flink_DateTimeUtils_parseTimestampTz_rdh | /**
* Parses a date time string to a timestamp based on the given time zone id string. Throws a
* {@link ParseException} if parsing fails.
*
* @param dateStr
* the date time string
* @param tzStr
* the time zone id string
*/
private static long parseTimestampTz(String dateStr, String tzStr) throws ParseException {
TimeZone tz = TIMEZONE_CACHE.get(tzStr);
return parseTimestampMillis(dateStr, DateTimeUtils.TIMESTAMP_FORMAT_STRING, tz);
} | 3.26 |
flink_DateTimeUtils_fromTemporalAccessor_rdh | /**
* This is similar to {@link LocalDateTime#from(TemporalAccessor)}, but it's less strict and
* introduces default values.
*/
private static LocalDateTime fromTemporalAccessor(TemporalAccessor accessor, int precision) {
// complement year with 1970
int year = (accessor.isSupported(YEAR)) ? accessor.get(YEAR) : 1970;
// complement month with 1
int month = (accessor.isSupported(MONTH_OF_YEAR)) ? accessor.get(MONTH_OF_YEAR) : 1;
// complement day with 1
int day = (accessor.isSupported(DAY_OF_MONTH)) ? accessor.get(DAY_OF_MONTH) : 1;
// complement hour with 0
int hour = (accessor.isSupported(HOUR_OF_DAY)) ? accessor.get(HOUR_OF_DAY) : 0;
// complement minute with 0
int minute = (accessor.isSupported(MINUTE_OF_HOUR)) ? accessor.get(MINUTE_OF_HOUR) : 0;
// complement second with 0
int second = (accessor.isSupported(SECOND_OF_MINUTE)) ? accessor.get(SECOND_OF_MINUTE) : 0;
// complement nano_of_second with 0
int nanoOfSecond = (accessor.isSupported(NANO_OF_SECOND)) ? accessor.get(NANO_OF_SECOND) : 0;
if (precision == 0) {
nanoOfSecond = 0;
} else if (precision != 9) {
nanoOfSecond = ((int) (floor(nanoOfSecond, powerX(10, 9 - precision))));
}
return LocalDateTime.of(year, month, day, hour, minute, second, nanoOfSecond);
} | 3.26 |
flink_DateTimeUtils_timestampCeil_rdh | /**
* Keep the algorithm consistent with Calcite DateTimeUtils.julianDateFloor, but here we take
* time zone into account.
*/
public static long timestampCeil(TimeUnitRange range, long ts, TimeZone tz) {
// assume that we are at UTC timezone, just for algorithm performance
long offset = tz.getOffset(ts);
long utcTs = ts + offset;
switch (range) {
case f3 :
return ceil(utcTs, MILLIS_PER_HOUR) - offset;
case DAY :
return ceil(utcTs, MILLIS_PER_DAY) - offset;
case MILLENNIUM :
case CENTURY :
case DECADE :
case MONTH :
case YEAR :
case QUARTER :
case WEEK :
int days = ((int) ((utcTs / MILLIS_PER_DAY) + EPOCH_JULIAN));
return (julianDateFloor(range, days, false) * MILLIS_PER_DAY) - offset;
default :
// for MINUTE and SECONDS etc...,
// it is more effective to use arithmetic Method
throw new AssertionError(range);
}
} | 3.26 |
flink_DateTimeUtils_ymd_rdh | /**
* Appends year-month-day to a buffer; assumes they are valid.
*/
private static StringBuilder ymd(StringBuilder b, int year, int month, int day) {
int4(b, year);
b.append('-');
int2(b, month);
b.append('-');
int2(b, day);
return b;
} | 3.26 |
flink_DateTimeUtils_subtractMonths_rdh | /**
* Finds the number of months between two dates, each represented as the number of days since
* the epoch.
*/
public static int subtractMonths(int date0, int date1) {
if (date0 < date1) {
return -subtractMonths(date1, date0);
}
// Start with an estimate.
// Since no month has more than 31 days, the estimate is <= the true value.
int m = (date0 - date1) / 31;
while (true) {
int date2 = addMonths(date1, m);
if (date2 >= date0) {
return m;
}
int date3 = addMonths(date1, m + 1);
if (date3 > date0) {
return m;
}
++m;
}
} | 3.26 |
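A worked example of the whole-month semantics implemented above, cross-checked with `java.time`, which should give the same count for this case.

```java
import java.time.LocalDate;
import java.time.temporal.ChronoUnit;

class SubtractMonthsExample {
    public static void main(String[] args) {
        // 2021-01-31 to 2021-03-01 spans only one complete month (the second one is partial).
        System.out.println(ChronoUnit.MONTHS.between(
                LocalDate.of(2021, 1, 31), LocalDate.of(2021, 3, 1))); // 1
    }
}
```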
flink_DateTimeUtils_timestampMillisToDate_rdh | // --------------------------------------------------------------------------------------------
// TIMESTAMP to DATE/TIME utils
// --------------------------------------------------------------------------------------------
/**
* Get date from a timestamp.
*
* @param ts
* the timestamp in milliseconds.
* @return the date in days.
*/
public static int timestampMillisToDate(long ts) {
int days = ((int) (ts / MILLIS_PER_DAY));
if (days < 0) {
days = days - 1;
}
return days;
} | 3.26 |
flink_DateTimeUtils_unixTimestamp_rdh | /**
* Returns the value of the argument as an unsigned integer in seconds since '1970-01-01
* 00:00:00' UTC.
*/
public static long unixTimestamp(String dateStr, String format, TimeZone tz) {
long ts = internalParseTimestampMillis(dateStr, format, tz);
if (ts == Long.MIN_VALUE) {
return Long.MIN_VALUE;
} else {
// return the seconds
return ts / 1000;
}
} | 3.26 |
flink_DateTimeUtils_of_rdh | /**
* Returns a {@code TimeUnitRange} with a given start and end unit.
*
* @param startUnit
* Start unit
* @param endUnit
* End unit
* @return Time unit range, or null if not valid
*/
public static TimeUnitRange of(TimeUnit startUnit, TimeUnit endUnit) {
return MAP.get(new Pair<>(startUnit, endUnit));
} | 3.26 |
flink_DateTimeUtils_m0_rdh | /**
* Get time from a timestamp.
*
* @param ts
* the timestamp in milliseconds.
* @return the time in milliseconds.
*/
public static int m0(long ts) {
return ((int) (ts % MILLIS_PER_DAY));
} | 3.26 |
flink_DateTimeUtils_toLocalDate_rdh | // --------------------------------------------------------------------------------------------
// Java 8 time conversion
// --------------------------------------------------------------------------------------------
public static LocalDate toLocalDate(int date) {
return julianToLocalDate(date + EPOCH_JULIAN);
} | 3.26 |
flink_DateTimeUtils_toSQLTime_rdh | /**
* Converts the internal representation of a SQL TIME (int) to the Java type used for UDF
* parameters ({@link java.sql.Time}).
*/
public static Time toSQLTime(int v) {
// note that, in this case, can't handle Daylight Saving Time
return new Time(v - LOCAL_TZ.getOffset(v));
} | 3.26 |
flink_DateTimeUtils_formatDate_rdh | /**
* Helper for CAST({date} AS VARCHAR(n)).
*/
public static String formatDate(int date) {
final StringBuilder buf = new StringBuilder(10);
formatDate(buf, date);
return buf.toString();
} | 3.26 |
flink_DateTimeUtils_timestampFloor_rdh | // --------------------------------------------------------------------------------------------
// Floor/Ceil/Convert tz
// --------------------------------------------------------------------------------------------
public static long timestampFloor(TimeUnitRange range, long ts, TimeZone tz) {
// assume that we are at UTC timezone, just for algorithm performance
long v160 = tz.getOffset(ts);
long utcTs = ts + v160;
switch (range) {
case f3 :
return floor(utcTs, MILLIS_PER_HOUR) - v160;
case DAY :
return floor(utcTs, MILLIS_PER_DAY) - v160;
case MILLENNIUM :
case CENTURY :
case DECADE :
case MONTH :
case YEAR :
case QUARTER :
case WEEK :
int days = ((int) ((utcTs / MILLIS_PER_DAY) + EPOCH_JULIAN));
return (julianDateFloor(range, days, true) * MILLIS_PER_DAY) - v160;
default :
// for MINUTE and SECONDS etc...,
// it is more effective to use arithmetic Method
throw new AssertionError(range);
}
} | 3.26 |
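An illustration (using `java.time`, not the internal representation) of why the time zone offset is applied before flooring to a DAY boundary: the same instant floors to different instants in UTC and in UTC+8.

```java
import java.time.Instant;
import java.time.ZoneId;
import java.time.temporal.ChronoUnit;

class DayFloorExample {
    public static void main(String[] args) {
        Instant ts = Instant.parse("2021-06-01T20:30:00Z");
        System.out.println(ts.atZone(ZoneId.of("UTC"))
                .truncatedTo(ChronoUnit.DAYS).toInstant());           // 2021-06-01T00:00:00Z
        System.out.println(ts.atZone(ZoneId.of("Asia/Shanghai"))
                .truncatedTo(ChronoUnit.DAYS).toInstant());           // 2021-06-01T16:00:00Z
    }
}
```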
flink_DateTimeUtils_parseTimestampData_rdh | // --------------------------------------------------------------------------------------------
// Parsing functions
// --------------------------------------------------------------------------------------------
public static TimestampData parseTimestampData(String dateStr) throws DateTimeException {
// Precision is hardcoded to match signature of TO_TIMESTAMP
// https://issues.apache.org/jira/browse/FLINK-14925
return parseTimestampData(dateStr, 3);
} | 3.26 |
flink_DateTimeUtils_toSQLTimestamp_rdh | /**
* Converts the internal representation of a SQL TIMESTAMP (long) to the Java type used for UDF
* parameters ({@link java.sql.Timestamp}).
*/
public static Timestamp toSQLTimestamp(long v) {
return new Timestamp(v - LOCAL_TZ.getOffset(v));
} | 3.26 |
flink_DateTimeUtils_hms_rdh | /**
* Appends hour:minute:second to a buffer; assumes they are valid.
*/
private static StringBuilder hms(StringBuilder b, int h, int m, int s) {
int2(b, h);
b.append(':');
int2(b, m);
b.append(':');
int2(b, s);
return b;
} | 3.26 |
flink_DateTimeUtils_parseTimestampMillis_rdh | /**
* Parses a date time string to a timestamp based on the given time zone and format. Throws a
* {@link ParseException} if parsing fails.
*
* @param dateStr
* the date time string
* @param format
* date time string format
* @param tz
* the time zone
*/
private static long parseTimestampMillis(String dateStr, String format, TimeZone tz) throws ParseException {
SimpleDateFormat formatter = FORMATTER_CACHE.get(format);
formatter.setTimeZone(tz);
return formatter.parse(dateStr).getTime();
} | 3.26 |
flink_LargeRecordHandler_createSerializer_rdh | // --------------------------------------------------------------------------------------------
private TypeSerializer<Object> createSerializer(Object key, int pos) {
if (key == null)
{
throw new NullKeyFieldException(pos);
}
try {
TypeInformation<Object> info = TypeExtractor.getForObject(key);
return info.createSerializer(executionConfig);
} catch (Throwable t) {
throw new RuntimeException("Could not create key serializer for type " + key);
}
} | 3.26 |
flink_LargeRecordHandler_hasData_rdh | // --------------------------------------------------------------------------------------------
public boolean hasData() {
return recordCounter > 0;
} | 3.26 |
flink_LargeRecordHandler_addRecord_rdh | // --------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
public long addRecord(T record) throws IOException {
if (recordsOutFile == null) {
if (closed) {
throw new IllegalStateException("The large record handler has been closed.");
}
if (recordsReader != null) {
throw new IllegalStateException("The handler has already switched to sorting.");
}
LOG.debug("Initializing the large record spilling...");
// initialize the utilities
{
final TypeComparator<?>[] keyComps = comparator.getFlatComparators();
numKeyFields = keyComps.length;
Object[] v1 = new Object[numKeyFields];
comparator.extractKeys(record, v1, 0);
TypeSerializer<?>[] keySers = new TypeSerializer<?>[numKeyFields];
TypeSerializer<?>[] tupleSers = new TypeSerializer<?>[numKeyFields + 1];
int[] keyPos = new int[numKeyFields];
for (int i = 0; i < numKeyFields; i++) {
keyPos[i] = i;
keySers[i] = createSerializer(v1[i], i);
tupleSers[i] = keySers[i];
}
// add the long serializer for the offset
tupleSers[numKeyFields] = LongSerializer.INSTANCE;
keySerializer = new TupleSerializer<>(((Class<Tuple>) (Tuple.getTupleClass(numKeyFields + 1))), tupleSers);
keyComparator = new TupleComparator<>(keyPos, keyComps, keySers);
keyTuple = keySerializer.createInstance();
}
// initialize the spilling
final int totalNumSegments = memory.size();
final int segmentsForKeys = (totalNumSegments >= (2 * MAX_SEGMENTS_FOR_KEY_SPILLING)) ? MAX_SEGMENTS_FOR_KEY_SPILLING : Math.max(MIN_SEGMENTS_FOR_KEY_SPILLING, totalNumSegments - MAX_SEGMENTS_FOR_KEY_SPILLING);
List<MemorySegment> recordsMemory = new ArrayList<MemorySegment>();
List<MemorySegment> keysMemory = new ArrayList<MemorySegment>();
for (int i = 0; i < segmentsForKeys; i++) {
keysMemory.add(memory.get(i));
}
for (int i = segmentsForKeys; i < totalNumSegments; i++)
{
recordsMemory.add(memory.get(i));
}
recordsChannel = ioManager.createChannel();
keysChannel = ioManager.createChannel();
recordsOutFile = new FileChannelOutputView(ioManager.createBlockChannelWriter(recordsChannel), memManager, recordsMemory, memManager.getPageSize());
keysOutFile = new FileChannelOutputView(ioManager.createBlockChannelWriter(keysChannel), memManager, keysMemory, memManager.getPageSize());
}
final long offset = recordsOutFile.getWriteOffset();
if (offset < 0) {
throw new RuntimeException("wrong offset");
}
Object[] keyHolder = new Object[numKeyFields];
comparator.extractKeys(record, keyHolder, 0);
for (int i = 0; i < numKeyFields; i++) {
keyTuple.setField(keyHolder[i], i);
}
keyTuple.setField(offset, numKeyFields);
keySerializer.serialize(keyTuple, keysOutFile);
serializer.serialize(record, recordsOutFile);
recordCounter++;
return offset;
} | 3.26 |
flink_RuntimeSerializerFactory_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return clazz.hashCode() ^ serializer.hashCode();
} | 3.26 |
flink_ResolveCallByArgumentsRule_adaptArguments_rdh | /**
* Adapts the arguments according to the properties of the {@link Result}.
*/
private List<ResolvedExpression> adaptArguments(Result inferenceResult, List<ResolvedExpression> resolvedArgs) {
return IntStream.range(0, resolvedArgs.size()).mapToObj(pos -> {
final ResolvedExpression argument = resolvedArgs.get(pos);
final DataType argumentType = argument.getOutputDataType();
final DataType expectedType = inferenceResult.getExpectedArgumentTypes().get(pos);
if (!supportsAvoidingCast(argumentType.getLogicalType(), expectedType.getLogicalType())) {
return resolutionContext.postResolutionFactory().cast(argument, expectedType);
}
return argument;
}).collect(Collectors.toList());
} | 3.26 |
flink_ResolveCallByArgumentsRule_getOptionalTypeInference_rdh | /**
* Temporary method until all calls define a type inference.
*/
private Optional<TypeInference> getOptionalTypeInference(FunctionDefinition definition) {
if ((((definition instanceof ScalarFunctionDefinition) || (definition instanceof TableFunctionDefinition)) || (definition instanceof AggregateFunctionDefinition)) || (definition instanceof TableAggregateFunctionDefinition)) {
return Optional.empty();
}
final TypeInference inference = definition.getTypeInference(resolutionContext.typeFactory());
if (inference.getOutputTypeStrategy() != TypeStrategies.MISSING) {
return Optional.of(inference);
} else {
return Optional.empty();
}
} | 3.26 |
flink_ResolveCallByArgumentsRule_prepareInlineUserDefinedFunction_rdh | /**
* Validates and cleans an inline, unregistered {@link UserDefinedFunction}.
*/
private FunctionDefinition prepareInlineUserDefinedFunction(FunctionDefinition definition) {
if (definition instanceof ScalarFunctionDefinition) {
final ScalarFunctionDefinition sf = ((ScalarFunctionDefinition) (definition));
UserDefinedFunctionHelper.prepareInstance(resolutionContext.configuration(), sf.getScalarFunction());
return new ScalarFunctionDefinition(sf.getName(), sf.getScalarFunction());
} else if (definition instanceof TableFunctionDefinition) {
final TableFunctionDefinition tf = ((TableFunctionDefinition) (definition));
UserDefinedFunctionHelper.prepareInstance(resolutionContext.configuration(), tf.getTableFunction());
return new TableFunctionDefinition(tf.getName(), tf.getTableFunction(), tf.getResultType());
} else if (definition instanceof AggregateFunctionDefinition) {
final AggregateFunctionDefinition af = ((AggregateFunctionDefinition) (definition));
UserDefinedFunctionHelper.prepareInstance(resolutionContext.configuration(), af.getAggregateFunction());
return new AggregateFunctionDefinition(af.getName(), af.getAggregateFunction(), af.getResultTypeInfo(), af.getAccumulatorTypeInfo());
} else if (definition instanceof TableAggregateFunctionDefinition) {
final TableAggregateFunctionDefinition taf = ((TableAggregateFunctionDefinition) (definition));
UserDefinedFunctionHelper.prepareInstance(resolutionContext.configuration(), taf.getTableAggregateFunction());
return new TableAggregateFunctionDefinition(taf.getName(), taf.getTableAggregateFunction(), taf.getResultTypeInfo(), taf.getAccumulatorTypeInfo());
} else if (definition instanceof UserDefinedFunction) {
UserDefinedFunctionHelper.prepareInstance(resolutionContext.configuration(), ((UserDefinedFunction) (definition)));
}
return definition;
} | 3.26 |
flink_FlinkRelBuilder_watermark_rdh | /**
* Build watermark assigner relational node.
*/
public RelBuilder watermark(int rowtimeFieldIndex, RexNode watermarkExpr) {
final RelNode input = build();
final RelNode relNode = LogicalWatermarkAssigner.create(cluster, input, rowtimeFieldIndex, watermarkExpr);
return push(relNode);
} | 3.26 |
flink_FlinkRelBuilder_windowAggregate_rdh | /**
* Build window aggregate for either aggregate or table aggregate.
*/
public RelBuilder windowAggregate(LogicalWindow window, GroupKey groupKey, List<NamedWindowProperty> namedProperties, Iterable<AggCall> aggCalls) {
// build logical aggregate
// Because of:
// [CALCITE-3763] RelBuilder.aggregate should prune unused fields from the input,
// if the input is a Project.
//
// the field can not be pruned if it is referenced by other expressions
// of the window aggregation (i.e. the TUMBLE_START/END).
// To solve this, we configure the RelBuilder to disable this feature.
final LogicalAggregate aggregate = ((LogicalAggregate) (super.transform(t -> t.withPruneInputOfAggregate(false)).push(build()).aggregate(groupKey, aggCalls).build()));
// build logical window aggregate from it
final RelNode windowAggregate;
if (isTableAggregate(aggregate.getAggCallList())) {
windowAggregate = LogicalWindowTableAggregate.create(window, namedProperties, aggregate);
} else {
windowAggregate = LogicalWindowAggregate.create(window, namedProperties, aggregate);
}
return push(windowAggregate);
} | 3.26 |
flink_FlinkRelBuilder_aggregate_rdh | /**
* Build non-window aggregate for either aggregate or table aggregate.
*/
@Override
public RelBuilder aggregate(RelBuilder.GroupKey groupKey, Iterable<RelBuilder.AggCall> aggCalls) {
// build a relNode, the build() may also return a project
RelNode relNode = super.aggregate(groupKey, aggCalls).build();
if (relNode instanceof LogicalAggregate) {
final LogicalAggregate logicalAggregate = ((LogicalAggregate) (relNode));
if (isTableAggregate(logicalAggregate.getAggCallList())) {
relNode = LogicalTableAggregate.create(logicalAggregate);
} else if (isCountStarAgg(logicalAggregate)) {
final RelNode newAggInput = push(logicalAggregate.getInput(0)).project(literal(0)).build();
relNode = logicalAggregate.copy(logicalAggregate.getTraitSet(), ImmutableList.of(newAggInput));
}
}
return push(relNode);
} | 3.26 |
flink_FlinkRelBuilder_pushFunctionScan_rdh | /**
* {@link RelBuilder#functionScan(SqlOperator, int, Iterable)} cannot work smoothly with aliases
* which is why we implement a custom one. The method is static because some {@link RelOptRule}s
* don't use {@link FlinkRelBuilder}.
*/
public static RelBuilder pushFunctionScan(RelBuilder relBuilder, SqlOperator operator, int inputCount,
Iterable<RexNode> operands, List<String> aliases) {
Preconditions.checkArgument(operator instanceof BridgingSqlFunction.WithTableFunction, "Table function expected.");
final RexBuilder rexBuilder = relBuilder.getRexBuilder();
final RelDataTypeFactory typeFactory = relBuilder.getTypeFactory();
final List<RelNode> inputs = new LinkedList<>();
for (int i = 0; i < inputCount; i++) {
inputs.add(0, relBuilder.build());
}
final List<RexNode> operandList = CollectionUtil.iterableToList(operands);
final RelDataType functionRelDataType = rexBuilder.deriveReturnType(operator, operandList);
final List<RelDataType> fieldRelDataTypes;
if (functionRelDataType.isStruct()) {
fieldRelDataTypes = functionRelDataType.getFieldList().stream().map(RelDataTypeField::getType).collect(Collectors.toList());
} else {
fieldRelDataTypes = Collections.singletonList(functionRelDataType);
}
final RelDataType rowRelDataType = typeFactory.createStructType(fieldRelDataTypes, aliases);
final RexNode call = rexBuilder.makeCall(rowRelDataType, operator, operandList);
final RelNode functionScan = LogicalTableFunctionScan.create(relBuilder.getCluster(), inputs, call, null, rowRelDataType, Collections.emptySet());
return relBuilder.push(functionScan);
} | 3.26 |
flink_SplitEnumeratorContext_registeredReadersOfAttempts_rdh | /**
* Get the currently registered readers of all the subtask attempts. The mapping is from subtask
* id to a map which maps an attempt to its reader info.
*
* @return the currently registered readers.
*/
default Map<Integer, Map<Integer, ReaderInfo>> registeredReadersOfAttempts() {
throw new UnsupportedOperationException();
} | 3.26 |
flink_SplitEnumeratorContext_assignSplit_rdh | /**
* Assigns a single split.
*
* <p>When assigning multiple splits, it is more efficient to assign all of them in a single
* call to the {@link #assignSplits(SplitsAssignment)} method.
*
* @param split
* The new split
* @param subtask
* The index of the operator's parallel subtask that shall receive the split.
*/
default void assignSplit(SplitT split, int subtask) {
assignSplits(new SplitsAssignment<>(split, subtask));
} | 3.26 |
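A hedged sketch of the batched form that the Javadoc above recommends over repeated single-split assignments; the enumerator context, split, and subtask index are placeholders supplied by the caller.

```java
import java.util.Collections;

import org.apache.flink.api.connector.source.SourceSplit;
import org.apache.flink.api.connector.source.SplitEnumeratorContext;
import org.apache.flink.api.connector.source.SplitsAssignment;

class AssignmentSketch {
    /** Batched equivalent of the single-split shortcut shown above. */
    static <SplitT extends SourceSplit> void assignOne(
            SplitEnumeratorContext<SplitT> context, SplitT split, int subtask) {
        context.assignSplits(new SplitsAssignment<>(
                Collections.singletonMap(subtask, Collections.singletonList(split))));
    }
}
```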
flink_SplitEnumeratorContext_sendEventToSourceReader_rdh | /**
* Send a source event to a source reader. The source reader is identified by its subtask id and
* attempt number. It is similar to {@link #sendEventToSourceReader(int, SourceEvent)} but it is
* aware of the subtask execution attempt to send this event to.
*
* <p>The {@link SplitEnumerator} must invoke this method instead of {@link #sendEventToSourceReader(int, SourceEvent)} if it is used in cases where a subtask can have
* multiple concurrent execution attempts, e.g. if speculative execution is enabled. Otherwise
* an error will be thrown when the split enumerator tries to send a custom source event.
*
* @param subtaskId
* the subtask id of the source reader to send this event to.
* @param attemptNumber
* the attempt number of the source reader to send this event to.
* @param event
* the source event to send.
*/
default void sendEventToSourceReader(int subtaskId, int attemptNumber, SourceEvent event) {
throw new UnsupportedOperationException();
} | 3.26 |
flink_ArrayData_createElementGetter_rdh | // ------------------------------------------------------------------------------------------
// Access Utilities
// ------------------------------------------------------------------------------------------
/**
* Creates an accessor for getting elements in an internal array data structure at the given
* position.
*
* @param elementType
* the element type of the array
*/
static ElementGetter createElementGetter(LogicalType elementType) {
final ElementGetter v0;
// ordered by type root definition
switch (elementType.getTypeRoot()) {
case CHAR :
case VARCHAR :
v0 = ArrayData::getString;
break;
case BOOLEAN :
v0 = ArrayData::getBoolean;
break;
case BINARY :
case VARBINARY :
v0 = ArrayData::getBinary;
break;
case DECIMAL :
final int decimalPrecision = getPrecision(elementType);
final int decimalScale = getScale(elementType);
v0 = (array, pos) -> array.getDecimal(pos, decimalPrecision, decimalScale);
break;
case TINYINT :
v0 = ArrayData::getByte;
break;
case SMALLINT :
v0 = ArrayData::getShort;
break;
case INTEGER :
case DATE :
case TIME_WITHOUT_TIME_ZONE :
case INTERVAL_YEAR_MONTH :
v0 = ArrayData::getInt;
break;
case BIGINT :
case INTERVAL_DAY_TIME :
v0 = ArrayData::getLong;
break;
case FLOAT :
v0 = ArrayData::getFloat;
break;
case DOUBLE :
v0 = ArrayData::getDouble;
break;
case TIMESTAMP_WITHOUT_TIME_ZONE :
case TIMESTAMP_WITH_LOCAL_TIME_ZONE :
final int timestampPrecision = getPrecision(elementType);
v0 = (array, pos) -> array.getTimestamp(pos, timestampPrecision);
break;
case TIMESTAMP_WITH_TIME_ZONE :
throw new UnsupportedOperationException();
case ARRAY :
v0 = ArrayData::getArray;
break;
case MULTISET :
case MAP :
v0 = ArrayData::getMap;
break;
case ROW :
case STRUCTURED_TYPE :
final int rowFieldCount = getFieldCount(elementType);
v0 = (array, pos) -> array.getRow(pos, rowFieldCount);
break;
case DISTINCT_TYPE :
v0 = createElementGetter(((DistinctType) (elementType)).getSourceType());
break;
case RAW :
v0 = ArrayData::getRawValue;
break;
case NULL :
case SYMBOL :
case UNRESOLVED :
default :
throw new IllegalArgumentException();
}
if (!elementType.isNullable()) {
return v0;
}
return (array, pos) -> {
if (array.isNullAt(pos)) {
return null;
}
return v0.getElementOrNull(array, pos);
};
} | 3.26 |
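A short usage sketch for the accessor above: build an `ElementGetter` once for the element type, then read positions out of any `ArrayData` (here a `GenericArrayData` backed by a boxed array so that nulls are representable).

```java
import org.apache.flink.table.data.ArrayData;
import org.apache.flink.table.data.GenericArrayData;
import org.apache.flink.table.types.logical.IntType;

class ElementGetterExample {
    public static void main(String[] args) {
        ArrayData array = new GenericArrayData(new Integer[] {1, null, 3});
        ArrayData.ElementGetter getter = ArrayData.createElementGetter(new IntType(true));
        for (int i = 0; i < array.size(); i++) {
            System.out.println(getter.getElementOrNull(array, i)); // 1, null, 3
        }
    }
}
```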
flink_CalciteSchemaBuilder_asRootSchema_rdh | /**
* Creates a {@link CalciteSchema} with a given {@link Schema} as the root.
*
* @param root
* schema to use as a root schema
* @return calcite schema with given schema as the root
*/
public static CalciteSchema asRootSchema(Schema root) {
return new SimpleCalciteSchema(null, root, "");
} | 3.26 |
flink_WebLogAnalysis_getDocumentsDataSet_rdh | // *************************************************************************
// UTIL METHODS
// *************************************************************************
private static DataSet<Tuple2<String, String>> getDocumentsDataSet(ExecutionEnvironment env, ParameterTool params) {
// Create DataSet for documents relation (URL, Doc-Text)
if (params.has("documents")) {
return env.readCsvFile(params.get("documents")).fieldDelimiter("|").types(String.class, String.class);
} else {
System.out.println("Executing WebLogAnalysis example with default documents data set.");
System.out.println("Use --documents to specify file input.");
return WebLogData.getDocumentDataSet(env);
}
} | 3.26 |
flink_WebLogAnalysis_coGroup_rdh | /**
* If the visit iterator is empty, all pairs of the rank iterator are emitted. Otherwise, no
* pair is emitted.
*
* <p>Output Format: 0: RANK 1: URL 2: AVG_DURATION
*/
@Override
public void coGroup(Iterable<Tuple3<Integer, String, Integer>> ranks, Iterable<Tuple1<String>> visits, Collector<Tuple3<Integer, String, Integer>> out) {
// Check if there is an entry in the visits relation
if (!visits.iterator().hasNext()) {
for (Tuple3<Integer, String, Integer> next : ranks) {
// Emit all rank pairs
out.collect(next);
}
}
} | 3.26 |
flink_WebLogAnalysis_main_rdh | // *************************************************************************
// PROGRAM
// *************************************************************************
public static void main(String[] args) throws Exception {
LOGGER.warn(DATASET_DEPRECATION_INFO);
final ParameterTool params = ParameterTool.fromArgs(args);
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.getConfig().setGlobalJobParameters(params);
// get input data
DataSet<Tuple2<String, String>> documents = getDocumentsDataSet(env, params);
DataSet<Tuple3<Integer, String, Integer>> ranks = getRanksDataSet(env, params);
DataSet<Tuple2<String, String>> visits = getVisitsDataSet(env, params);
// Retain documents with keywords
DataSet<Tuple1<String>> filterDocs = documents.filter(new FilterDocByKeyWords()).project(0);
// Filter ranks by minimum rank
DataSet<Tuple3<Integer, String, Integer>> filterRanks = ranks.filter(new FilterByRank());
// Filter visits by visit date
DataSet<Tuple1<String>> filterVisits = visits.filter(new FilterVisitsByDate()).project(0);
// Join the filtered documents and ranks, i.e., get all URLs with min rank and keywords
DataSet<Tuple3<Integer, String, Integer>> joinDocsRanks = filterDocs.join(filterRanks).where(0).equalTo(1).projectSecond(0, 1, 2);
// Anti-join urls with visits, i.e., retain all URLs which have NOT been visited in a
// certain time
DataSet<Tuple3<Integer, String, Integer>> result = joinDocsRanks.coGroup(filterVisits).where(1).equalTo(0).with(new AntiJoinVisits());
// emit result
if (params.has("output")) {
result.writeAsCsv(params.get("output"), "\n", "|");
// execute program
env.execute("WebLogAnalysis Example");
} else {
System.out.println("Printing result to stdout. Use --output to specify output path.");
result.print();
}
} | 3.26 |
flink_WebLogAnalysis_filter_rdh | /**
* Filters for records of the visits relation where the year of visit is equal to a
* specified value. The URL of all visit records passing the filter is emitted.
*
* <p>Output Format: 0: URL 1: DATE
*/
@Override
public boolean filter(Tuple2<String, String> value) throws Exception {
// Parse date string with the format YYYY-MM-DD and extract the year
String dateString = value.f1;
int year = Integer.parseInt(dateString.substring(0, 4));
return year == YEARFILTER;
} | 3.26 |
flink_BigIntComparator_putNormalizedKey_rdh | /**
* Adds a normalized key containing the normalized number of bits and MSBs of the given record.
* 1 bit determines the sign (negative, zero/positive), 31 bit the bit length of the record.
* Remaining bytes contain the most significant bits of the record.
*/
@Override
public void putNormalizedKey(BigInteger record, MemorySegment target, int offset, int len) {
// add normalized bit length (the larger the length, the larger the value)
int bitLen = 0;
if (len > 0) {
final int signum = record.signum();
bitLen = record.bitLength();
// normalize dependent on sign
// from 0 to Integer.MAX
// OR from Integer.MAX to 0
int normBitLen = (signum < 0) ? Integer.MAX_VALUE - bitLen : bitLen;
// add sign
if (signum >= 0) {
normBitLen |= 1 << 31;
}
for (int i = 0; (i < 4) && (len > 0); i++, len--) {
final byte b = ((byte) (normBitLen >>> (8 * (3 - i))));
target.put(offset++, b);
}
}
// fill remaining bytes with most significant bits
int bitPos = bitLen - 1;
for (; len > 0; len--) {
byte b = 0;
for (int bytePos = 0; (bytePos < 8) && (bitPos >= 0); bytePos++ , bitPos--) {
b <<= 1;
if (record.testBit(bitPos)) {
b |= 1;
}
}
// the last byte might be partially filled, but that's ok within an equal bit length.
// no need for padding bits.
target.put(offset++, b);
}
} | 3.26 |
flink_TypeInformationSerializationSchema_deserialize_rdh | // ------------------------------------------------------------------------
@Override
public T deserialize(byte[] message) {
if (dis != null) {
dis.setBuffer(message);
} else {
dis = new DataInputDeserializer(message);
}
try {
return serializer.deserialize(dis);
} catch (IOException e) {
throw new RuntimeException("Unable to deserialize message", e);
}
} | 3.26 |
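A round-trip sketch for the schema above; the `org.apache.flink.api.common.serialization` package is an assumption based on where the class lives in recent releases.

```java
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.serialization.TypeInformationSerializationSchema;
import org.apache.flink.api.common.typeinfo.Types;

class SchemaRoundTrip {
    public static void main(String[] args) {
        TypeInformationSerializationSchema<String> schema =
                new TypeInformationSerializationSchema<>(Types.STRING, new ExecutionConfig());
        byte[] bytes = schema.serialize("hello");
        System.out.println(schema.deserialize(bytes)); // hello
    }
}
```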
flink_TypeInformationSerializationSchema_isEndOfStream_rdh | /**
* This schema never considers an element to signal end-of-stream, so this method always returns
* false.
*
* @param nextElement
* The element to test for the end-of-stream signal.
* @return Returns false.
*/
@Override
public boolean isEndOfStream(T nextElement) {
return false;
} | 3.26 |
flink_MultipleJobsDetails_getJobs_rdh | // ------------------------------------------------------------------------
public Collection<JobDetails> getJobs() {
return jobs;
} | 3.26 |
flink_CompositeTypeSerializerUtil_m1_rdh | /**
* Overrides the existing nested serializer's snapshots with the provided {@code nestedSnapshots}.
*
* @param compositeSnapshot
* the composite snapshot to overwrite its nested serializers.
* @param nestedSnapshots
* the nested snapshots to overwrite with.
*/
public static void m1(CompositeTypeSerializerSnapshot<?, ?> compositeSnapshot, TypeSerializerSnapshot<?>... nestedSnapshots) {
NestedSerializersSnapshotDelegate delegate = new NestedSerializersSnapshotDelegate(nestedSnapshots);
compositeSnapshot.setNestedSerializersSnapshotDelegate(delegate);
} | 3.26 |
flink_CompositeTypeSerializerUtil_constructIntermediateCompatibilityResult_rdh | /**
* Constructs an {@link IntermediateCompatibilityResult} with the given array of nested
* serializers and their corresponding serializer snapshots.
*
* <p>This result is considered "intermediate", because the actual final result is not yet built
* if it isn't defined. This is the case if the final result is supposed to be {@link TypeSerializerSchemaCompatibility#compatibleWithReconfiguredSerializer(TypeSerializer)},
* where construction of the reconfigured serializer instance should be done by the caller.
*
* <p>For other cases, i.e. {@link TypeSerializerSchemaCompatibility#compatibleAsIs()}, {@link TypeSerializerSchemaCompatibility#compatibleAfterMigration()}, and {@link TypeSerializerSchemaCompatibility#incompatible()}, these results are considered final.
*
* @param newNestedSerializers
* the new nested serializers to check for compatibility.
* @param nestedSerializerSnapshots
* the associated nested serializers' snapshots.
* @return the intermediate compatibility result of the new nested serializers.
*/
public static <T> IntermediateCompatibilityResult<T> constructIntermediateCompatibilityResult(TypeSerializer<?>[] newNestedSerializers, TypeSerializerSnapshot<?>[] nestedSerializerSnapshots) {
Preconditions.checkArgument(newNestedSerializers.length == nestedSerializerSnapshots.length, "Different number of new serializers and existing serializer snapshots.");
TypeSerializer<?>[] nestedSerializers = new TypeSerializer[newNestedSerializers.length];
// check nested serializers for compatibility
boolean nestedSerializerRequiresMigration = false;
boolean hasReconfiguredNestedSerializers = false;
for (int i = 0; i < nestedSerializerSnapshots.length; i++) {
TypeSerializerSchemaCompatibility<?> compatibility = resolveCompatibility(newNestedSerializers[i], nestedSerializerSnapshots[i]);
// if any one of the new nested serializers is incompatible, we can just short circuit
// the result
if (compatibility.isIncompatible()) {
return IntermediateCompatibilityResult.definedIncompatibleResult();
}
if (compatibility.isCompatibleAfterMigration()) {
nestedSerializerRequiresMigration = true;
} else if (compatibility.isCompatibleWithReconfiguredSerializer()) {
hasReconfiguredNestedSerializers = true;
nestedSerializers[i] = compatibility.getReconfiguredSerializer();
} else if (compatibility.isCompatibleAsIs()) {
nestedSerializers[i] = newNestedSerializers[i];
} else {
throw new IllegalStateException("Undefined compatibility type.");
}
}
if (nestedSerializerRequiresMigration) {
return IntermediateCompatibilityResult.definedCompatibleAfterMigrationResult();
}
if (hasReconfiguredNestedSerializers) {
return IntermediateCompatibilityResult.undefinedReconfigureResult(nestedSerializers);
}
// ends up here if everything is compatible as is
return IntermediateCompatibilityResult.definedCompatibleAsIsResult(nestedSerializers);
} | 3.26 |
flink_CompositeTypeSerializerUtil_m0_rdh | /**
* Delegates compatibility checks to a {@link CompositeTypeSerializerSnapshot} instance. This
* can be used by legacy snapshot classes, which have a newer implementation implemented as a
* {@link CompositeTypeSerializerSnapshot}.
*
* @param newSerializer
* the new serializer to check for compatibility.
* @param newCompositeSnapshot
* an instance of the new snapshot class to delegate compatibility
* checks to. This instance should already contain the outer snapshot information.
* @param legacyNestedSnapshots
* the nested serializer snapshots of the legacy composite
* snapshot.
* @return the result compatibility.
*/
public static <T> TypeSerializerSchemaCompatibility<T> m0(TypeSerializer<T> newSerializer, CompositeTypeSerializerSnapshot<T, ? extends TypeSerializer> newCompositeSnapshot, TypeSerializerSnapshot<?>... legacyNestedSnapshots) {
checkArgument(legacyNestedSnapshots.length > 0);
return newCompositeSnapshot.internalResolveSchemaCompatibility(newSerializer, legacyNestedSnapshots);
} | 3.26 |
flink_ProgressiveTimestampsAndWatermarks_createMainOutput_rdh | // ------------------------------------------------------------------------
@Override
public ReaderOutput<T> createMainOutput(PushingAsyncDataInput.DataOutput<T> output, WatermarkUpdateListener watermarkUpdateListener) {
// At the moment, we assume only one output is ever created!
// This assumption is strict, currently, because many of the classes in this implementation
// do not
// support re-assigning the underlying output
checkState((currentMainOutput == null) && (currentPerSplitOutputs == null), "already created a main output");
final WatermarkOutput watermarkOutput = new WatermarkToDataOutput(output, watermarkUpdateListener);
IdlenessManager idlenessManager = new IdlenessManager(watermarkOutput);
final WatermarkGenerator<T> watermarkGenerator = watermarksFactory.createWatermarkGenerator(watermarksContext);
currentPerSplitOutputs = new SplitLocalOutputs<>(output, idlenessManager.getSplitLocalOutput(), watermarkUpdateListener, timestampAssigner, watermarksFactory, watermarksContext);
currentMainOutput = new StreamingReaderOutput<>(output, idlenessManager.getMainOutput(), timestampAssigner, watermarkGenerator, currentPerSplitOutputs);
return currentMainOutput;
} | 3.26 |
flink_ResourceUri_getUri_rdh | /**
* Get resource unique path info.
*/
public String getUri() {
return uri;
} | 3.26 |
flink_ResourceUri_m0_rdh | /**
* Get resource type info.
*/
public ResourceType m0() {
return resourceType;
} | 3.26 |
flink_EitherSerializer_getRightSerializer_rdh | // ------------------------------------------------------------------------
// Accessors
// ------------------------------------------------------------------------
public TypeSerializer<R> getRightSerializer() {
return rightSerializer;
} | 3.26 |
flink_EitherSerializer_isImmutableType_rdh | // ------------------------------------------------------------------------
// TypeSerializer methods
// ------------------------------------------------------------------------
@Override
public boolean isImmutableType() {
return false;
} | 3.26 |
flink_EitherSerializer_snapshotConfiguration_rdh | // ------------------------------------------------------------------------
// Serializer configuration snapshotting & compatibility
// ------------------------------------------------------------------------
@Override
public JavaEitherSerializerSnapshot<L, R> snapshotConfiguration() {
return new JavaEitherSerializerSnapshot<>(this);
} | 3.26 |
flink_IterateExample_main_rdh | // *************************************************************************
// PROGRAM
// *************************************************************************
public static void main(String[] args) throws Exception {
// Checking input parameters
final ParameterTool params = ParameterTool.fromArgs(args);
// set up input for the stream of integer pairs
// obtain execution environment and set setBufferTimeout to 1 to enable
// continuous flushing of the output buffers (lowest latency)
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment().setBufferTimeout(1);
// make parameters available in the web interface
env.getConfig().setGlobalJobParameters(params);
// create input stream of integer pairs
DataStream<Tuple2<Integer, Integer>> inputStream;
if (params.has("input")) {
FileSource<String> fileSource = FileSource.forRecordStreamFormat(new TextLineInputFormat(), new Path(params.get("input"))).build();
inputStream = env.fromSource(fileSource, WatermarkStrategy.noWatermarks(), "Tuples Source").map(new FibonacciInputMap());
} else {
System.out.println("Executing Iterate example with default input data set.");
System.out.println("Use --input to specify file input.");
GeneratorFunction<Long, Tuple2<Integer, Integer>> dataGenerator = new RandomFibonacciGenerator();
DataGeneratorSource<Tuple2<Integer, Integer>> generatorSource = new DataGeneratorSource<>(dataGenerator, BOUND, RateLimiterStrategy.perSecond(20), Types.TUPLE(Types.INT, Types.INT));
inputStream = env.fromSource(generatorSource, WatermarkStrategy.noWatermarks(), "Generated tuples Source");
}
// create an iterative data stream from the input with 5 second timeout
IterativeStream<Tuple5<Integer, Integer, Integer, Integer, Integer>> it = inputStream.map(new InputMap()).iterate(5000L);
// apply the step function to get the next Fibonacci number
// increment the counter and split the output
SingleOutputStreamOperator<Tuple5<Integer, Integer, Integer, Integer, Integer>> step = it.process(new Step());
// close the iteration by selecting the tuples that were directed to the
// 'iterate' channel in the output selector
it.closeWith(step.getSideOutput(ITERATE_TAG));
// to produce the final result, get the input pairs that have the greatest iteration counter
// on a 1 second sliding window
DataStream<Tuple2<Tuple2<Integer, Integer>, Integer>> numbers = step.map(new OutputMap());
// emit results
if (params.has("output")) {
numbers.sinkTo(FileSink.<Tuple2<Tuple2<Integer, Integer>, Integer>>forRowFormat(new Path(params.get("output")), new SimpleStringEncoder<>()).withRollingPolicy(DefaultRollingPolicy.builder().withMaxPartSize(MemorySize.ofMebiBytes(1)).withRolloverInterval(Duration.ofSeconds(10)).build()).build());
} else {
System.out.println("Printing result to stdout. Use --output to specify output path.");
numbers.print();
}
// execute the program
env.execute("Streaming Iteration Example");
} | 3.26 |
flink_DeployParser_parseDeployOutput_rdh | /**
* Parses the output of a Maven build where {@code deploy:deploy} was used, and returns a set of
* deployed modules.
*/
public static Set<String> parseDeployOutput(File buildResult) throws IOException {
try (Stream<String> linesStream = Files.lines(buildResult.toPath())) {
return parseDeployOutput(linesStream);
}
} | 3.26 |
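A short, hypothetical usage sketch for the parser above: the log-file name is an assumption, and the import of DeployParser itself is omitted because its package depends on the surrounding tooling module.
import java.io.File;
import java.io.IOException;
import java.util.Set;

public class DeployCheckSketch {
    public static void main(String[] args) throws IOException {
        // "mvn-deploy.log" is an assumed path to a captured Maven build log.
        Set<String> deployedModules = DeployParser.parseDeployOutput(new File("mvn-deploy.log"));
        // Print every module that the build actually deployed.
        deployedModules.forEach(System.out::println);
    }
}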
flink_StandaloneHaServices_getResourceManagerLeaderRetriever_rdh | // ------------------------------------------------------------------------
// Services
// ------------------------------------------------------------------------
@Override
public LeaderRetrievalService getResourceManagerLeaderRetriever() {
synchronized(lock) {
checkNotShutdown();
return new StandaloneLeaderRetrievalService(f0, DEFAULT_LEADER_ID);
}
} | 3.26 |
flink_ParquetRowDataBuilder_createWriterFactory_rdh | /**
* Create a parquet {@link BulkWriter.Factory}.
*
* @param rowType
* row type of parquet table.
* @param conf
* hadoop configuration.
* @param utcTimestamp
* Whether to use the UTC timezone or the local timezone for the conversion between epoch time
* and LocalDateTime. Hive 0.x/1.x/2.x use the local timezone, but Hive 3.x uses the UTC timezone.
*/
public static ParquetWriterFactory<RowData> createWriterFactory(RowType rowType, Configuration conf, boolean utcTimestamp) {
return new ParquetWriterFactory<>(new FlinkParquetBuilder(rowType, conf, utcTimestamp));
} | 3.26 |
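A minimal usage sketch for the factory above: the row type, the output path, and the wrapping class name are illustrative assumptions, and the Hadoop Configuration is left at its defaults.
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.parquet.ParquetWriterFactory;
import org.apache.flink.formats.parquet.row.ParquetRowDataBuilder;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.hadoop.conf.Configuration;

public class ParquetSinkSketch {
    public static FileSink<RowData> buildSink() {
        // A simple two-column row type; the field names are illustrative.
        RowType rowType =
                RowType.of(
                        new LogicalType[] {new IntType(), new VarCharType(VarCharType.MAX_LENGTH)},
                        new String[] {"id", "name"});
        // utcTimestamp = true matches Hive 3.x semantics, per the javadoc above.
        ParquetWriterFactory<RowData> factory =
                ParquetRowDataBuilder.createWriterFactory(rowType, new Configuration(), true);
        // Hand the bulk writer factory to a FileSink; "/tmp/parquet-out" is an assumed path.
        return FileSink.forBulkFormat(new Path("/tmp/parquet-out"), factory).build();
    }
}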
flink_TaskManagerConfiguration_fromConfiguration_rdh | // --------------------------------------------------------------------------------------------
// Static factory methods
// --------------------------------------------------------------------------------------------
public static TaskManagerConfiguration fromConfiguration(Configuration configuration, TaskExecutorResourceSpec taskExecutorResourceSpec, String externalAddress, File tmpWorkingDirectory) {
int numberSlots = configuration.getInteger(TaskManagerOptions.NUM_TASK_SLOTS, 1);
if (numberSlots == (-1)) {
numberSlots = 1;
}
final String[] tmpDirPaths = ConfigurationUtils.parseTempDirectories(configuration);
final Duration rpcTimeout = configuration.get(AkkaOptions.ASK_TIMEOUT_DURATION);
f0.debug("Messages have a max timeout of " + rpcTimeout);
final Duration slotTimeout = configuration.get(TaskManagerOptions.SLOT_TIMEOUT);
Duration finiteRegistrationDuration;
try {
finiteRegistrationDuration = configuration.get(TaskManagerOptions.REGISTRATION_TIMEOUT);
} catch (IllegalArgumentException e) {
f0.warn("Invalid format for parameter {}. Set the timeout to be infinite.", TaskManagerOptions.REGISTRATION_TIMEOUT.key());
finiteRegistrationDuration = null;
}
final boolean exitOnOom = configuration.getBoolean(TaskManagerOptions.KILL_ON_OUT_OF_MEMORY);
final String taskManagerLogPath = configuration.getString(ConfigConstants.TASK_MANAGER_LOG_PATH_KEY, System.getProperty("log.file"));
final String taskManagerStdoutPath;
final String taskManagerLogDir;
if (taskManagerLogPath != null) {
final int extension = taskManagerLogPath.lastIndexOf('.');
taskManagerLogDir = new File(taskManagerLogPath).getParent();
if (extension > 0) {
taskManagerStdoutPath = taskManagerLogPath.substring(0, extension) + ".out";
} else {
taskManagerStdoutPath = null;
}
} else {
taskManagerStdoutPath = null;
taskManagerLogDir = null;
}
final RetryingRegistrationConfiguration retryingRegistrationConfiguration = RetryingRegistrationConfiguration.fromConfiguration(configuration);
return new TaskManagerConfiguration(
        numberSlots,
        TaskExecutorResourceUtils.generateDefaultSlotResourceProfile(taskExecutorResourceSpec, numberSlots),
        TaskExecutorResourceUtils.generateTotalAvailableResourceProfile(taskExecutorResourceSpec),
        tmpDirPaths,
        rpcTimeout,
        slotTimeout,
        finiteRegistrationDuration,
        configuration,
        exitOnOom,
        taskManagerLogPath,
        taskManagerStdoutPath,
        taskManagerLogDir,
        externalAddress,
        tmpWorkingDirectory,
        retryingRegistrationConfiguration);
} | 3.26
flink_RefCountedTmpFileCreator_apply_rdh | /**
* Gets the next temp file and a stream to that temp file. The temp file is created atomically,
* making sure no previous file is overwritten.
*
* <p>This method is safe against concurrent use.
*
* @return A pair of temp file and output stream to that temp file.
* @throws IOException
* Thrown, if the stream to the temp file could not be opened.
*/
@Override
public RefCountedFileWithStream apply(File file) throws IOException {
final File directory = tempDirectories[nextIndex()];
while (true) {
try {
if (file == null) {
final File newFile = new File(directory, ".tmp_" + UUID.randomUUID());
final OutputStream out = Files.newOutputStream(newFile.toPath(), StandardOpenOption.CREATE_NEW);
return RefCountedFileWithStream.newFile(newFile, out);
} else {
final OutputStream out = Files.newOutputStream(file.toPath(), StandardOpenOption.APPEND);
return RefCountedFileWithStream.restoredFile(file, out, file.length());
}
} catch (FileAlreadyExistsException ignored) {
// fall through the loop and retry
}
}
} | 3.26 |
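A brief sketch of how the apply contract above might be used. The creator and the existing file are assumed to be provided by the surrounding code, and the Flink imports are omitted because the classes' package differs between Flink versions; only the call shapes documented in the snippet are exercised.
import java.io.File;
import java.io.IOException;

public class TmpFileSketch {
    // 'creator' is assumed to be an already-constructed RefCountedTmpFileCreator.
    static RefCountedFileWithStream newSpillFile(RefCountedTmpFileCreator creator) throws IOException {
        return creator.apply(null); // null => atomically create a brand-new temp file
    }

    static RefCountedFileWithStream reopenSpillFile(RefCountedTmpFileCreator creator, File existing)
            throws IOException {
        return creator.apply(existing); // non-null => re-open the given file for appending
    }
}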
flink_WindowKeySerializer_equals_rdh | // ------------------------------------------------------------------------------------------
@Override
public boolean equals(Object obj) {
return (obj instanceof WindowKeySerializer) && keySerializer.equals(((WindowKeySerializer) (obj)).keySerializer);
} | 3.26 |
flink_WindowKeySerializer_serializeToPages_rdh | /**
* The return value only exists to save a checkSkipReadForFixLengthPart call in
* mapFromPages; the cost is very small.
*
* <p>TODO so, we can remove this return value for simplifying interface.
*/
@Override
public int serializeToPages(WindowKey record, AbstractPagedOutputView target) throws IOException {
target.writeLong(record.getWindow());
keySerializer.serializeToPages(record.getKey(), target);
// We cannot return the num of bytes skipped by keySerializer. The return value is to help
// better relocate the start offset where the data is located, and the offset we need here
// is the offset that we started to write.
// Consider this case:
// |----First segment----|Second Segment|
// |--------Left 10 bytes|--------------|
// In fact, we will write 8 bytes in the first segment and skip the next two bytes. At this
// time, its offset should also be 0.
return 0;
} | 3.26 |
flink_ProtobufInternalUtils_underScoreToCamelCase_rdh | /**
* Converts an underscore-separated name to a camel-case name.
*/
public static String underScoreToCamelCase(String name, boolean capNext) {
return SchemaUtil.toCamelCase(name, capNext);
} | 3.26 |
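A tiny call-shape sketch for the helper above. The input string is illustrative, the expected outputs in the comments are assumptions (the exact result is defined by SchemaUtil.toCamelCase), and the import of ProtobufInternalUtils is omitted because its package is version-dependent.
public class CamelCaseSketch {
    public static void main(String[] args) {
        // capNext controls capitalization of the leading character (assumed).
        System.out.println(ProtobufInternalUtils.underScoreToCamelCase("user_id", true));  // e.g. "UserId" (assumed)
        System.out.println(ProtobufInternalUtils.underScoreToCamelCase("user_id", false)); // e.g. "userId" (assumed)
    }
}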
flink_SplitDataProperties_splitsPartitionedBy_rdh | /**
* Defines that data is partitioned using an identifiable method across input splits on the
* fields defined by field expressions. Multiple field expressions must be separated by the
* semicolon ';' character. All records sharing the same key (combination) must be contained in
* a single input split.
*
* <p><b> IMPORTANT: Providing wrong information with SplitDataProperties can cause wrong
* results! </b>
*
* @param partitionMethodId
* An ID for the method that was used to partition the data across
* splits.
* @param partitionFields
* The field expressions of the partitioning keys.
* @return This SplitDataProperties object.
*/
public SplitDataProperties<T> splitsPartitionedBy(String partitionMethodId, String partitionFields) {
if (partitionFields == null) {
throw new InvalidProgramException("PartitionFields may not be null.");
}
String[] partitionKeysA = partitionFields.split(";");
if (partitionKeysA.length == 0) {
throw new InvalidProgramException("PartitionFields may not be empty.");
}
this.splitPartitionKeys = getAllFlatKeys(partitionKeysA);
if (partitionMethodId != null) {
this.splitPartitioner = new SourcePartitionerMarker<>(partitionMethodId);
} else {
this.splitPartitioner = null;
}
return this;
} | 3.26 |
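A usage sketch for the method above on the DataSet API. The CSV file, the field types, and the "byUserHash" partition-method ID are illustrative assumptions.
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.tuple.Tuple2;

public class SplitPropertiesSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // "users.csv" is an assumed input; each record is (userId, name).
        DataSource<Tuple2<Long, String>> users =
                env.readCsvFile("users.csv").types(Long.class, String.class);
        // Declare that every input split already contains all records of a given user id (field f0),
        // so the optimizer may skip a re-partitioning step. Wrong declarations lead to wrong results.
        users.getSplitDataProperties().splitsPartitionedBy("byUserHash", "f0");
        users.print();
    }
}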
flink_SplitDataProperties_splitsGroupedBy_rdh | /**
* Defines that the data within an input split is grouped on the fields defined by the field
* expressions. Multiple field expressions must be separated by the semicolon ';' character. All
* records sharing the same key (combination) must be subsequently emitted by the input format
* for each input split.
*
* <p><b> IMPORTANT: Providing wrong information with SplitDataProperties can cause wrong
* results! </b>
*
* @param groupFields
* The field expressions of the grouping keys.
* @return This SplitDataProperties object.
*/
public SplitDataProperties<T> splitsGroupedBy(String groupFields) {
if (groupFields == null) {
throw new InvalidProgramException("GroupFields may not be null.");
}
String[] groupKeysA = groupFields.split(";");
if (groupKeysA.length == 0) {
throw new InvalidProgramException("GroupFields may not be empty.");
}
if (this.splitOrdering != null) {
throw new InvalidProgramException("DataSource may either be grouped or sorted.");
}
this.splitGroupKeys = getAllFlatKeys(groupKeysA);
return this;
} | 3.26 |
flink_SplitDataProperties_getAllFlatKeys_rdh | // ///////////////////// FLAT FIELD EXTRACTION METHODS
private int[] getAllFlatKeys(String[] fieldExpressions) {
int[] allKeys = null;
for (String keyExp : fieldExpressions) {
Keys.ExpressionKeys<T> ek = new Keys.ExpressionKeys<>(keyExp, this.type);
int[] flatKeys = ek.computeLogicalKeyPositions();
if (allKeys == null) {
allKeys = flatKeys;
} else {
// check for duplicates
for (int key1 : flatKeys) {
for (int key2 : allKeys) {
if (key1 == key2) {
throw new InvalidProgramException("Duplicate fields in field expression " + keyExp);
}
}
}
// append flat keys
int oldLength = allKeys.length;
int newLength = oldLength + flatKeys.length;
allKeys = Arrays.copyOf(allKeys, newLength);
System.arraycopy(flatKeys, 0, allKeys, oldLength, flatKeys.length);
}
}
return allKeys;
} | 3.26 |
flink_SplitDataProperties_splitsOrderedBy_rdh | /**
* Defines that the data within an input split is sorted on the fields defined by the field
* expressions in the specified orders. Multiple field expressions must be separated by the
* semicolon ';' character. All records of an input split must be emitted by the input format in
* the defined order.
*
* <p><b> IMPORTANT: Providing wrong information with SplitDataProperties can cause wrong
* results! </b>
*
* @param orderFields
* The field expressions of the grouping key.
* @param orders
* The orders of the fields.
* @return This SplitDataProperties object.
*/
public SplitDataProperties<T> splitsOrderedBy(String orderFields, Order[] orders) {
if ((orderFields == null) || (orders == null)) {
throw new InvalidProgramException("OrderFields or Orders may not be null.");
}
String[] orderKeysA = orderFields.split(";");
if (orderKeysA.length == 0) {
throw new InvalidProgramException("OrderFields may not be empty.");
} else if (orders.length == 0) {
throw new InvalidProgramException("Orders may not be empty");
} else if (orderKeysA.length != orders.length) {
throw new InvalidProgramException("Number of OrderFields and Orders must match.");
}
if (this.splitGroupKeys != null) {
throw new InvalidProgramException("DataSource may either be grouped or sorted.");
}
this.splitOrdering = new Ordering();
for (int i = 0; i < orderKeysA.length; i++) {
String keyExp = orderKeysA[i];
Keys.ExpressionKeys<T> ek = new Keys.ExpressionKeys<>(keyExp, this.type);
int[] flatKeys = ek.computeLogicalKeyPositions();
for (int key : flatKeys) {
// check for duplicates
for (int okey : splitOrdering.getFieldPositions()) {
if (key == okey) {
throw new InvalidProgramException("Duplicate field in field expression " + keyExp);
}
}
// append key
this.splitOrdering.appendOrdering(key, null, orders[i]);
}
}
return this;
} | 3.26 |
flink_SplitDataProperties_m0_rdh | /**
* Defines that data is partitioned across input splits on the fields defined by field
* positions. All records sharing the same key (combination) must be contained in a single input
* split.
*
* <p><b> IMPORTANT: Providing wrong information with SplitDataProperties can cause wrong
* results! </b>
*
* @param partitionFields
* The field positions of the partitioning keys.
* @return This SplitDataProperties object.
*/
public SplitDataProperties<T> m0(int... partitionFields) {
return this.splitsPartitionedBy(null, partitionFields);
} | 3.26 |
flink_ListViewSerializer_transformLegacySerializerSnapshot_rdh | /**
* We need to override this as a {@link LegacySerializerSnapshotTransformer} because in Flink
* 1.6.x and below, this serializer was incorrectly returning directly the snapshot of the
* nested list serializer as its own snapshot.
*
* <p>This method transforms the incorrect list serializer snapshot to be a proper {@link ListViewSerializerSnapshot}.
*/
@Override
public <U> TypeSerializerSnapshot<ListView<T>> transformLegacySerializerSnapshot(TypeSerializerSnapshot<U> legacySnapshot) {
if (legacySnapshot instanceof ListViewSerializerSnapshot) {
return ((TypeSerializerSnapshot<ListView<T>>) (legacySnapshot));
} else {
throw new UnsupportedOperationException(legacySnapshot.getClass().getCanonicalName() + " is not supported.");
}
} | 3.26
flink_RocksDBNativeMetricOptions_m3_rdh | /**
* Whether the {@link RocksDBNativeMetricMonitor} should expose the column family as a variable.
*
* @return true if the column family is exposed as a variable, false otherwise.
*/
public boolean m3() {
return this.columnFamilyAsVariable;
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableBackgroundErrors_rdh | /**
* Enables the metric that reports the accumulated number of background errors.
*/
public void enableBackgroundErrors() {
this.properties.add(RocksDBProperty.BackgroundErrors.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableEstimateNumKeys_rdh | /**
* Enables the metric that reports the estimated total number of keys in the active and unflushed
* immutable memtables and storage.
*/
public void enableEstimateNumKeys() {
this.properties.add(RocksDBProperty.EstimateNumKeys.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableEstimateLiveDataSize_rdh | /**
* Enables the metric that reports an estimate of the amount of live data in bytes.
*/
public void enableEstimateLiveDataSize() {
this.properties.add(RocksDBProperty.EstimateLiveDataSize.getRocksDBProperty());
} | 3.26 |
flink_RocksDBNativeMetricOptions_enableCompactionPending_rdh | /**
* Enables the metric that reports 1 if at least one compaction is pending, and 0 otherwise.
*/
public void enableCompactionPending() {
this.properties.add(RocksDBProperty.CompactionPending.getRocksDBProperty());
} | 3.26 |
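A small sketch showing how the enable* switches above might be combined. Whether the no-arg constructor is the right entry point is an assumption; the options are also commonly built from configuration.
import org.apache.flink.contrib.streaming.state.RocksDBNativeMetricOptions;

public class RocksDBMetricsSketch {
    public static RocksDBNativeMetricOptions buildMetricOptions() {
        // Assumes a public no-arg constructor; otherwise use the configuration-based factory.
        RocksDBNativeMetricOptions metricOptions = new RocksDBNativeMetricOptions();
        // Opt in to a few native metrics before handing the options to the state backend.
        metricOptions.enableBackgroundErrors();
        metricOptions.enableEstimateNumKeys();
        metricOptions.enableEstimateLiveDataSize();
        metricOptions.enableCompactionPending();
        return metricOptions;
    }
}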