name | code_snippet | score
---|---|---|
hadoop_AuditingIntegration_enterStageWorker | /**
* Update the thread context with the stage name and
* job ID.
* This MUST be invoked at the start of methods invoked in helper threads,
* to ensure that they are all annotated with job and stage.
* @param jobId job ID.
* @param stage stage name.
*/
public static void enterStageWorker(String jobId, String stage) {
CommonAuditContext context = currentAuditContext();
context.put(PARAM_JOB_ID, jobId);
context.put(CONTEXT_ATTR_STAGE, stage);
} | 3.68 |
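A brief usage sketch for the helper above (the job ID, stage name and thread-pool wiring are hypothetical, not taken from the Hadoop source):

```java
// Submitted to a helper thread pool; the audit context is thread-local,
// so each worker annotates itself before doing any stage work.
Runnable stageWork = () -> {
    AuditingIntegration.enterStageWorker("job_20240101_0001", "setup");
    // ... stage work now runs with job ID and stage in the audit context ...
};
```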
streampipes_IgnoreBlocksAfterContentFilter_getDefaultInstance | /**
* Returns the singleton instance of IgnoreBlocksAfterContentFilter.
*/
public static IgnoreBlocksAfterContentFilter getDefaultInstance() {
return DEFAULT_INSTANCE;
} | 3.68 |
framework_AbsoluteLayout_getTopValue | /**
* Gets the 'top' attribute's value in current units.
*
* @see #getTopUnits()
* @return The value of the 'top' attribute, null if not set
*/
public Float getTopValue() {
return topValue;
} | 3.68 |
AreaShop_FileManager_postUpdateFiles | /**
* Checks for old file formats and converts them to the latest format.
* This is to be triggered after the load of the region files.
*/
private void postUpdateFiles() {
Integer fileStatus = versions.get(AreaShop.versionFiles);
// If the files are already the current version
if(fileStatus != null && fileStatus == AreaShop.versionFilesCurrent) {
return;
}
// Add 'general.lastActive' to rented/bought regions (initialize at current time)
if(fileStatus == null || fileStatus < 3) {
for(GeneralRegion region : getRegions()) {
region.updateLastActiveTime();
}
// Update versions file to 3
versions.put(AreaShop.versionFiles, 3);
saveVersions();
if(!getRegions().isEmpty()) {
AreaShop.info(" Added last active time to regions (v2 to v3)");
}
}
} | 3.68 |
flink_CopyOnWriteStateMap_removeEntry | /** Helper method that is the basis for operations that remove mappings. */
private StateMapEntry<K, N, S> removeEntry(K key, N namespace) {
final int hash = computeHashForOperationAndDoIncrementalRehash(key, namespace);
final StateMapEntry<K, N, S>[] tab = selectActiveTable(hash);
int index = hash & (tab.length - 1);
for (StateMapEntry<K, N, S> e = tab[index], prev = null; e != null; prev = e, e = e.next) {
if (e.hash == hash && key.equals(e.key) && namespace.equals(e.namespace)) {
if (prev == null) {
tab[index] = e.next;
} else {
// copy-on-write check for entry
if (prev.entryVersion < highestRequiredSnapshotVersion) {
prev = handleChainedEntryCopyOnWrite(tab, index, prev);
}
prev.next = e.next;
}
++modCount;
if (tab == primaryTable) {
--primaryTableSize;
} else {
--incrementalRehashTableSize;
}
return e;
}
}
return null;
} | 3.68 |
morf_DatabaseSchemaManager_mutateToSupportSchema | /**
* Mutates the current database schema so that it supports the one requested.
*
* <p>When this method returns, it guarantees that all the tables in {@code schema} are
* present in the database and also empty.</p>
*
* <p>Note it does not guarantee that no other tables exist.</p>
*
* @param schema The schema which the database should support
* @param truncationBehavior The behaviour to use when an existing table is found. Should it be truncated?
*/
public void mutateToSupportSchema(Schema schema, TruncationBehavior truncationBehavior) {
ProducerCache producerCache = new ProducerCache();
try {
Collection<String> tableStatements = ensureTablesExist(schema, truncationBehavior, producerCache);
if (!tableStatements.isEmpty()) {
viewsDeployedByThis.get().clear(); // this will force a drop and redeploy, needed in case the views are affected.
}
// Drop all views in the schema and create the ones we need.
// note that if this class deployed the view already, then leave it alone as it means the view must be based on the current definition
Collection<View> viewsToDrop = viewCache(producerCache).values().stream().filter(v->!viewsDeployedByThis.get().contains(v.getName().toUpperCase())).collect(toList());
Collection<View> viewToDeploy = schema.views().stream().filter(v->!viewsDeployedByThis.get().contains(v.getName().toUpperCase())).collect(toList());
ViewChanges changes = new ViewChanges(
schema.views(),
viewsToDrop,
viewToDeploy
);
Collection<String> sql = Lists.newLinkedList();
for (View view : changes.getViewsToDeploy()) {
sql.addAll(dropTableIfPresent(producerCache, view.getName()));
}
for (View view : changes.getViewsToDrop()) {
sql.addAll(dropViewIfExists(view));
}
sql.addAll(tableStatements);
for (View view: changes.getViewsToDeploy()) {
sql.addAll(deployView(view));
}
executeScript(sql);
} catch (RuntimeException e) {
if (log.isDebugEnabled()) {
log.debug("Invalidating cache. Exception while mutating schema.");
}
invalidateCache();
throw e;
} finally {
producerCache.close();
}
} | 3.68 |
dubbo_Converter_getTargetType | /**
* Get the target type
*
* @return non-null
*/
default Class<T> getTargetType() {
return findActualTypeArgument(getClass(), Converter.class, 1);
} | 3.68 |
framework_TreeTable_addExpandListener | /**
* Adds an expand listener.
*
* @param listener
the listener to be added.
*/
public void addExpandListener(ExpandListener listener) {
addListener(ExpandEvent.class, listener, ExpandListener.EXPAND_METHOD);
} | 3.68 |
hbase_PrivateCellUtil_getValueAsBigDecimal | /**
* Converts the value bytes of the given cell into a BigDecimal
* @return value as BigDecimal
*/
public static BigDecimal getValueAsBigDecimal(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.toBigDecimal(((ByteBufferExtendedCell) cell).getValueByteBuffer(),
((ByteBufferExtendedCell) cell).getValuePosition(), cell.getValueLength());
}
return Bytes.toBigDecimal(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
} | 3.68 |
framework_ScrollbarBundle_truncate | /**
* Truncates a double such that no decimal places are retained.
* <p>
* E.g. {@code trunc(2.3d) == 2.0d} and {@code trunc(-2.3d) == -2.0d}.
*
* @param num
* the double value to be truncated
* @return the {@code num} value without any decimal digits
*/
private static double truncate(double num) {
if (num > 0) {
return Math.floor(num);
} else {
return Math.ceil(num);
}
} | 3.68 |
hbase_RollingStatCalculator_insertDataValue | /**
* Inserts the given data value into the array of data values considered for statistics calculation
*/
public void insertDataValue(long data) {
// if the number of data points has reached the rolling period and the rolling period is
// non-zero, then remove the oldest data point and update the statistics
if (numberOfDataValues >= rollingPeriod && rollingPeriod > 0) {
this.removeData(dataValues[currentIndexPosition]);
}
numberOfDataValues++;
currentSum = currentSum + (double) data;
currentSqrSum = currentSqrSum + ((double) data * data);
if (rollingPeriod > 0) {
dataValues[currentIndexPosition] = data;
currentIndexPosition = (currentIndexPosition + 1) % rollingPeriod;
}
} | 3.68 |
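For context, a running sum and sum of squares like currentSum and currentSqrSum above are sufficient to derive the rolling mean and variance; a minimal sketch of that arithmetic (the bookkeeping of the retained count is assumed, not shown in the class):

```java
double n = retainedCount;                  // assumed: points currently in the window
double mean = currentSum / n;
// population variance E[x^2] - (E[x])^2, clamped against small negative rounding errors
double variance = Math.max(currentSqrSum / n - mean * mean, 0.0);
double stdDev = Math.sqrt(variance);
```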
framework_VaadinPortletRequest_getPortletPreference | /**
* Reads a portlet preference from the portlet of the request.
*
* @param name
* The name of the portlet preference. Cannot be
* <code>null</code>.
*
* @return The value of the portlet preference, <code>null</code> if the
* preference is not defined.
*/
public String getPortletPreference(String name) {
PortletRequest request = getRequest();
PortletPreferences preferences = request.getPreferences();
return preferences.getValue(name, null);
} | 3.68 |
hadoop_S3ListRequest_v1 | /**
* Restricted constructors to ensure v1 or v2, not both.
* @param request v1 request
* @return new list request container
*/
public static S3ListRequest v1(ListObjectsRequest request) {
return new S3ListRequest(request, null);
} | 3.68 |
framework_VFilterSelect_setPromptingOff | /**
* Turns prompting off. When prompting is turned on, a command prompt is
* shown in the text box if nothing has been entered.
* <p>
* For internal use only. May be removed or replaced in the future.
*
* @param text
* The text the text box should contain.
*/
public void setPromptingOff(String text) {
debug("VFS: setPromptingOff()");
setTextboxText(text);
if (prompting) {
prompting = false;
removeStyleDependentName(CLASSNAME_PROMPT);
}
} | 3.68 |
hbase_CloseChecker_isTimeLimit | /**
* Checks, at most once per configured time limit, whether a system stop is requested.
* @return true if the system should stop.
*/
public boolean isTimeLimit(Store store, long now) {
if (closeCheckTimeLimit <= 0) {
return false;
}
final long elapsedMillis = now - lastCloseCheckMillis;
if (elapsedMillis <= closeCheckTimeLimit) {
return false;
}
lastCloseCheckMillis = now;
return !store.areWritesEnabled();
} | 3.68 |
hbase_StorageClusterStatusModel_setDeadNodes | /**
* @param nodes the list of dead node names
*/
public void setDeadNodes(List<String> nodes) {
this.deadNodes = nodes;
} | 3.68 |
hadoop_AbstractS3ACommitter_warnOnActiveUploads | /**
* Scan for active uploads and list them along with a warning message.
* Errors are ignored.
* @param path output path of job.
*/
protected void warnOnActiveUploads(final Path path) {
List<MultipartUpload> pending;
try {
pending = getCommitOperations()
.listPendingUploadsUnderPath(path);
} catch (IOException e) {
LOG.debug("Failed to list uploads under {}",
path, e);
return;
}
if (!pending.isEmpty()) {
// log a warning
LOG.warn("{} active upload(s) in progress under {}",
pending.size(),
path);
LOG.warn("Either jobs are running concurrently"
+ " or failed jobs are not being cleaned up");
// and the paths + timestamps
DateFormat df = DateFormat.getDateTimeInstance();
pending.forEach(u ->
LOG.info("[{}] {}",
df.format(Date.from(u.initiated())),
u.key()));
if (shouldAbortUploadsInCleanup()) {
LOG.warn("This committer will abort these uploads in job cleanup");
}
}
} | 3.68 |
flink_CompactingHashTable_compactPartition | /**
* Compacts (garbage collects) a partition with a copy-compact strategy, using the dedicated compaction partition
*
* @param partitionNumber partition to compact
* @throws IOException
*/
private void compactPartition(final int partitionNumber) throws IOException {
// do nothing if table was closed, parameter is invalid or no garbage exists
if (this.closed
|| partitionNumber >= this.partitions.size()
|| this.partitions.get(partitionNumber).isCompacted()) {
return;
}
// release all segments owned by compaction partition
this.compactionMemory.clearAllMemory(availableMemory);
this.compactionMemory.allocateSegments(1);
this.compactionMemory.pushDownPages();
T tempHolder = this.buildSideSerializer.createInstance();
final int numPartitions = this.partitions.size();
InMemoryPartition<T> partition = this.partitions.remove(partitionNumber);
MemorySegment[] overflowSegments = partition.overflowSegments;
long pointer;
int pointerOffset;
int bucketOffset;
final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
for (int i = 0, bucket = partitionNumber;
i < this.buckets.length && bucket < this.numBuckets;
i++) {
MemorySegment segment = this.buckets[i];
// go over all buckets in the segment belonging to the partition
for (int k = bucket % bucketsPerSegment;
k < bucketsPerSegment && bucket < this.numBuckets;
k += numPartitions, bucket += numPartitions) {
bucketOffset = k * HASH_BUCKET_SIZE;
if ((int) segment.get(bucketOffset + HEADER_PARTITION_OFFSET) != partitionNumber) {
throw new IOException(
"Accessed wrong bucket! wanted: "
+ partitionNumber
+ " got: "
+ segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
}
// loop over all segments that are involved in the bucket (original bucket plus
// overflow buckets)
int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
int numInSegment = 0;
pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
while (true) {
while (numInSegment < countInSegment) {
pointer = segment.getLong(pointerOffset);
tempHolder = partition.readRecordAt(pointer, tempHolder);
pointer = this.compactionMemory.appendRecord(tempHolder);
segment.putLong(pointerOffset, pointer);
pointerOffset += POINTER_LEN;
numInSegment++;
}
// this segment is done. check if there is another chained bucket
final long forwardPointer =
segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
break;
}
final int overflowSegNum = (int) (forwardPointer >>> 32);
segment = overflowSegments[overflowSegNum];
bucketOffset = (int) forwardPointer;
countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
numInSegment = 0;
}
segment = this.buckets[i];
}
}
// swap partition with compaction partition
this.compactionMemory.setPartitionNumber(partitionNumber);
this.partitions.add(partitionNumber, compactionMemory);
this.partitions.get(partitionNumber).overflowSegments = partition.overflowSegments;
this.partitions.get(partitionNumber).numOverflowSegments = partition.numOverflowSegments;
this.partitions.get(partitionNumber).nextOverflowBucket = partition.nextOverflowBucket;
this.partitions.get(partitionNumber).setIsCompacted(true);
// this.partitions.get(partitionNumber).pushDownPages();
this.compactionMemory = partition;
this.compactionMemory.resetRecordCounter();
this.compactionMemory.setPartitionNumber(-1);
this.compactionMemory.overflowSegments = null;
this.compactionMemory.numOverflowSegments = 0;
this.compactionMemory.nextOverflowBucket = 0;
// try to allocate maximum segment count
this.compactionMemory.clearAllMemory(this.availableMemory);
int maxSegmentNumber = this.getMaxPartition();
this.compactionMemory.allocateSegments(maxSegmentNumber);
this.compactionMemory.resetRWViews();
this.compactionMemory.pushDownPages();
} | 3.68 |
dubbo_FileSystemDynamicConfiguration_detectPoolingBasedWatchService | /**
* Detects whether the given {@link WatchService} is based on {@linkplain sun.nio.fs.PollingWatchService}
* or not.
* <p>
* Some platforms do not provide a native implementation of {@link WatchService} and just use
* {@linkplain sun.nio.fs.PollingWatchService}, which periodically polls for file modifications.
*
* @param watchService the instance of {@link WatchService}
* @return <code>true</code> if it is polling-based, otherwise <code>false</code>
*/
private static boolean detectPoolingBasedWatchService(Optional<WatchService> watchService) {
String className =
watchService.map(Object::getClass).map(Class::getName).orElse(null);
return POLLING_WATCH_SERVICE_CLASS_NAME.equals(className);
} | 3.68 |
hadoop_EntityTableRW_createTable | /*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
* createTable(org.apache.hadoop.hbase.client.Admin,
* org.apache.hadoop.conf.Configuration)
*/
public void createTable(Admin admin, Configuration hbaseConf)
throws IOException {
TableName table = getTableName(hbaseConf);
if (admin.tableExists(table)) {
// do not disable / delete existing table
// similar to the approach taken by map-reduce jobs when
// output directory exists
throw new IOException("Table " + table.getNameAsString()
+ " already exists.");
}
HTableDescriptor entityTableDescp = new HTableDescriptor(table);
HColumnDescriptor infoCF =
new HColumnDescriptor(EntityColumnFamily.INFO.getBytes());
infoCF.setBloomFilterType(BloomType.ROWCOL);
entityTableDescp.addFamily(infoCF);
HColumnDescriptor configCF =
new HColumnDescriptor(EntityColumnFamily.CONFIGS.getBytes());
configCF.setBloomFilterType(BloomType.ROWCOL);
configCF.setBlockCacheEnabled(true);
entityTableDescp.addFamily(configCF);
HColumnDescriptor metricsCF =
new HColumnDescriptor(EntityColumnFamily.METRICS.getBytes());
entityTableDescp.addFamily(metricsCF);
metricsCF.setBlockCacheEnabled(true);
// always keep 1 version (the latest)
metricsCF.setMinVersions(1);
metricsCF.setMaxVersions(
hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
DEFAULT_METRICS_TTL));
entityTableDescp.setRegionSplitPolicyClassName(
"org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
entityTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
admin.createTable(entityTableDescp,
TimelineHBaseSchemaConstants.getUsernameSplits());
LOG.info("Status of table creation for " + table.getNameAsString() + "="
+ admin.tableExists(table));
} | 3.68 |
hbase_AbstractFSWAL_getNewPath | /**
* Retrieves the next path to use for writing. Increments the internal filenum.
*/
private Path getNewPath() throws IOException {
this.filenum.set(Math.max(getFilenum() + 1, EnvironmentEdgeManager.currentTime()));
Path newPath = getCurrentFileName();
return newPath;
} | 3.68 |
hadoop_ConfigurationUtils_resolve | /**
* Returns a new Configuration instance with all inline values resolved.
*
* @param conf the configuration to resolve.
* @return a new Configuration instance with all inline values resolved.
*/
public static Configuration resolve(Configuration conf) {
Configuration resolved = new Configuration(false);
for (Map.Entry<String, String> entry : conf) {
resolved.set(entry.getKey(), conf.get(entry.getKey()));
}
return resolved;
} | 3.68 |
hmily_MotanServerConfig_baseServiceConfig | /**
* Creates the basic service config bean for the Motan server.
*
* @return the basic service config bean
*/
@Bean
@ConfigurationProperties(prefix = "hmily.motan.server")
public BasicServiceConfigBean baseServiceConfig() {
return new BasicServiceConfigBean();
} | 3.68 |
hadoop_RouterWebServices_createRequestInterceptorChain | /**
* This method creates and returns reference of the first interceptor in the
* chain of request interceptor instances.
*
* @return the reference of the first interceptor in the chain
*/
@VisibleForTesting
protected RESTRequestInterceptor createRequestInterceptorChain() {
return RouterServerUtil.createRequestInterceptorChain(conf,
YarnConfiguration.ROUTER_WEBAPP_INTERCEPTOR_CLASS_PIPELINE,
YarnConfiguration.DEFAULT_ROUTER_WEBAPP_INTERCEPTOR_CLASS,
RESTRequestInterceptor.class);
} | 3.68 |
flink_SegmentsUtil_bitGet | /**
* Reads a bit from the segments.
*
* @param segments target segments.
* @param baseOffset bits base offset.
* @param index bit index from base offset.
*/
public static boolean bitGet(MemorySegment[] segments, int baseOffset, int index) {
int offset = baseOffset + byteIndex(index);
byte current = getByte(segments, offset);
return (current & (1 << (index & BIT_BYTE_INDEX_MASK))) != 0;
} | 3.68 |
flink_RocksDBStateBackend_setDbStoragePath | /**
* Sets the path where the RocksDB local database files should be stored on the local file
* system. Setting this path overrides the default behavior, where the files are stored across
* the configured temp directories.
*
* <p>Passing {@code null} to this function restores the default behavior, where the configured
* temp directories will be used.
*
* @param path The path where the local RocksDB database files are stored.
*/
public void setDbStoragePath(String path) {
setDbStoragePaths(path == null ? null : new String[] {path});
} | 3.68 |
framework_ColumnVisibilityChangeEvent_isHidden | /**
* Checks whether the column was set hidden or visible.
*
* @return <code>true</code> if the column was hidden, <code>false</code> if
*         it was set visible
*/
public boolean isHidden() {
return hidden;
} | 3.68 |
hadoop_KerberosAuthException_getUser | /** @return The user, or null if not set. */
public String getUser() {
return user;
} | 3.68 |
open-banking-gateway_AccountInformationRequestCommon_fintech_calls_list_accounts_for_max_musterman | // Note that max.musterman is typically used for EMBEDDED (real EMBEDDED that is returned by bank, and not EMBEDDED approach in table)
public SELF fintech_calls_list_accounts_for_max_musterman(String bankProfileId) {
ExtractableResponse<Response> response = withAccountsHeaders(MAX_MUSTERMAN, bankProfileId)
.header(SERVICE_SESSION_ID, UUID.randomUUID().toString())
.when()
.get(AIS_ACCOUNTS_ENDPOINT)
.then()
.statusCode(ACCEPTED.value())
.extract();
updateServiceSessionId(response);
updateRedirectCode(response);
updateNextConsentAuthorizationUrl(response);
return self();
} | 3.68 |
hudi_HoodieTableMetaClient_getTableType | /**
* @return Hoodie Table Type
*/
public HoodieTableType getTableType() {
return tableType;
} | 3.68 |
hbase_TimestampsFilter_getNextCellHint | /**
* Pick the next cell that the scanner should seek to. Since this can skip any number of cells, any
* of which can be a delete, this can resurrect old data. The method will only be used if canHint
* was set to true while creating the filter.
* @throws IOException This will never happen.
*/
@Override
public Cell getNextCellHint(Cell currentCell) throws IOException {
if (!canHint) {
return null;
}
Long nextTimestampObject = timestamps.lower(currentCell.getTimestamp());
if (nextTimestampObject == null) {
// This should only happen if the current column's
// timestamp is below the last one in the list.
//
// It should never happen as the filterCell should return NEXT_COL
// but it's always better to be extra safe and protect against future
// behavioral changes.
return PrivateCellUtil.createLastOnRowCol(currentCell);
}
// Since we know the nextTimestampObject isn't null here there must still be
// timestamps that can be included. Cast the Long to a long and return the
// a cell with the current row/cf/col and the next found timestamp.
long nextTimestamp = nextTimestampObject;
return PrivateCellUtil.createFirstOnRowColTS(currentCell, nextTimestamp);
} | 3.68 |
hadoop_BoundedByteArrayOutputStream_getBuffer | /**
* Returns the underlying buffer.
* Data is only valid to {@link #size()}.
* @return the underlying buffer.
*/
public byte[] getBuffer() {
return buffer;
} | 3.68 |
hadoop_MySQLDataDrivenDBRecordReader_executeQuery | // Execute statements for mysql in unbuffered mode.
protected ResultSet executeQuery(String query) throws SQLException {
statement = getConnection().prepareStatement(query,
ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
statement.setFetchSize(Integer.MIN_VALUE); // MySQL: read row-at-a-time.
return statement.executeQuery();
} | 3.68 |
hbase_TableDescriptorBuilder_newBuilder | /**
* Copy all values, families, and name from the input.
* @param desc The descriptor to copy
* @return A clone of input
*/
public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
return new TableDescriptorBuilder(desc);
} | 3.68 |
flink_FileMergingSnapshotManagerBase_createLogicalFile | /**
* Create a logical file on a physical file.
*
* @param physicalFile the underlying physical file.
* @param startOffset the offset of the physical file that the logical file starts from.
* @param length the length of the logical file.
* @param subtaskKey the id of the subtask that the logical file belongs to.
* @return the created logical file.
*/
protected LogicalFile createLogicalFile(
@Nonnull PhysicalFile physicalFile,
int startOffset,
int length,
@Nonnull SubtaskKey subtaskKey) {
LogicalFileId fileID = LogicalFileId.generateRandomId();
return new LogicalFile(fileID, physicalFile, startOffset, length, subtaskKey);
} | 3.68 |
hbase_TableDescriptorBuilder_toStringTableAttributes | /** Returns map of all table attributes formatted into string. */
public String toStringTableAttributes() {
return getValues(true).toString();
} | 3.68 |
framework_PropertyFilterDefinition_getIgnorePackageNamesStartingWith | /**
* Returns a list of package name prefixes to ignore.
*
* @return list of package name prefixes to ignore
*/
public List<String> getIgnorePackageNamesStartingWith() {
return ignorePackageNamesStartingWith;
} | 3.68 |
framework_PropertyFormatter_readOnlyStatusChange | /**
* Listens for changes in the datasource.
*
* This should not be called directly.
*/
@Override
public void readOnlyStatusChange(Property.ReadOnlyStatusChangeEvent event) {
fireReadOnlyStatusChange();
} | 3.68 |
hadoop_IOStatisticsSnapshot_serializer | /**
* Get a JSON serializer for this class.
* @return a serializer.
*/
public static JsonSerialization<IOStatisticsSnapshot> serializer() {
return new JsonSerialization<>(IOStatisticsSnapshot.class, false, true);
} | 3.68 |
pulsar_LinuxBrokerHostUsageImpl_getTotalCpuUsageForEntireHost | /**
* Reads first line of /proc/stat to get total cpu usage.
*
* <pre>
* cpu user nice system idle iowait irq softirq steal guest guest_nice
* cpu 317808 128 58637 2503692 7634 0 13472 0 0 0
* </pre>
*
* The line is split into "words", skipping the first. The sum of all numbers gives the amount of cpu cycles used so
* far. Real CPU usage should equal that sum minus the idle cycles (that is, idle + iowait); this includes
* user, nice, system, irq, softirq, steal, guest and guest_nice.
*/
private double getTotalCpuUsageForEntireHost() {
LinuxInfoUtils.ResourceUsage cpuUsageForEntireHost = getCpuUsageForEntireHost();
if (cpuUsageForEntireHost.isEmpty()) {
return -1;
}
double currentUsage = (cpuUsageForEntireHost.getUsage() - lastCpuUsage)
/ (cpuUsageForEntireHost.getTotal() - lastCpuTotalTime) * getTotalCpuLimit(isCGroupsEnabled);
lastCpuUsage = cpuUsageForEntireHost.getUsage();
lastCpuTotalTime = cpuUsageForEntireHost.getTotal();
return currentUsage;
} | 3.68 |
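A standalone numeric sketch of the delta computation above (made-up sample values, not taken from the Pulsar code): usage is the fraction of non-idle cycles during the sampling interval, scaled by the CPU limit.

```java
// Hypothetical cumulative counters from two successive /proc/stat reads.
double lastUsage = 390_000, lastTotal = 2_900_000;   // previous sample
double usage = 391_500, total = 2_903_000;           // current sample
double cpuLimit = 8.0;                               // e.g. 8 cores available

double currentUsage = (usage - lastUsage) / (total - lastTotal) * cpuLimit;
// (1_500 / 3_000) * 8 = 4.0 -> on average 4 "cores" worth of CPU were busy
```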
hbase_EnvironmentEdgeManager_getDelegate | /**
* Retrieves the singleton instance of the {@link EnvironmentEdge} that is being managed.
* @return the edge.
*/
public static EnvironmentEdge getDelegate() {
return delegate;
} | 3.68 |
hbase_DateTieredCompactionPolicy_selectMinorCompaction | /**
* We receive store files sorted in ascending order by seqId, then scan the list of files. If the
* current file has a maxTimestamp older than the last known maximum, treat this file as if it carried
* the last known maximum. This way both seqId and timestamp are in the same order. If files carry
* the same maxTimestamps, they are ordered by seqId. We then reverse the list so they are ordered
* by seqId and maxTimestamp in descending order and build the time windows. All the out-of-order
* data goes into the same compaction windows, guaranteeing contiguous compaction based on sequence id.
*/
public CompactionRequestImpl selectMinorCompaction(ArrayList<HStoreFile> candidateSelection,
boolean mayUseOffPeak, boolean mayBeStuck) throws IOException {
long now = EnvironmentEdgeManager.currentTime();
long oldestToCompact = getOldestToCompact(comConf.getDateTieredMaxStoreFileAgeMillis(), now);
List<Pair<HStoreFile, Long>> storefileMaxTimestampPairs =
Lists.newArrayListWithCapacity(candidateSelection.size());
long maxTimestampSeen = Long.MIN_VALUE;
for (HStoreFile storeFile : candidateSelection) {
// if there is out-of-order data,
// we put them in the same window as the last file in increasing order
maxTimestampSeen =
Math.max(maxTimestampSeen, storeFile.getMaximumTimestamp().orElse(Long.MIN_VALUE));
storefileMaxTimestampPairs.add(new Pair<>(storeFile, maxTimestampSeen));
}
Collections.reverse(storefileMaxTimestampPairs);
CompactionWindow window = getIncomingWindow(now);
int minThreshold = comConf.getDateTieredIncomingWindowMin();
PeekingIterator<Pair<HStoreFile, Long>> it =
Iterators.peekingIterator(storefileMaxTimestampPairs.iterator());
while (it.hasNext()) {
if (window.compareToTimestamp(oldestToCompact) < 0) {
break;
}
int compResult = window.compareToTimestamp(it.peek().getSecond());
if (compResult > 0) {
// If the file is too old for the window, switch to the next window
window = window.nextEarlierWindow();
minThreshold = comConf.getMinFilesToCompact();
} else {
// The file is within the target window
ArrayList<HStoreFile> fileList = Lists.newArrayList();
// Add all files in the same window. For incoming window
// we tolerate files with future data although it is sub-optimal
while (it.hasNext() && window.compareToTimestamp(it.peek().getSecond()) <= 0) {
fileList.add(it.next().getFirst());
}
if (fileList.size() >= minThreshold) {
if (LOG.isDebugEnabled()) {
LOG.debug("Processing files: " + fileList + " for window: " + window);
}
DateTieredCompactionRequest request = generateCompactionRequest(fileList, window,
mayUseOffPeak, mayBeStuck, minThreshold, now);
if (request != null) {
return request;
}
}
}
}
// A non-null file list is expected by HStore
return new CompactionRequestImpl(Collections.emptyList());
} | 3.68 |
hbase_WALProcedureMap_isIncreasing | /**
* @return True if this new procedure is 'richer' than the current one, else false; in the latter case we log
*         the occurrence, since it appears that the WAL has older entries appended after newer ones.
*         See HBASE-18152.
*/
private static boolean isIncreasing(ProcedureProtos.Procedure current,
ProcedureProtos.Procedure candidate) {
// Check that the procedures we see are 'increasing'. We used to compare
// procedure id first and then update time but it can legitimately go backwards if the
// procedure is failed or rolled back so that was unreliable. Was going to compare
// state but lets see if comparing update time enough (unfortunately this issue only
// seen under load...)
boolean increasing = current.getLastUpdate() <= candidate.getLastUpdate();
if (!increasing) {
LOG.warn("NOT INCREASING! current=" + current + ", candidate=" + candidate);
}
return increasing;
} | 3.68 |
hbase_HBaseTestingUtility_setupDataTestDir | /**
* Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}. Give it a random name so we can
* have many concurrent tests running if we need to. It needs to amend the
* {@link #TEST_DIRECTORY_KEY} System property, as it's what minidfscluster bases its data dir on.
* Modifying a System property is not the way to do concurrent instances -- another instance could
* grab the temporary value unintentionally -- but there is nothing we can do about it at the moment;
* single instance only is how the minidfscluster works. We also create the underlying directory names
* for hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values in the
* conf, and as a system property for hadoop.tmp.dir (We do not create them!).
* @return The calculated data test build directory, if newly-created.
*/
@Override
protected Path setupDataTestDir() {
Path testPath = super.setupDataTestDir();
if (null == testPath) {
return null;
}
createSubDirAndSystemProperty("hadoop.log.dir", testPath, "hadoop-log-dir");
// This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
// we want our own value to ensure uniqueness on the same machine
createSubDirAndSystemProperty("hadoop.tmp.dir", testPath, "hadoop-tmp-dir");
// Read and modified in org.apache.hadoop.mapred.MiniMRCluster
createSubDir("mapreduce.cluster.local.dir", testPath, "mapred-local-dir");
return testPath;
} | 3.68 |
hadoop_NamenodeStatusReport_getNumLiveDatanodes | /**
* Get the number of live datanodes.
*
* @return The number of live datanodes.
*/
public int getNumLiveDatanodes() {
return this.liveDatanodes;
} | 3.68 |
framework_JsonPaintTarget_flush | /**
* Flushes the internal UIDL buffer.
*/
private void flush() {
uidlBuffer.flush();
} | 3.68 |
hbase_RegionRemoteProcedureBase_persistAndWake | // A bit strange, but the procedure store will throw RuntimeException if we cannot persist the
// state, so the upper layer should take care of this...
private void persistAndWake(MasterProcedureEnv env, RegionStateNode regionNode) {
env.getMasterServices().getMasterProcedureExecutor().getStore().update(this);
regionNode.getProcedureEvent().wake(env.getProcedureScheduler());
} | 3.68 |
querydsl_PathBuilder_getSimple | /**
* Create a new Simple path
*
* @param <A> property type
* @param property property name
* @param type property type
* @return property path
*/
@SuppressWarnings("unchecked")
public <A> SimplePath<A> getSimple(String property, Class<A> type) {
Class<? extends A> vtype = validate(property, type);
return super.createSimple(property, (Class<? super A>) vtype);
} | 3.68 |
hudi_HoodieFlinkCopyOnWriteTable_upsert | /**
* Upsert a batch of new records into Hoodie table at the supplied instantTime.
*
* <p>Specifies the write handle explicitly in order to have fine-grained control with
* the underlying file.
*
* @param context HoodieEngineContext
* @param writeHandle The write handle
* @param instantTime Instant Time for the action
* @param records hoodieRecords to upsert
* @return HoodieWriteMetadata
*/
public HoodieWriteMetadata<List<WriteStatus>> upsert(
HoodieEngineContext context,
HoodieWriteHandle<?, ?, ?, ?> writeHandle,
String instantTime,
List<HoodieRecord<T>> records) {
return new FlinkUpsertCommitActionExecutor<>(context, writeHandle, config, this, instantTime, records).execute();
} | 3.68 |
framework_ConnectorTracker_getDirtyVisibleConnectors | /**
* Returns a collection of those {@link #getDirtyConnectors() dirty
* connectors} that are actually visible to the client.
*
* @return A list of dirty and visible connectors.
*/
public ArrayList<ClientConnector> getDirtyVisibleConnectors() {
Collection<ClientConnector> dirtyConnectors = getDirtyConnectors();
ArrayList<ClientConnector> dirtyVisibleConnectors = new ArrayList<>(
dirtyConnectors.size());
for (ClientConnector c : dirtyConnectors) {
if (LegacyCommunicationManager.isConnectorVisibleToClient(c)) {
dirtyVisibleConnectors.add(c);
}
}
return dirtyVisibleConnectors;
} | 3.68 |
framework_Buffered_getCause | /**
* Gets the cause of the exception.
*
* @return The (first) cause for the exception, null if no cause.
*/
@Override
public final Throwable getCause() {
if (causes.length == 0) {
return null;
}
return causes[0];
} | 3.68 |
flink_FlinkRexBuilder_toComparable | /** Copied from the {@link RexBuilder} to fix the {@link RexBuilder#makeIn}. */
@SuppressWarnings("rawtypes")
private static <C extends Comparable<C>> C toComparable(Class<C> clazz, RexNode point) {
switch (point.getKind()) {
case LITERAL:
final RexLiteral literal = (RexLiteral) point;
return literal.getValueAs(clazz);
case ROW:
final RexCall call = (RexCall) point;
final ImmutableList.Builder<Comparable> b = ImmutableList.builder();
for (RexNode operand : call.operands) {
//noinspection unchecked
final Comparable value = toComparable(Comparable.class, operand);
if (value == null) {
return null; // not a constant value
}
b.add(value);
}
return clazz.cast(FlatLists.ofComparable(b.build()));
default:
return null; // not a constant value
}
} | 3.68 |
hmily_JavaBeanBinder_getProperties | /**
* Gets properties.
*
* @return the properties
*/
Map<String, BeanProperty> getProperties() {
return this.properties;
} | 3.68 |
hudi_HoodieAdbJdbcClient_getPartitionClause | /**
* Generate Hive Partition from partition values.
*
* @param partition Partition path
* @return partition clause
*/
private String getPartitionClause(String partition) {
List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition);
ValidationUtils.checkArgument(config.getSplitStrings(META_SYNC_PARTITION_FIELDS).size() == partitionValues.size(),
"Partition key parts " + config.getSplitStrings(META_SYNC_PARTITION_FIELDS)
+ " does not match with partition values " + partitionValues + ". Check partition strategy. ");
List<String> partBuilder = new ArrayList<>();
for (int i = 0; i < config.getSplitStrings(META_SYNC_PARTITION_FIELDS).size(); i++) {
partBuilder.add(config.getSplitStrings(META_SYNC_PARTITION_FIELDS).get(i) + "='" + partitionValues.get(i) + "'");
}
return String.join(",", partBuilder);
} | 3.68 |
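For illustration (hypothetical partition fields and path, not taken from the Hudi tests): with META_SYNC_PARTITION_FIELDS set to year,month,day and a partition path whose extracted values are 2023, 08 and 16, the method would produce:

```java
// Hypothetical call from inside the class:
// partition fields = [year, month, day], extractor returns [2023, 08, 16]
String clause = getPartitionClause("2023/08/16");
// clause -> "year='2023',month='08',day='16'"
```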
flink_KvStateService_fromConfiguration | /**
* Creates and returns the KvState service.
*
* @param taskManagerServicesConfiguration task manager configuration
* @return service for kvState related components
*/
public static KvStateService fromConfiguration(
TaskManagerServicesConfiguration taskManagerServicesConfiguration) {
KvStateRegistry kvStateRegistry = new KvStateRegistry();
QueryableStateConfiguration qsConfig =
taskManagerServicesConfiguration.getQueryableStateConfig();
KvStateClientProxy kvClientProxy = null;
KvStateServer kvStateServer = null;
if (qsConfig != null) {
int numProxyServerNetworkThreads =
qsConfig.numProxyServerThreads() == 0
? taskManagerServicesConfiguration.getNumberOfSlots()
: qsConfig.numProxyServerThreads();
int numProxyServerQueryThreads =
qsConfig.numProxyQueryThreads() == 0
? taskManagerServicesConfiguration.getNumberOfSlots()
: qsConfig.numProxyQueryThreads();
kvClientProxy =
QueryableStateUtils.createKvStateClientProxy(
taskManagerServicesConfiguration.getExternalAddress(),
qsConfig.getProxyPortRange(),
numProxyServerNetworkThreads,
numProxyServerQueryThreads,
new DisabledKvStateRequestStats());
int numStateServerNetworkThreads =
qsConfig.numStateServerThreads() == 0
? taskManagerServicesConfiguration.getNumberOfSlots()
: qsConfig.numStateServerThreads();
int numStateServerQueryThreads =
qsConfig.numStateQueryThreads() == 0
? taskManagerServicesConfiguration.getNumberOfSlots()
: qsConfig.numStateQueryThreads();
kvStateServer =
QueryableStateUtils.createKvStateServer(
taskManagerServicesConfiguration.getExternalAddress(),
qsConfig.getStateServerPortRange(),
numStateServerNetworkThreads,
numStateServerQueryThreads,
kvStateRegistry,
new DisabledKvStateRequestStats());
}
return new KvStateService(kvStateRegistry, kvStateServer, kvClientProxy);
} | 3.68 |
flink_FlinkContainerTestEnvironment_getFlinkContainers | /**
* Get instance of Flink containers for cluster controlling.
*
* @return Flink cluster on Testcontainers
*/
public FlinkContainers getFlinkContainers() {
return this.flinkContainers;
} | 3.68 |
flink_SourceTestSuiteBase_getTestDataSize | /**
* Get the size of test data.
*
* @param collections test data
* @return the size of test data
*/
protected int getTestDataSize(List<List<T>> collections) {
int sumSize = 0;
for (Collection<T> collection : collections) {
sumSize += collection.size();
}
return sumSize;
} | 3.68 |
hbase_WALEntryStream_checkAllBytesParsed | // HBASE-15984 check to see we have in fact parsed all data in a cleanly closed file
private boolean checkAllBytesParsed() {
// -1 means the wal wasn't closed cleanly.
final long trailerSize = currentTrailerSize();
FileStatus stat = null;
try {
stat = getCurrentPathFileStatus();
} catch (IOException e) {
LOG.warn("Couldn't get file length information about log {}, it {} closed cleanly {}",
currentPath, trailerSize < 0 ? "was not" : "was", getCurrentPathStat(), e);
metrics.incrUnknownFileLengthForClosedWAL();
}
// Here we use currentPositionOfReader instead of currentPositionOfEntry.
// We only call this method when currentEntry is null so usually they are the same, but there
// are two exceptions. One is we have nothing in the file but only a header, in which case
// the currentPositionOfEntry will always be 0 since we have no chance to update it. The other
// is that we reach the end of file, then currentPositionOfEntry will point to the tail of the
// last valid entry, and the currentPositionOfReader will usually point to the end of the file.
if (stat != null) {
if (trailerSize < 0) {
if (currentPositionOfReader < stat.getLen()) {
final long skippedBytes = stat.getLen() - currentPositionOfReader;
// See the commits in HBASE-25924/HBASE-25932 for context.
LOG.warn("Reached the end of WAL {}. It was not closed cleanly,"
+ " so we did not parse {} bytes of data.", currentPath, skippedBytes);
metrics.incrUncleanlyClosedWALs();
metrics.incrBytesSkippedInUncleanlyClosedWALs(skippedBytes);
}
} else if (currentPositionOfReader + trailerSize < stat.getLen()) {
LOG.warn(
"Processing end of WAL {} at position {}, which is too far away from"
+ " reported file length {}. Restarting WAL reading (see HBASE-15983 for details). {}",
currentPath, currentPositionOfReader, stat.getLen(), getCurrentPathStat());
metrics.incrRestartedWALReading();
metrics.incrRepeatedFileBytes(currentPositionOfReader);
return false;
}
}
LOG.debug("Reached the end of {} and length of the file is {}", currentPath,
stat == null ? "N/A" : stat.getLen());
metrics.incrCompletedWAL();
return true;
} | 3.68 |
morf_OracleDialect_defaultNullOrder | /**
* {@inheritDoc}
* @see org.alfasoftware.morf.jdbc.SqlDialect#defaultNullOrder()
*/
@Override
protected String defaultNullOrder() {
return DEFAULT_NULL_ORDER;
} | 3.68 |
hadoop_OBSFileSystem_getBucket | /**
* Return the bucket of this filesystem.
*
* @return the bucket
*/
String getBucket() {
return bucket;
} | 3.68 |
hudi_AbstractTableFileSystemView_getAllLogsMergedFileSliceBeforeOrOn | /**
* Stream all "merged" file-slices before or on an instant time
* for a MERGE_ON_READ table with an index that can index log files (which means it writes pure logs first).
*
* <p>In the streaming read scenario, for better reading efficiency, the user can choose to skip the
* base files that are produced by compaction. That is to say, we allow the users to consume only from
* these partitioned log files; these log files keep the record sequence just like a normal message queue.
*
* <p>NOTE: only local view is supported.
*
* @param partitionStr Partition Path
* @param maxInstantTime Max Instant Time
*/
public final Stream<FileSlice> getAllLogsMergedFileSliceBeforeOrOn(String partitionStr, String maxInstantTime) {
try {
readLock.lock();
String partition = formatPartitionKey(partitionStr);
ensurePartitionLoadedCorrectly(partition);
return fetchAllStoredFileGroups(partition)
.filter(fg -> !isFileGroupReplacedBeforeOrOn(fg.getFileGroupId(), maxInstantTime))
.map(fileGroup -> fetchAllLogsMergedFileSlice(fileGroup, maxInstantTime))
.filter(Option::isPresent).map(Option::get)
.map(this::filterUncommittedLogs)
.map(this::addBootstrapBaseFileIfPresent);
} finally {
readLock.unlock();
}
} | 3.68 |
flink_PendingCheckpoint_discard | /**
* Discard state. Must be called after {@link #dispose(boolean, CheckpointsCleaner,
* Runnable, Executor) dispose}.
*/
@Override
public void discard() {
synchronized (lock) {
if (discarded) {
Preconditions.checkState(
disposed, "Checkpoint should be disposed before being discarded");
return;
} else {
discarded = true;
}
}
// discard the private states.
// unregistered shared states are still considered private at this point.
try {
StateUtil.bestEffortDiscardAllStateObjects(operatorStates.values());
if (targetLocation != null) {
targetLocation.disposeOnFailure();
}
} catch (Throwable t) {
LOG.warn(
"Could not properly dispose the private states in the pending checkpoint {} of job {}.",
checkpointId,
jobId,
t);
} finally {
operatorStates.clear();
}
} | 3.68 |
shardingsphere-elasticjob_JobRegistry_getJobScheduleController | /**
* Get job schedule controller.
*
* @param jobName job name
* @return job schedule controller
*/
public JobScheduleController getJobScheduleController(final String jobName) {
return schedulerMap.get(jobName);
} | 3.68 |
framework_DefaultItemSorter_setSortProperties | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.ItemSorter#setSortProperties(com.vaadin.data.
* Container .Sortable, java.lang.Object[], boolean[])
*/
@Override
public void setSortProperties(Container.Sortable container,
Object[] propertyId, boolean[] ascending) {
this.container = container;
// Removes any non-sortable property ids
final List<Object> ids = new ArrayList<Object>();
final List<Boolean> orders = new ArrayList<Boolean>();
final Collection<?> sortable = container
.getSortableContainerPropertyIds();
for (int i = 0; i < propertyId.length; i++) {
if (sortable.contains(propertyId[i])) {
ids.add(propertyId[i]);
orders.add(Boolean
.valueOf(i < ascending.length ? ascending[i] : true));
}
}
sortPropertyIds = ids.toArray();
sortDirections = new boolean[orders.size()];
for (int i = 0; i < sortDirections.length; i++) {
sortDirections[i] = (orders.get(i)).booleanValue();
}
} | 3.68 |
hbase_RegionNormalizerWorkQueue_put | /**
* Inserts the specified element at the tail of the queue, if it's not already present.
* @param e the element to add
*/
public void put(E e) {
if (e == null) {
throw new NullPointerException();
}
lock.writeLock().lock();
try {
delegate.add(e);
if (!delegate.isEmpty()) {
notEmpty.signal();
}
} finally {
lock.writeLock().unlock();
}
} | 3.68 |
hadoop_StartupProgress_setTotal | /**
* Sets the total associated with the specified phase and step. For example,
* this can be used while loading edits to indicate the number of operations to
* be applied.
*
* @param phase Phase to set
* @param step Step to set
* @param total the total to set
*/
public void setTotal(Phase phase, Step step, long total) {
if (!isComplete(phase)) {
lazyInitStep(phase, step).total = total;
}
} | 3.68 |
flink_SnapshotDirectory_listDirectory | /**
* List the files in the snapshot directory.
*
* @return the files in the snapshot directory.
* @throws IOException if there is a problem creating the file statuses.
*/
public Path[] listDirectory() throws IOException {
return FileUtils.listDirectory(directory);
} | 3.68 |
flink_MetricGroup_counter | /**
* Registers a {@link org.apache.flink.metrics.Counter} with Flink.
*
* @param name name of the counter
* @param counter counter to register
* @param <C> counter type
* @return the given counter
*/
default <C extends Counter> C counter(int name, C counter) {
return counter(String.valueOf(name), counter);
} | 3.68 |
hudi_CollectionUtils_reduce | /**
* Reduces the provided {@link Collection} using the provided {@code reducer}, applied to
* every element of the collection as follows:
*
* {@code reduce(reduce(reduce(identity, e1), e2), ...)}
*
* @param c target collection to be reduced
* @param identity element for reducing to start from
* @param reducer actual reducing operator
*
* @return result of the reduction of the collection using reducing operator
*/
public static <T, U> U reduce(Collection<T> c, U identity, BiFunction<U, T, U> reducer) {
return c.stream()
.sequential()
.reduce(identity, reducer, (a, b) -> {
throw new UnsupportedOperationException();
});
} | 3.68 |
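A small usage sketch of the helper above (element and accumulator types chosen for illustration):

```java
List<Integer> nums = Arrays.asList(1, 2, 3, 4);
Integer sum = CollectionUtils.reduce(nums, 0, (acc, n) -> acc + n);      // 10
String joined = CollectionUtils.reduce(nums, "", (acc, n) -> acc + n);   // "1234"
```

The combiner passed to Stream#reduce only runs for parallel streams, so the explicit .sequential() call in the implementation makes the UnsupportedOperationException unreachable in practice.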
flink_CrossOperator_projectTuple14 | /**
* Projects a pair of crossed elements to a {@link Tuple} with the previously selected
* fields.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>
ProjectCross<
I1,
I2,
Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>
projectTuple14() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>
tType =
new TupleTypeInfo<
Tuple14<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13>>(fTypes);
return new ProjectCross<
I1, I2, Tuple14<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13>>(
this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
} | 3.68 |
hadoop_AbfsManifestStoreOperations_storePreservesEtagsThroughRenames | /**
* Etags are preserved through Gen2 stores, but not wasb stores.
* @param path path to probe.
* @return true if this store preserves etags.
*/
@Override
public boolean storePreservesEtagsThroughRenames(final Path path) {
return etagsPreserved;
} | 3.68 |
zxing_UPCEANExtension5Support_parseExtensionString | /**
* @param raw raw content of extension
* @return formatted interpretation of raw content as a {@link Map} mapping
* one {@link ResultMetadataType} to appropriate value, or {@code null} if not known
*/
private static Map<ResultMetadataType,Object> parseExtensionString(String raw) {
if (raw.length() != 5) {
return null;
}
Object value = parseExtension5String(raw);
if (value == null) {
return null;
}
Map<ResultMetadataType,Object> result = new EnumMap<>(ResultMetadataType.class);
result.put(ResultMetadataType.SUGGESTED_PRICE, value);
return result;
} | 3.68 |
druid_IPAddress_getIPAddress | /**
* Return the integer representation of the IP address.
*
* @return The IP address.
*/
public final int getIPAddress() {
return ipAddress;
} | 3.68 |
flink_ResultPartition_setup | /**
* Registers a buffer pool with this result partition.
*
* <p>There is one pool for each result partition, which is shared by all its sub partitions.
*
* <p>The pool is registered with the partition *after* it has been constructed in order to
* conform to the life-cycle of task registrations in the {@link TaskExecutor}.
*/
@Override
public void setup() throws IOException {
checkState(
this.bufferPool == null,
"Bug in result partition setup logic: Already registered buffer pool.");
this.bufferPool = checkNotNull(bufferPoolFactory.get());
setupInternal();
partitionManager.registerResultPartition(this);
} | 3.68 |
hbase_MobFileCache_getAccessCount | /**
* Gets the count of accesses to the mob file cache.
* @return The count of accesses to the mob file cache.
*/
public long getAccessCount() {
return count.get();
} | 3.68 |
hudi_HoodieBackedTableMetadataWriter_updateFunctionalIndexIfPresent | /**
* Update functional index from {@link HoodieCommitMetadata}.
*/
private void updateFunctionalIndexIfPresent(HoodieCommitMetadata commitMetadata, String instantTime, Map<MetadataPartitionType, HoodieData<HoodieRecord>> partitionToRecordMap) {
dataMetaClient.getTableConfig().getMetadataPartitions()
.stream()
.filter(partition -> partition.startsWith(HoodieTableMetadataUtil.PARTITION_NAME_FUNCTIONAL_INDEX_PREFIX))
.forEach(partition -> {
HoodieData<HoodieRecord> functionalIndexRecords;
try {
functionalIndexRecords = getFunctionalIndexUpdates(commitMetadata, partition, instantTime);
} catch (Exception e) {
throw new HoodieMetadataException("Failed to get functional index updates for partition " + partition, e);
}
partitionToRecordMap.put(FUNCTIONAL_INDEX, functionalIndexRecords);
});
} | 3.68 |
hbase_RequestConverter_buildModifyColumnRequest | /**
* Create a protocol buffer ModifyColumnRequest
* @return a ModifyColumnRequest
*/
public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName,
final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
} | 3.68 |
hadoop_ManifestCommitter_isCommitJobRepeatable | /**
* Failure during Job Commit is not recoverable from.
*
* @param jobContext
* Context of the job whose output is being written.
* @return false, always
* @throws IOException never
*/
@Override
public boolean isCommitJobRepeatable(final JobContext jobContext)
throws IOException {
LOG.info("Probe for isCommitJobRepeatable({}): returning false",
jobContext.getJobID());
return false;
} | 3.68 |
hadoop_WasbTokenRenewer_renew | /**
* Renew the delegation token.
* @param token token to renew.
* @param conf configuration object.
* @return extended expiry time of the token.
* @throws IOException thrown when trying to get the current user.
* @throws InterruptedException thrown when thread is interrupted
*/
@Override
public long renew(final Token<?> token, Configuration conf)
throws IOException, InterruptedException {
LOG.debug("Renewing the delegation token");
return getInstance(conf).renewDelegationToken(token);
} | 3.68 |
flink_SingleLogicalSlot_release | /**
* A release of the payload by the {@link AllocatedSlot} triggers a release of the payload of
* the logical slot.
*
* @param cause of the payload release
*/
@Override
public void release(Throwable cause) {
if (STATE_UPDATER.compareAndSet(this, State.ALIVE, State.RELEASING)) {
signalPayloadRelease(cause);
}
markReleased();
releaseFuture.complete(null);
} | 3.68 |
flink_CatalogManager_createDatabase | /**
* Create a database.
*
* @param catalogName Name of the catalog for database
* @param databaseName Name of the database to be created
* @param database The database definition
* @param ignoreIfExists Flag to specify behavior when a database with the given name already
* exists: if set to false, throw a DatabaseAlreadyExistException, if set to true, do
* nothing.
* @throws DatabaseAlreadyExistException if the given database already exists and ignoreIfExists
* is false
* @throws CatalogException in case of any runtime exception
*/
public void createDatabase(
String catalogName,
String databaseName,
CatalogDatabase database,
boolean ignoreIfExists)
throws DatabaseAlreadyExistException, CatalogException {
Catalog catalog = getCatalogOrThrowException(catalogName);
catalog.createDatabase(databaseName, database, ignoreIfExists);
catalogModificationListeners.forEach(
listener ->
listener.onEvent(
CreateDatabaseEvent.createEvent(
CatalogContext.createContext(catalogName, catalog),
databaseName,
database,
ignoreIfExists)));
} | 3.68 |
hadoop_ContainerAllocationHistory_addAllocationEntry | /**
* Record the allocation history for the container.
*
* @param container to add record for
* @param requestSet resource request ask set
* @param fulfillTimeStamp time at which allocation happened
* @param fulfillLatency time elapsed in allocating since asked
*/
public synchronized void addAllocationEntry(Container container,
ResourceRequestSet requestSet, long fulfillTimeStamp, long fulfillLatency){
if (!requestSet.isANYRelaxable()) {
LOG.info("allocation history ignoring {}, relax locality is false", container);
return;
}
this.relaxableG.add(new AbstractMap.SimpleEntry<>(
fulfillTimeStamp, fulfillLatency));
if (this.relaxableG.size() > this.maxEntryCount) {
this.relaxableG.remove();
}
} | 3.68 |
hadoop_Paths_getAppAttemptId | /**
* Get the Application Attempt ID for this job.
* @param conf the config to look in
* @return the Application Attempt ID for a given job.
*/
private static int getAppAttemptId(Configuration conf) {
return conf.getInt(
MRJobConfig.APPLICATION_ATTEMPT_ID, 0);
} | 3.68 |
dubbo_RpcServiceContext_getLocalHost | /**
* get local host.
*
* @return local host
*/
@Override
public String getLocalHost() {
String host = localAddress == null
? null
: localAddress.getAddress() == null
? localAddress.getHostName()
: NetUtils.filterLocalHost(localAddress.getAddress().getHostAddress());
if (host == null || host.length() == 0) {
return NetUtils.getLocalHost();
}
return host;
} | 3.68 |
morf_DatabaseMetaDataProviderUtils_shouldIgnoreIndex | /**
* Indexes which contain the suffix _PRF and a digit are to be ignored:
* this allows performance testing of new indexes to verify their effect,
* without breaking the schema checking.
*
* eg. Schedule_PRF1
*
* @param indexName The name of an index
* @return Whether it should be ignored
*/
public static boolean shouldIgnoreIndex(String indexName) {
return indexName.toUpperCase().matches(".*_PRF\\d+$");
} | 3.68 |
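For example, given the regular expression above:

```java
DatabaseMetaDataProviderUtils.shouldIgnoreIndex("Schedule_PRF1");  // true
DatabaseMetaDataProviderUtils.shouldIgnoreIndex("schedule_prf2");  // true (comparison is upper-cased)
DatabaseMetaDataProviderUtils.shouldIgnoreIndex("Schedule_PRF");   // false (no trailing digit)
DatabaseMetaDataProviderUtils.shouldIgnoreIndex("Schedule_IDX1");  // false
```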
flink_MiniCluster_isRunning | /** Checks if the mini cluster was started and is running. */
public boolean isRunning() {
return running;
} | 3.68 |
hibernate-validator_ClassLoadingHelper_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
pulsar_TransactionBufferProvider_newProvider | /**
* Construct a provider from the provided class.
*
* @param providerClassName the provider class name.
* @return an instance of transaction buffer provider.
*/
static TransactionBufferProvider newProvider(String providerClassName) throws IOException {
try {
TransactionBufferProvider transactionBufferProvider = Reflections.createInstance(providerClassName,
TransactionBufferProvider.class, Thread.currentThread().getContextClassLoader());
return transactionBufferProvider;
} catch (Exception e) {
throw new IOException(e);
}
} | 3.68 |
flink_ManuallyTriggeredScheduledExecutorService_triggerNonPeriodicScheduledTask | /**
* Triggers a single non-periodically scheduled task.
*
* @throws NoSuchElementException If there is no such task.
*/
public void triggerNonPeriodicScheduledTask() {
final ScheduledTask<?> poll = nonPeriodicScheduledTasks.remove();
if (poll != null) {
poll.execute();
}
} | 3.68 |
hadoop_ValidationFailure_verify | /**
* Verify that a condition holds.
* @param expression expression which must be true
* @param message message to raise on a failure
* @param args arguments for the message formatting
* @throws ValidationFailure on a failure
*/
public static void verify(boolean expression,
String message,
Object... args) throws ValidationFailure {
if (!expression) {
throw new ValidationFailure(message, args);
}
} | 3.68 |
hbase_CoprocessorHost_checkAndLoadInstance | /**
* @param implClass Implementation class
* @param priority priority
* @param conf configuration
* @return the created coprocessor environment, or null if the coprocessor could not be loaded
* @throws java.io.IOException Exception
*/
public E checkAndLoadInstance(Class<?> implClass, int priority, Configuration conf)
throws IOException {
// create the instance
C impl;
try {
impl = checkAndGetInstance(implClass);
if (impl == null) {
LOG.error("Cannot load coprocessor " + implClass.getSimpleName());
return null;
}
} catch (InstantiationException | IllegalAccessException e) {
throw new IOException(e);
}
// create the environment
E env = createEnvironment(impl, priority, loadSequence.incrementAndGet(), conf);
assert env instanceof BaseEnvironment;
((BaseEnvironment<C>) env).startup();
// HBASE-4014: maintain list of loaded coprocessors for later crash analysis
// if server (master or regionserver) aborts.
coprocessorNames.add(implClass.getName());
return env;
} | 3.68 |
streampipes_AssetLinkBuilder_withEditingDisabled | /**
* Sets whether editing is disabled for the AssetLink being built.
*
* @param editingDisabled Whether editing is disabled.
* @return The AssetLinkBuilder instance for method chaining.
*/
public AssetLinkBuilder withEditingDisabled(boolean editingDisabled) {
this.assetLink.setEditingDisabled(editingDisabled);
return this;
} | 3.68 |
flink_SqlJsonUtils_getNodeFactory | /** Returns the {@link JsonNodeFactory} for creating nodes. */
public static JsonNodeFactory getNodeFactory() {
return MAPPER.getNodeFactory();
} | 3.68 |
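A small sketch of how the shared factory combines with the standard Jackson node-building API; the field names are illustrative:

    JsonNodeFactory factory = SqlJsonUtils.getNodeFactory();

    ObjectNode row = factory.objectNode();         // {}
    row.set("id", factory.numberNode(42));         // {"id":42}
    row.set("active", factory.booleanNode(true));  // {"id":42,"active":true}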
pulsar_FieldParser_stringToList | /**
* Converts comma separated string to List.
*
* @param <T>
* type of list
* @param val
* comma separated values.
     * @param type
     *            the element class each token is converted to.
     * @return The converted list with type {@code <T>}.
*/
public static <T> List<T> stringToList(String val, Class<T> type) {
if (val == null) {
return null;
}
String[] tokens = trim(val).split(",");
return Arrays.stream(tokens).map(t -> {
return convert(trim(t), type);
}).collect(Collectors.toList());
} | 3.68 |
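Usage sketch; it assumes convert() handles plain String and Integer targets, which the snippet itself does not show:

    List<String> roles = FieldParser.stringToList(" admin, user ,guest ", String.class);
    // -> ["admin", "user", "guest"]   (the whole value and each token are trimmed)

    List<Integer> ports = FieldParser.stringToList("6650,6651", Integer.class);
    // -> [6650, 6651]

    List<String> none = FieldParser.stringToList(null, String.class);
    // -> null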
framework_FilesystemContainer_equals | /**
     * Tests if the given object is the same as this object. Two FileItems
     * referring to the same file on the same host are equal.
*
* @param obj
* an object to compare with this object.
* @return <code>true</code> if the given object is the same as this
* object, <code>false</code> if not
*/
@Override
public boolean equals(Object obj) {
if (!(obj instanceof FileItem)) {
return false;
}
final FileItem fi = (FileItem) obj;
return fi.getHost() == getHost() && fi.file.equals(file);
} | 3.68 |
dubbo_URLStrParser_parseDecodedStr | /**
* @param decodedURLStr : after {@link URL#decode} string
* decodedURLStr format: protocol://username:password@host:port/path?k1=v1&k2=v2
* [protocol://][username:password@][host:port]/[path][?k1=v1&k2=v2]
*/
public static URL parseDecodedStr(String decodedURLStr) {
Map<String, String> parameters = null;
int pathEndIdx = decodedURLStr.indexOf('?');
if (pathEndIdx >= 0) {
parameters = parseDecodedParams(decodedURLStr, pathEndIdx + 1);
} else {
pathEndIdx = decodedURLStr.length();
}
String decodedBody = decodedURLStr.substring(0, pathEndIdx);
return parseURLBody(decodedURLStr, decodedBody, parameters);
} | 3.68 |
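A sketch of the expected decomposition of an already-decoded URL string; the getters are the usual dubbo URL accessors and the service details are illustrative:

    URL url = URLStrParser.parseDecodedStr(
            "dubbo://admin:secret@192.168.1.10:20880/com.example.DemoService?timeout=5000&version=1.0.0");

    // url.getProtocol()           -> "dubbo"
    // url.getUsername()           -> "admin"
    // url.getHost()               -> "192.168.1.10"
    // url.getPort()               -> 20880
    // url.getPath()               -> "com.example.DemoService"
    // url.getParameter("timeout") -> "5000"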
MagicPlugin_Base64Coder_encodeString | /**
* Encodes a string into Base64 format.
* No blanks or line breaks are inserted.
*
* @param s A String to be encoded.
* @return A String containing the Base64 encoded data.
*/
public static String encodeString(String s) {
return new String(encode(s.getBytes(StandardCharsets.UTF_8)));
} | 3.68 |
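A one-line check of the expected output:

    String encoded = Base64Coder.encodeString("Hello");
    // encoded is "SGVsbG8=" (UTF-8 bytes of "Hello", standard Base64 alphabet)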
framework_Notification_setHtmlContentAllowed | /**
* Sets whether html is allowed in the caption and description. If set to
* true, the texts are passed to the browser as html and the developer is
* responsible for ensuring no harmful html is used. If set to false, the
* texts are passed to the browser as plain text.
*
* @param htmlContentAllowed
* true if the texts are used as html, false if used as plain
* text
*/
public void setHtmlContentAllowed(boolean htmlContentAllowed) {
getState().htmlContentAllowed = htmlContentAllowed;
} | 3.68 |
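A usage sketch assuming the two-argument Notification(caption, description) constructor and Page-based show() of the same framework; the markup is illustrative:

    Notification notification = new Notification(
            "Order saved",
            "View it in <b>My Orders</b>");
    // Opting in to HTML is safe only because the markup above is fully controlled by the application.
    notification.setHtmlContentAllowed(true);
    notification.show(Page.getCurrent());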
flink_RocksDBIncrementalRestoreOperation_restoreWithRescaling | /**
* Recovery from multi incremental states with rescaling. For rescaling, this method creates a
* temporary RocksDB instance for a key-groups shard. All contents from the temporary instance
* are copied into the real restore instance and then the temporary instance is discarded.
*/
private void restoreWithRescaling(Collection<KeyedStateHandle> restoreStateHandles)
throws Exception {
Preconditions.checkArgument(restoreStateHandles != null && !restoreStateHandles.isEmpty());
final List<StateHandleDownloadSpec> allDownloadSpecs = new ArrayList<>();
final List<IncrementalLocalKeyedStateHandle> localKeyedStateHandles =
new ArrayList<>(restoreStateHandles.size());
final Path absolutInstanceBasePath = instanceBasePath.getAbsoluteFile().toPath();
// Prepare and collect all the download request to pull remote state to a local directory
for (KeyedStateHandle stateHandle : restoreStateHandles) {
if (stateHandle instanceof IncrementalRemoteKeyedStateHandle) {
StateHandleDownloadSpec downloadRequest =
new StateHandleDownloadSpec(
(IncrementalRemoteKeyedStateHandle) stateHandle,
absolutInstanceBasePath.resolve(UUID.randomUUID().toString()));
allDownloadSpecs.add(downloadRequest);
} else if (stateHandle instanceof IncrementalLocalKeyedStateHandle) {
localKeyedStateHandles.add((IncrementalLocalKeyedStateHandle) stateHandle);
} else {
throw unexpectedStateHandleException(
IncrementalRemoteKeyedStateHandle.class, stateHandle.getClass());
}
}
allDownloadSpecs.stream()
.map(StateHandleDownloadSpec::createLocalStateHandleForDownloadedState)
.forEach(localKeyedStateHandles::add);
// Choose the best state handle for the initial DB
final IncrementalLocalKeyedStateHandle selectedInitialHandle =
RocksDBIncrementalCheckpointUtils.chooseTheBestStateHandleForInitial(
localKeyedStateHandles, keyGroupRange, overlapFractionThreshold);
Preconditions.checkNotNull(selectedInitialHandle);
// Remove the selected handle from the list so that we don't restore it twice.
localKeyedStateHandles.remove(selectedInitialHandle);
try {
// Process all state downloads
transferRemoteStateToLocalDirectory(allDownloadSpecs);
// Init the base DB instance with the initial state
initBaseDBForRescaling(selectedInitialHandle);
// Transfer remaining key-groups from temporary instance into base DB
byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
CompositeKeySerializationUtils.serializeKeyGroup(
keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes);
byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
CompositeKeySerializationUtils.serializeKeyGroup(
keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes);
// Insert all remaining state through creating temporary RocksDB instances
for (IncrementalLocalKeyedStateHandle stateHandle : localKeyedStateHandles) {
logger.info(
"Starting to restore from state handle: {} with rescaling.", stateHandle);
try (RestoredDBInstance tmpRestoreDBInfo =
restoreTempDBInstanceFromLocalState(stateHandle);
RocksDBWriteBatchWrapper writeBatchWrapper =
new RocksDBWriteBatchWrapper(
this.rocksHandle.getDb(), writeBatchSize)) {
List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors =
tmpRestoreDBInfo.columnFamilyDescriptors;
List<ColumnFamilyHandle> tmpColumnFamilyHandles =
tmpRestoreDBInfo.columnFamilyHandles;
                // Iterating only the requested descriptors automatically skips
                // the default column family handle.
for (int descIdx = 0; descIdx < tmpColumnFamilyDescriptors.size(); ++descIdx) {
ColumnFamilyHandle tmpColumnFamilyHandle =
tmpColumnFamilyHandles.get(descIdx);
ColumnFamilyHandle targetColumnFamilyHandle =
this.rocksHandle.getOrRegisterStateColumnFamilyHandle(
null,
tmpRestoreDBInfo.stateMetaInfoSnapshots.get(
descIdx))
.columnFamilyHandle;
try (RocksIteratorWrapper iterator =
RocksDBOperationUtils.getRocksIterator(
tmpRestoreDBInfo.db,
tmpColumnFamilyHandle,
tmpRestoreDBInfo.readOptions)) {
iterator.seek(startKeyGroupPrefixBytes);
while (iterator.isValid()) {
if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(
iterator.key(), stopKeyGroupPrefixBytes)) {
writeBatchWrapper.put(
targetColumnFamilyHandle,
iterator.key(),
iterator.value());
} else {
                                // The iterator visits records in sorted order, so once
                                // we pass the stop prefix we can simply break here.
break;
}
iterator.next();
}
} // releases native iterator resources
}
logger.info(
"Finished restoring from state handle: {} with rescaling.",
stateHandle);
}
}
} finally {
// Cleanup all download directories
allDownloadSpecs.stream()
.map(StateHandleDownloadSpec::getDownloadDestination)
.forEach(this::cleanUpPathQuietly);
}
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_helpGetBytesForKeyAndNamespace | /**
* Returns the byte arrays of serialized key and namespace.
*
* @param node the node.
* @return a tuple of byte arrays of serialized key and namespace
*/
Tuple2<byte[], byte[]> helpGetBytesForKeyAndNamespace(long node) {
Node nodeStorage = getNodeSegmentAndOffset(node);
MemorySegment segment = nodeStorage.nodeSegment;
int offsetInSegment = nodeStorage.nodeOffset;
int level = SkipListUtils.getLevel(segment, offsetInSegment);
int keyDataOffset = offsetInSegment + SkipListUtils.getKeyDataOffset(level);
return skipListKeySerializer.getSerializedKeyAndNamespace(segment, keyDataOffset);
} | 3.68 |