name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
flink_Configuration_fromMap | /** Creates a new configuration that is initialized with the options of the given map. */
public static Configuration fromMap(Map<String, String> map) {
final Configuration configuration = new Configuration();
map.forEach(configuration::setString);
return configuration;
} | 3.68 |
hbase_CompactionPolicy_setConf | /**
* Inform the policy that some configuration has been changed, so any cached values should be
* updated.
*/
public void setConf(Configuration conf) {
this.comConf = new CompactionConfiguration(conf, this.storeConfigInfo);
} | 3.68 |
hadoop_SysInfoWindows_getPhysicalMemorySize | /** {@inheritDoc} */
@Override
public long getPhysicalMemorySize() {
refreshIfNeeded();
return memSize;
} | 3.68 |
dubbo_SimpleReferenceCache_destroy | /**
* Clear and destroy one {@link ReferenceConfigBase} in the cache.
*
* @param referenceConfig used to create the key.
*/
@Override
public <T> void destroy(ReferenceConfigBase<T> referenceConfig) {
String key = generator.generateKey(referenceConfig);
Class<?> type = referenceConfig.getInterfaceClass();
destroy(key, type);
} | 3.68 |
hbase_MasterObserver_postCompletedSplitRegionAction | /**
* Called after the region is split.
* @param c the environment to interact with the framework and master
* @param regionInfoA the left daughter region
* @param regionInfoB the right daughter region
*/
default void postCompletedSplitRegionAction(final ObserverContext<MasterCoprocessorEnvironment> c,
final RegionInfo regionInfoA, final RegionInfo regionInfoB) throws IOException {
} | 3.68 |
hbase_MasterProcedureScheduler_waitPeerExclusiveLock | // ============================================================================
// Peer Locking Helpers
// ============================================================================
/**
* Try to acquire the exclusive lock on the specified peer.
* @see #wakePeerExclusiveLock(Procedure, String)
* @param procedure the procedure trying to acquire the lock
* @param peerId peer to lock
* @return true if the procedure has to wait for the peer to be available
*/
public boolean waitPeerExclusiveLock(Procedure<?> procedure, String peerId) {
schedLock();
try {
final LockAndQueue lock = locking.getPeerLock(peerId);
if (lock.tryExclusiveLock(procedure)) {
removeFromRunQueue(peerRunQueue, getPeerQueue(peerId),
() -> procedure + " held exclusive lock");
return false;
}
waitProcedure(lock, procedure);
logLockedResource(LockedResourceType.PEER, peerId);
return true;
} finally {
schedUnlock();
}
} | 3.68 |
hudi_HoodieMetadataTableValidator_validateFilesInPartition | /**
* Compare the file listing and index data between metadata table and fileSystem.
* For now, validate five kinds of APIs:
* 1. HoodieMetadataFileSystemView::getLatestFileSlices
* 2. HoodieMetadataFileSystemView::getLatestBaseFiles
* 3. HoodieMetadataFileSystemView::getAllFileGroups and HoodieMetadataFileSystemView::getAllFileSlices
* 4. HoodieBackedTableMetadata::getColumnStats
* 5. HoodieBackedTableMetadata::getBloomFilters
*
* @param metadataTableBasedContext Validation context containing information based on metadata table
* @param fsBasedContext Validation context containing information based on the file system
* @param partitionPath Partition path String
* @param baseDataFilesForCleaning Base files from incomplete cleaner actions
*/
private void validateFilesInPartition(
HoodieMetadataValidationContext metadataTableBasedContext,
HoodieMetadataValidationContext fsBasedContext, String partitionPath,
Set<String> baseDataFilesForCleaning) {
if (cfg.validateLatestFileSlices) {
validateLatestFileSlices(metadataTableBasedContext, fsBasedContext, partitionPath, baseDataFilesForCleaning);
}
if (cfg.validateLatestBaseFiles) {
validateLatestBaseFiles(metadataTableBasedContext, fsBasedContext, partitionPath, baseDataFilesForCleaning);
}
if (cfg.validateAllFileGroups) {
validateAllFileGroups(metadataTableBasedContext, fsBasedContext, partitionPath, baseDataFilesForCleaning);
}
if (cfg.validateAllColumnStats) {
validateAllColumnStats(metadataTableBasedContext, fsBasedContext, partitionPath, baseDataFilesForCleaning);
}
if (cfg.validateBloomFilters) {
validateBloomFilters(metadataTableBasedContext, fsBasedContext, partitionPath, baseDataFilesForCleaning);
}
} | 3.68 |
hudi_SecondaryIndexManager_drop | /**
* Drop a secondary index by index name
*
* @param metaClient Hoodie table meta client
* @param indexName The unique secondary index name
* @param ignoreIfNotExists Whether to ignore the drop if the specified secondary index does not exist
*/
public void drop(HoodieTableMetaClient metaClient, String indexName, boolean ignoreIfNotExists) {
Option<List<HoodieSecondaryIndex>> secondaryIndexes = SecondaryIndexUtils.getSecondaryIndexes(metaClient);
if (!indexExists(secondaryIndexes, indexName, Option.empty(), Option.empty())) {
if (ignoreIfNotExists) {
return;
} else {
throw new HoodieSecondaryIndexException("Secondary index not exists: " + indexName);
}
}
List<HoodieSecondaryIndex> secondaryIndexesToKeep = secondaryIndexes.get().stream()
.filter(i -> !i.getIndexName().equals(indexName))
.sorted(new HoodieSecondaryIndex.HoodieIndexCompactor())
.collect(Collectors.toList());
if (CollectionUtils.nonEmpty(secondaryIndexesToKeep)) {
Properties updatedProps = new Properties();
updatedProps.put(HoodieTableConfig.SECONDARY_INDEXES_METADATA.key(),
SecondaryIndexUtils.toJsonString(secondaryIndexesToKeep));
HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), updatedProps);
} else {
HoodieTableConfig.delete(metaClient.getFs(), new Path(metaClient.getMetaPath()),
CollectionUtils.createSet(HoodieTableConfig.SECONDARY_INDEXES_METADATA.key()));
}
LOG.info("Success to delete secondary index metadata: {}", indexName);
// TODO: drop index data
} | 3.68 |
graphhopper_OSMReader_setArtificialWayTags | /**
* This method is called during the second pass of {@link WaySegmentParser} and provides an entry point to enrich
* the given OSM way with additional tags before it is passed on to the tag parsers.
*/
protected void setArtificialWayTags(PointList pointList, ReaderWay way, double distance, List<Map<String, Object>> nodeTags) {
way.setTag("node_tags", nodeTags);
way.setTag("edge_distance", distance);
way.setTag("point_list", pointList);
// we have to remove existing artificial tags, because we modify the way even though there can be multiple edges
// per way. sooner or later we should separate the artificial ('edge') tags from the way, see discussion here:
// https://github.com/graphhopper/graphhopper/pull/2457#discussion_r751155404
way.removeTag("country");
way.removeTag("country_rule");
way.removeTag("custom_areas");
List<CustomArea> customAreas;
if (areaIndex != null) {
double middleLat;
double middleLon;
if (pointList.size() > 2) {
middleLat = pointList.getLat(pointList.size() / 2);
middleLon = pointList.getLon(pointList.size() / 2);
} else {
double firstLat = pointList.getLat(0), firstLon = pointList.getLon(0);
double lastLat = pointList.getLat(pointList.size() - 1), lastLon = pointList.getLon(pointList.size() - 1);
middleLat = (firstLat + lastLat) / 2;
middleLon = (firstLon + lastLon) / 2;
}
customAreas = areaIndex.query(middleLat, middleLon);
} else {
customAreas = emptyList();
}
// special handling for countries: since they are built-in with GraphHopper they are always fed to the EncodingManager
Country country = Country.MISSING;
State state = State.MISSING;
double countryArea = Double.POSITIVE_INFINITY;
for (CustomArea customArea : customAreas) {
// ignore areas that aren't countries
if (customArea.getProperties() == null) continue;
String alpha2WithSubdivision = (String) customArea.getProperties().get(State.ISO_3166_2);
if (alpha2WithSubdivision == null)
continue;
// the country string must be either something like US-CA (including subdivision) or just DE
String[] strs = alpha2WithSubdivision.split("-");
if (strs.length == 0 || strs.length > 2)
throw new IllegalStateException("Invalid alpha2: " + alpha2WithSubdivision);
Country c = Country.find(strs[0]);
if (c == null)
throw new IllegalStateException("Unknown country: " + strs[0]);
if (
// countries with subdivision overrule those without subdivision as well as bigger ones with subdivision
strs.length == 2 && (state == State.MISSING || customArea.getArea() < countryArea)
// countries without subdivision only overrule bigger ones without subdivision
|| strs.length == 1 && (state == State.MISSING && customArea.getArea() < countryArea)) {
country = c;
state = State.find(alpha2WithSubdivision);
countryArea = customArea.getArea();
}
}
way.setTag("country", country);
way.setTag("country_state", state);
if (countryRuleFactory != null) {
CountryRule countryRule = countryRuleFactory.getCountryRule(country);
if (countryRule != null)
way.setTag("country_rule", countryRule);
}
// also add all custom areas as artificial tag
way.setTag("custom_areas", customAreas);
} | 3.68 |
querydsl_AbstractFetchableMongodbQuery_iterate | /**
* Iterate with the specified fields
*
* @param paths fields to return
* @return iterator
*/
public CloseableIterator<K> iterate(Path<?>... paths) {
getQueryMixin().setProjection(paths);
return iterate();
} | 3.68 |
flink_DefaultRollingPolicy_builder | /**
* Creates a new {@link PolicyBuilder} that is used to configure and build an instance of {@code
* DefaultRollingPolicy}.
*/
public static DefaultRollingPolicy.PolicyBuilder builder() {
return new DefaultRollingPolicy.PolicyBuilder(
DEFAULT_MAX_PART_SIZE, DEFAULT_ROLLOVER_INTERVAL, DEFAULT_INACTIVITY_INTERVAL);
} | 3.68 |
dubbo_AbstractConfigurator_isV27ConditionMatchOrUnset | /**
* Check whether the v2.7 configurator rule is unset or, if set, whether it can be matched.
*
* @param url the configurator rule url
* @return true if v2.7 configurator rule is not set or the rule can be matched.
*/
private boolean isV27ConditionMatchOrUnset(URL url) {
String providers = configuratorUrl.getParameter(OVERRIDE_PROVIDERS_KEY);
if (StringUtils.isNotEmpty(providers)) {
boolean match = false;
String[] providerAddresses = providers.split(CommonConstants.COMMA_SEPARATOR);
for (String address : providerAddresses) {
if (address.equals(url.getAddress())
|| address.equals(ANYHOST_VALUE)
|| address.equals(ANYHOST_VALUE + CommonConstants.GROUP_CHAR_SEPARATOR + ANY_VALUE)
|| address.equals(ANYHOST_VALUE + CommonConstants.GROUP_CHAR_SEPARATOR + url.getPort())
|| address.equals(url.getHost())) {
match = true;
}
}
if (!match) {
logger.debug("Cannot apply configurator rule, provider address mismatch, current address "
+ url.getAddress() + ", address in rule is " + providers);
return false;
}
}
String configApplication = configuratorUrl.getApplication(configuratorUrl.getUsername());
String currentApplication = url.getApplication(url.getUsername());
if (configApplication != null
&& !ANY_VALUE.equals(configApplication)
&& !configApplication.equals(currentApplication)) {
logger.debug("Cannot apply configurator rule, application name mismatch, current application is "
+ currentApplication + ", application in rule is " + configApplication);
return false;
}
String configServiceKey = configuratorUrl.getServiceKey();
String currentServiceKey = url.getServiceKey();
if (!ANY_VALUE.equals(configServiceKey) && !configServiceKey.equals(currentServiceKey)) {
logger.debug("Cannot apply configurator rule, service mismatch, current service is " + currentServiceKey
+ ", service in rule is " + configServiceKey);
return false;
}
return true;
} | 3.68 |
dubbo_ReferenceBeanManager_transformName | // convert reference name/alias to referenceBeanName
private String transformName(String referenceBeanNameOrAlias) {
return referenceAliasMap.getOrDefault(referenceBeanNameOrAlias, referenceBeanNameOrAlias);
} | 3.68 |
flink_DeweyNumber_isCompatibleWith | /**
* Checks whether this dewey number is compatible to the other dewey number.
*
* <p>True iff this contains other as a prefix, or iff they differ only in the last digit, where
* the last digit of this is greater than or equal to the last digit of other.
*
* @param other The other dewey number to check compatibility against
* @return Whether this dewey number is compatible to the other dewey number
*/
public boolean isCompatibleWith(DeweyNumber other) {
if (length() > other.length()) {
// prefix case
for (int i = 0; i < other.length(); i++) {
if (other.deweyNumber[i] != deweyNumber[i]) {
return false;
}
}
return true;
} else if (length() == other.length()) {
// check init digits for equality
int lastIndex = length() - 1;
for (int i = 0; i < lastIndex; i++) {
if (other.deweyNumber[i] != deweyNumber[i]) {
return false;
}
}
// check that the last digit is greater or equal
return deweyNumber[lastIndex] >= other.deweyNumber[lastIndex];
} else {
return false;
}
} | 3.68 |
hadoop_AzureBlobFileSystemStore_rename | /**
* Rename a file or directory.
* If a source etag is passed in, the operation will attempt to recover
* from a missing source file by probing the destination for
* existence and comparing etags.
* @param source path to source file
* @param destination destination of rename.
* @param tracingContext trace context
* @param sourceEtag etag of source file. may be null or empty
* @throws AzureBlobFileSystemException failure, excluding any recovery from overload failures.
* @return true if recovery was needed and succeeded.
*/
public boolean rename(final Path source,
final Path destination,
final TracingContext tracingContext,
final String sourceEtag) throws
AzureBlobFileSystemException {
final Instant startAggregate = abfsPerfTracker.getLatencyInstant();
long countAggregate = 0;
boolean shouldContinue;
if (isAtomicRenameKey(source.getName())) {
LOG.warn("The atomic rename feature is not supported by the ABFS scheme; however rename,"
+" create and delete operations are atomic if Namespace is enabled for your Azure Storage account.");
}
LOG.debug("renameAsync filesystem: {} source: {} destination: {}",
client.getFileSystem(),
source,
destination);
String continuation = null;
String sourceRelativePath = getRelativePath(source);
String destinationRelativePath = getRelativePath(destination);
// was any operation recovered from?
boolean recovered = false;
do {
try (AbfsPerfInfo perfInfo = startTracking("rename", "renamePath")) {
boolean isNamespaceEnabled = getIsNamespaceEnabled(tracingContext);
final AbfsClientRenameResult abfsClientRenameResult =
client.renamePath(sourceRelativePath, destinationRelativePath,
continuation, tracingContext, sourceEtag, false,
isNamespaceEnabled);
AbfsRestOperation op = abfsClientRenameResult.getOp();
perfInfo.registerResult(op.getResult());
continuation = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_CONTINUATION);
perfInfo.registerSuccess(true);
countAggregate++;
shouldContinue = continuation != null && !continuation.isEmpty();
// update the recovery flag.
recovered |= abfsClientRenameResult.isRenameRecovered();
populateRenameRecoveryStatistics(abfsClientRenameResult);
if (!shouldContinue) {
perfInfo.registerAggregates(startAggregate, countAggregate);
}
}
} while (shouldContinue);
return recovered;
} | 3.68 |
zxing_BitSource_readBits | /**
* @param numBits number of bits to read
* @return int representing the bits read. The bits will appear as the least-significant
* bits of the int
* @throws IllegalArgumentException if numBits isn't in [1,32] or more than is available
*/
public int readBits(int numBits) {
if (numBits < 1 || numBits > 32 || numBits > available()) {
throw new IllegalArgumentException(String.valueOf(numBits));
}
int result = 0;
// First, read remainder from current byte
if (bitOffset > 0) {
int bitsLeft = 8 - bitOffset;
int toRead = Math.min(numBits, bitsLeft);
int bitsToNotRead = bitsLeft - toRead;
int mask = (0xFF >> (8 - toRead)) << bitsToNotRead;
result = (bytes[byteOffset] & mask) >> bitsToNotRead;
numBits -= toRead;
bitOffset += toRead;
if (bitOffset == 8) {
bitOffset = 0;
byteOffset++;
}
}
// Next read whole bytes
if (numBits > 0) {
while (numBits >= 8) {
result = (result << 8) | (bytes[byteOffset] & 0xFF);
byteOffset++;
numBits -= 8;
}
// Finally read a partial byte
if (numBits > 0) {
int bitsToNotRead = 8 - numBits;
int mask = (0xFF >> bitsToNotRead) << bitsToNotRead;
result = (result << numBits) | ((bytes[byteOffset] & mask) >> bitsToNotRead);
bitOffset += numBits;
}
}
return result;
} | 3.68 |
hadoop_QueueCapacityConfigParser_isCapacityVectorFormat | /**
* Checks whether the given capacity string is in a capacity vector compatible
* format.
* @param configuredCapacity capacity string
* @return true, if capacity string is in capacity vector format,
* false otherwise
*/
public boolean isCapacityVectorFormat(String configuredCapacity) {
if (configuredCapacity == null) {
return false;
}
String formattedCapacityString = configuredCapacity.replaceAll(" ", "");
return RESOURCE_PATTERN.matcher(formattedCapacityString).find();
} | 3.68 |
hbase_Tag_getValueAsLong | /**
* Converts the value bytes of the given tag into a long value
* @param tag The Tag
* @return value as long
*/
public static long getValueAsLong(Tag tag) {
if (tag.hasArray()) {
return Bytes.toLong(tag.getValueArray(), tag.getValueOffset(), tag.getValueLength());
}
return ByteBufferUtils.toLong(tag.getValueByteBuffer(), tag.getValueOffset());
} | 3.68 |
pulsar_SchemaBuilder_record | /**
* Build the schema for a record.
*
* @param name name of the record.
* @return builder to build the schema for a record.
*/
static RecordSchemaBuilder record(String name) {
return DefaultImplementation.getDefaultImplementation().newRecordSchemaBuilder(name);
} | 3.68 |
hbase_ExportSnapshot_openSourceFile | /**
* Try to open the "source" file. Throws an IOException if the communication with the inputFs
* fails or if the file is not found.
*/
private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo fileInfo)
throws IOException {
try {
Configuration conf = context.getConfiguration();
FileLink link = null;
switch (fileInfo.getType()) {
case HFILE:
Path inputPath = new Path(fileInfo.getHfile());
link = getFileLink(inputPath, conf);
break;
case WAL:
String serverName = fileInfo.getWalServer();
String logName = fileInfo.getWalName();
link = new WALLink(inputRoot, serverName, logName);
break;
default:
throw new IOException("Invalid File Type: " + fileInfo.getType().toString());
}
return link.open(inputFs);
} catch (IOException e) {
context.getCounter(Counter.MISSING_FILES).increment(1);
LOG.error("Unable to open source file=" + fileInfo.toString(), e);
throw e;
}
} | 3.68 |
hudi_MetadataConversionUtils_convertCommitMetadataToJsonBytes | /**
* Convert commit metadata from Avro to JSON.
*/
public static <T extends SpecificRecordBase> byte[] convertCommitMetadataToJsonBytes(T avroMetaData, Class<T> clazz) {
Schema avroSchema = clazz == org.apache.hudi.avro.model.HoodieReplaceCommitMetadata.class ? org.apache.hudi.avro.model.HoodieReplaceCommitMetadata.getClassSchema() :
org.apache.hudi.avro.model.HoodieCommitMetadata.getClassSchema();
try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
JsonEncoder jsonEncoder = new JsonEncoder(avroSchema, outputStream);
DatumWriter<GenericRecord> writer = avroMetaData instanceof SpecificRecord ? new SpecificDatumWriter<>(avroSchema) : new GenericDatumWriter<>(avroSchema);
writer.write(avroMetaData, jsonEncoder);
jsonEncoder.flush();
return outputStream.toByteArray();
} catch (IOException e) {
throw new HoodieIOException("Failed to convert to JSON.", e);
}
} | 3.68 |
AreaShop_FileManager_getSchematicFolder | /**
* Get the folder where schematics are stored.
* @return The folder where schematics are stored
*/
public String getSchematicFolder() {
return schemFolder;
} | 3.68 |
hadoop_OBSDataBlocks_getInputStream | /**
* InputStream backed by the internal byte array.
*
* @return input stream
*/
ByteArrayInputStream getInputStream() {
ByteArrayInputStream bin = new ByteArrayInputStream(this.buf, 0,
count);
this.reset();
this.buf = null;
return bin;
} | 3.68 |
framework_RpcDataProviderExtension_columnsAdded | /**
* Informs this data provider that given columns have been added to grid.
*
* @param addedColumns
* a list of added columns
*/
public void columnsAdded(List<Column> addedColumns) {
for (GridValueChangeListener l : activeItemHandler
.getValueChangeListeners()) {
l.addColumns(addedColumns);
}
// Resend all rows to contain new data.
refreshCache();
} | 3.68 |
hbase_LogEventHandler_clearNamedQueue | /**
* Cleans up queues maintained by services.
* @param namedQueueEvent type of queue to clear
* @return true if queue is cleaned up, false otherwise
*/
boolean clearNamedQueue(NamedQueuePayload.NamedQueueEvent namedQueueEvent) {
return namedQueueServices.get(namedQueueEvent).clearNamedQueue();
} | 3.68 |
flink_AbstractSqlCallContext_getLiteralValueAs | /** Bridges to {@link ValueLiteralExpression#getValueAs(Class)}. */
@SuppressWarnings("unchecked")
protected static <T> T getLiteralValueAs(LiteralValueAccessor accessor, Class<T> clazz) {
Preconditions.checkArgument(!clazz.isPrimitive());
Object convertedValue = null;
if (clazz == Duration.class) {
final long longVal = accessor.getValueAs(Long.class);
convertedValue = Duration.ofMillis(longVal);
} else if (clazz == Period.class) {
final long longVal = accessor.getValueAs(Long.class);
if (longVal <= Integer.MAX_VALUE && longVal >= Integer.MIN_VALUE) {
convertedValue = Period.ofMonths((int) longVal);
}
} else if (clazz == java.time.LocalDate.class) {
final DateString dateString = accessor.getValueAs(DateString.class);
convertedValue = java.time.LocalDate.parse(dateString.toString());
} else if (clazz == java.time.LocalTime.class) {
final TimeString timeString = accessor.getValueAs(TimeString.class);
convertedValue = java.time.LocalTime.parse(timeString.toString());
} else if (clazz == java.time.LocalDateTime.class) {
final TimestampString timestampString = accessor.getValueAs(TimestampString.class);
convertedValue =
java.time.LocalDateTime.parse(timestampString.toString().replace(' ', 'T'));
} else if (clazz == java.time.Instant.class) {
// timestamp string is in UTC, convert back to an instant
final TimestampString timestampString = accessor.getValueAs(TimestampString.class);
convertedValue =
java.time.LocalDateTime.parse(timestampString.toString().replace(' ', 'T'))
.atOffset(ZoneOffset.UTC)
.toInstant();
}
if (convertedValue != null) {
return (T) convertedValue;
}
return accessor.getValueAs(clazz);
} | 3.68 |
hbase_ConnectionOverAsyncConnection_createThreadPool | // only used for executing coprocessor calls, as users may reference the methods in the
// BlockingInterface of the protobuf stub so we have to execute the call in a separated thread...
// Will be removed in 4.0.0 along with the deprecated coprocessor methods in Table and Admin
// interface.
private ThreadPoolExecutor createThreadPool() {
Configuration conf = conn.getConfiguration();
int threads = conf.getInt("hbase.hconnection.threads.max", 256);
long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
BlockingQueue<Runnable> workQueue =
new LinkedBlockingQueue<>(threads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
ThreadPoolExecutor tpe = new ThreadPoolExecutor(threads, threads, keepAliveTime,
TimeUnit.SECONDS, workQueue,
new ThreadFactoryBuilder().setDaemon(true).setNameFormat(toString() + "-shared-%d").build());
tpe.allowCoreThreadTimeOut(true);
return tpe;
} | 3.68 |
pulsar_ManagedCursorImpl_updateLastMarkDeleteEntryToLatest | // update lastMarkDeleteEntry field if newPosition is later than the current lastMarkDeleteEntry.newPosition
private void updateLastMarkDeleteEntryToLatest(final PositionImpl newPosition,
final Map<String, Long> properties) {
LAST_MARK_DELETE_ENTRY_UPDATER.updateAndGet(this, last -> {
if (last != null && last.newPosition.compareTo(newPosition) > 0) {
// keep current value, don't update
return last;
} else {
// use given properties or when missing, use the properties from the previous field value
Map<String, Long> propertiesToUse =
properties != null ? properties : (last != null ? last.properties : Collections.emptyMap());
return new MarkDeleteEntry(newPosition, propertiesToUse, null, null);
}
});
} | 3.68 |
graphhopper_VectorTile_setFloatValue | /**
* <code>optional float float_value = 2;</code>
*/
public Builder setFloatValue(float value) {
bitField0_ |= 0x00000002;
floatValue_ = value;
onChanged();
return this;
} | 3.68 |
zxing_BarcodeRow_set | /**
* Sets a specific location in the bar
*
* @param x The location in the bar
* @param black Black if true, white if false.
*/
private void set(int x, boolean black) {
row[x] = (byte) (black ? 1 : 0);
} | 3.68 |
hadoop_DataNodeVolumeMetrics_getWriteIoSampleCount | // Based on writeIoRate
public long getWriteIoSampleCount() {
return writeIoRate.lastStat().numSamples();
} | 3.68 |
flink_Deadline_now | /**
* Constructs a {@link Deadline} that has now as the deadline. Use this and then extend via
* {@link #plus(Duration)} to specify a deadline in the future.
*/
public static Deadline now() {
return new Deadline(System.nanoTime(), SystemClock.getInstance());
} | 3.68 |
dubbo_DubboProtocol_buildReferenceCountExchangeClientList | /**
* Bulk build clients.
*
* @param url the URL used to build each client
* @param connectNum the number of clients to build
* @return the list of built clients
*/
private List<ReferenceCountExchangeClient> buildReferenceCountExchangeClientList(URL url, int connectNum) {
List<ReferenceCountExchangeClient> clients = new ArrayList<>();
for (int i = 0; i < connectNum; i++) {
clients.add(buildReferenceCountExchangeClient(url));
}
return clients;
} | 3.68 |
hbase_Query_setAuthorizations | /**
* Sets the authorizations to be used by this Query
*/
public Query setAuthorizations(Authorizations authorizations) {
this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
ProtobufUtil.toAuthorizations(authorizations).toByteArray());
return this;
} | 3.68 |
framework_VaadinPortletSession_firePortletRenderRequest | /**
* For internal use by the framework only - API subject to change.
*/
public void firePortletRenderRequest(UI uI, RenderRequest request,
RenderResponse response) {
for (PortletListener l : new ArrayList<>(portletListeners)) {
l.handleRenderRequest(request,
new RestrictedRenderResponse(response), uI);
}
} | 3.68 |
flink_MailboxProcessor_sendControlMail | /**
* Sends the given <code>mail</code> using {@link TaskMailbox#putFirst(Mail)}. Intended use is
* to control this <code>MailboxProcessor</code>; no interaction with tasks should be performed.
*/
private void sendControlMail(
RunnableWithException mail, String descriptionFormat, Object... descriptionArgs) {
mailbox.putFirst(
new Mail(
mail,
Integer.MAX_VALUE /*not used with putFirst*/,
descriptionFormat,
descriptionArgs));
} | 3.68 |
querydsl_BeanPath_instanceOf | /**
* Create an {@code this instanceOf type} expression
*
* @param <B>
* @param type rhs of the expression
* @return instanceof expression
*/
public <B extends T> BooleanExpression instanceOf(Class<B> type) {
return Expressions.booleanOperation(Ops.INSTANCE_OF, pathMixin, ConstantImpl.create(type));
} | 3.68 |
hadoop_FederationStateStoreFacade_generateStateStoreFacade | /**
* Generate the singleton instance of the FederationStateStoreFacade object.
*
* @param conf configuration.
*/
private static void generateStateStoreFacade(Configuration conf){
if (facade == null) {
synchronized (FederationStateStoreFacade.class) {
if (facade == null) {
Configuration yarnConf = new Configuration();
if (conf != null) {
yarnConf = conf;
}
facade = new FederationStateStoreFacade(yarnConf);
}
}
}
} | 3.68 |
hadoop_S3ARemoteObject_getPath | /**
* Gets the path corresponding to the given s3Attributes.
*
* @param s3Attributes attributes of an S3 object.
* @return the path corresponding to the given s3Attributes.
*/
public static String getPath(S3ObjectAttributes s3Attributes) {
return String.format("s3a://%s/%s", s3Attributes.getBucket(),
s3Attributes.getKey());
} | 3.68 |
framework_VScrollTable_fireFooterClickedEvent | /**
* Fires a footer click event after the user has clicked a column footer
* cell.
*
* @param event
* The click event
*/
private void fireFooterClickedEvent(Event event) {
if (client.hasEventListeners(VScrollTable.this,
TableConstants.FOOTER_CLICK_EVENT_ID)) {
MouseEventDetails details = MouseEventDetailsBuilder
.buildMouseEventDetails(event);
client.updateVariable(paintableId, "footerClickEvent",
details.toString(), false);
client.updateVariable(paintableId, "footerClickCID", cid, true);
}
} | 3.68 |
hadoop_IOStatisticsBinding_aggregateMinimums | /**
* Aggregate two minimum values.
* @param l left
* @param r right
* @return the new minimum.
*/
public static Long aggregateMinimums(Long l, Long r) {
if (l == MIN_UNSET_VALUE) {
return r;
} else if (r == MIN_UNSET_VALUE) {
return l;
} else {
return Math.min(l, r);
}
} | 3.68 |
flink_SourcePredicates_isJavaClass | /**
* Checks whether the given {@link JavaClass} is actually a Java class, and not a Scala class.
*
* <p>ArchUnit does not yet fully support Scala. Rules should ensure that they restrict
* themselves to only Java classes for correct results.
*/
static boolean isJavaClass(JavaClass clazz) {
if (!clazz.getSource().isPresent()) {
return false;
}
final Source source = clazz.getSource().get();
if (!source.getFileName().isPresent()) {
return false;
}
return source.getFileName().get().contains(".java");
} | 3.68 |
hbase_BlockIOUtils_annotateBytesRead | /**
* Conditionally annotate {@code attributesBuilder} with appropriate attributes when values are
* non-zero.
*/
private static void annotateBytesRead(AttributesBuilder attributesBuilder, long directBytesRead,
long heapBytesRead) {
if (directBytesRead > 0) {
attributesBuilder.put(DIRECT_BYTES_READ_KEY, directBytesRead);
}
if (heapBytesRead > 0) {
attributesBuilder.put(HEAP_BYTES_READ_KEY, heapBytesRead);
}
} | 3.68 |
MagicPlugin_BaseSpell_castMessage | /**
* Send a message to a player when a spell is cast.
*
* @param message The message to send
*/
@Override
public void castMessage(String message)
{
Wand activeWand = mage.getActiveWand();
// First check wand
if (!loud && activeWand != null && !activeWand.showCastMessages()) return;
if (!quiet && canSendMessage() && message != null && message.length() > 0)
{
if (currentCast != null) {
message = currentCast.parameterize(message);
}
mage.castMessage(message);
lastMessageSent = System.currentTimeMillis();
}
} | 3.68 |
flink_BinarySegmentUtils_getBoolean | /**
* get boolean from segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static boolean getBoolean(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 1)) {
return segments[0].getBoolean(offset);
} else {
return getBooleanMultiSegments(segments, offset);
}
} | 3.68 |
hadoop_AuthenticationHandlerUtil_checkAuthScheme | /**
* This method checks if the specified HTTP authentication <code>scheme</code>
* value is valid.
*
* @param scheme HTTP authentication scheme to be checked
* @return Canonical representation of HTTP authentication scheme
* @throws IllegalArgumentException In case the specified value is not a valid
* HTTP authentication scheme.
*/
public static String checkAuthScheme(String scheme) {
if (BASIC.equalsIgnoreCase(scheme)) {
return BASIC;
} else if (NEGOTIATE.equalsIgnoreCase(scheme)) {
return NEGOTIATE;
} else if (DIGEST.equalsIgnoreCase(scheme)) {
return DIGEST;
}
throw new IllegalArgumentException(String.format(
"Unsupported HTTP authentication scheme %s ."
+ " Supported schemes are [%s, %s, %s]", scheme, BASIC, NEGOTIATE,
DIGEST));
} | 3.68 |
Activiti_SimpleContext_getELResolver | /**
* Get our resolver. Lazy initialize to a {@link SimpleResolver} if necessary.
*/
@Override
public ELResolver getELResolver() {
if (resolver == null) {
resolver = new SimpleResolver();
}
return resolver;
} | 3.68 |
flink_FlinkContainers_stop | /** Stops all containers. */
public void stop() {
isStarted = false;
if (restClusterClient != null) {
restClusterClient.close();
}
this.taskManagers.forEach(GenericContainer::stop);
deleteJobManagerTemporaryFiles();
this.jobManager.stop();
if (this.haService != null) {
this.haService.stop();
}
} | 3.68 |
framework_BeanValidationBinder_findBeanType | /**
* Finds the bean type containing the property the given definition refers
* to.
*
* @param beanType
* the root beanType
* @param definition
* the definition for the property
* @return the bean type containing the given property
*/
@SuppressWarnings({ "rawtypes" })
private Class<?> findBeanType(Class<BEAN> beanType,
PropertyDefinition<BEAN, ?> definition) {
if (definition instanceof NestedBeanPropertyDefinition) {
return ((NestedBeanPropertyDefinition) definition).getParent()
.getType();
} else {
// Non nested properties must be defined in the main type
return beanType;
}
} | 3.68 |
hadoop_FederationStateStoreFacade_deleteApplicationHomeSubCluster | /**
* Delete the mapping of home {@code SubClusterId} of a previously submitted
* {@code ApplicationId}. Currently the response is empty if the operation is
* successful; if not, an exception reporting the reason for the failure is thrown.
*
* @param applicationId the application to delete the home sub-cluster of
* @throws YarnException if the request is invalid/fails
*/
public void deleteApplicationHomeSubCluster(ApplicationId applicationId)
throws YarnException {
stateStore.deleteApplicationHomeSubCluster(
DeleteApplicationHomeSubClusterRequest.newInstance(applicationId));
} | 3.68 |
hadoop_OBSCommonUtils_blockRootDelete | /**
* Reject any request to delete an object where the key is root.
*
* @param bucket bucket name
* @param key key to validate
* @throws InvalidRequestException if the request was rejected due to a
* mistaken attempt to delete the root
* directory.
*/
static void blockRootDelete(final String bucket, final String key)
throws InvalidRequestException {
if (key.isEmpty() || "/".equals(key)) {
throw new InvalidRequestException(
"Bucket " + bucket + " cannot be deleted");
}
} | 3.68 |
hbase_MasterCoprocessorHost_postRollBackMergeRegionsAction | /**
* Invoked after rollback merge regions operation
* @param regionsToMerge the regions to merge
* @param user the user
*/
public void postRollBackMergeRegionsAction(final RegionInfo[] regionsToMerge, final User user)
throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) {
@Override
public void call(MasterObserver observer) throws IOException {
observer.postRollBackMergeRegionsAction(this, regionsToMerge);
}
});
} | 3.68 |
hibernate-validator_ValidationBootstrapParameters_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
flink_CheckpointStatsCache_tryAdd | /**
* Try to add the checkpoint to the cache.
*
* @param checkpoint Checkpoint to be added.
*/
public void tryAdd(AbstractCheckpointStats checkpoint) {
// Don't add in progress checkpoints as they will be replaced by their
// completed/failed version eventually.
if (cache != null && checkpoint != null && !checkpoint.getStatus().isInProgress()) {
cache.put(checkpoint.getCheckpointId(), checkpoint);
}
} | 3.68 |
hibernate-validator_ConstrainedExecutable_hasParameterConstraints | /**
* Whether this executable has at least one cascaded parameter or at least one
* parameter with constraints or at least one cross-parameter constraint.
*
* @return {@code True}, if this executable is parameter-constrained by any
* means, {@code false} otherwise.
*/
public boolean hasParameterConstraints() {
return hasParameterConstraints;
} | 3.68 |
flink_ChangelogMode_insertOnly | /** Shortcut for a simple {@link RowKind#INSERT}-only changelog. */
public static ChangelogMode insertOnly() {
return INSERT_ONLY;
} | 3.68 |
morf_AbstractSqlDialectTest_testAddColumnWithDefault | /**
* Test adding a column with default value.
*/
@Test
public void testAddColumnWithDefault() {
testAlterTableColumn(AlterationType.ADD, column("floatField_new", DataType.DECIMAL, 6, 3).nullable().defaultValue("20.33"), expectedAlterTableAddColumnWithDefaultStatement());
} | 3.68 |
framework_ComplexRenderer_setContentVisible | /**
* Used by Grid to toggle whether to show actual data or just an empty
* placeholder while data is loading. This method is invoked whenever a cell
* changes between data being available and data missing.
* <p>
* Default implementation hides content by setting visibility: hidden to all
* elements inside the cell. Text nodes are left as is - renderers that add
* text nodes to the root element need to implement explicit support for hiding them.
*
* @param cell
* The cell
* @param hasData
* Has the cell content been loaded from the data source
*
*/
public void setContentVisible(RendererCellReference cell, boolean hasData) {
Element cellElement = cell.getElement();
for (int n = 0; n < cellElement.getChildCount(); n++) {
Node node = cellElement.getChild(n);
if (Element.is(node)) {
Element e = Element.as(node);
if (hasData) {
e.getStyle().clearVisibility();
} else {
e.getStyle().setVisibility(Visibility.HIDDEN);
}
}
}
} | 3.68 |
querydsl_GeometryExpression_geometryType | /**
* Returns the name of the instantiable subtype of Geometry of which this
* geometric object is an instantiable member. The name of the subtype of Geometry is returned as a string.
*
* @return geometry type
*/
public StringExpression geometryType() {
if (geometryType == null) {
geometryType = Expressions.stringOperation(SpatialOps.GEOMETRY_TYPE, mixin);
}
return geometryType;
} | 3.68 |
hbase_KeyValue_getOffset | /** Returns Offset into {@link #getBuffer()} at which this KeyValue starts. */
public int getOffset() {
return this.offset;
} | 3.68 |
hbase_ByteBufferUtils_arePartsEqual | /**
* Check whether two parts in the same buffer are equal.
* @param buffer In which buffer there are parts
* @param offsetLeft Beginning of first part.
* @param lengthLeft Length of the first part.
* @param offsetRight Beginning of the second part.
* @param lengthRight Length of the second part.
* @return True if equal
*/
public static boolean arePartsEqual(ByteBuffer buffer, int offsetLeft, int lengthLeft,
int offsetRight, int lengthRight) {
if (lengthLeft != lengthRight) {
return false;
}
if (buffer.hasArray()) {
return 0 == Bytes.compareTo(buffer.array(), buffer.arrayOffset() + offsetLeft, lengthLeft,
buffer.array(), buffer.arrayOffset() + offsetRight, lengthRight);
}
for (int i = 0; i < lengthRight; ++i) {
if (buffer.get(offsetLeft + i) != buffer.get(offsetRight + i)) {
return false;
}
}
return true;
} | 3.68 |
framework_GridLayout_getColumnExpandRatio | /**
* Returns the expand ratio of given column.
*
* @see #setColumnExpandRatio(int, float)
*
* @param columnIndex
* The column index, starting from 0 for the leftmost column.
* @return the expand ratio, 0.0f by default
*/
public float getColumnExpandRatio(int columnIndex) {
Float r = columnExpandRatio.get(columnIndex);
return r == null ? 0 : r.floatValue();
} | 3.68 |
hbase_LocalHBaseCluster_waitOnRegionServer | /**
* Wait for the specified region server to stop. Removes this thread from the list of running threads.
* @return Name of region server that just went down.
*/
public String waitOnRegionServer(JVMClusterUtil.RegionServerThread rst) {
boolean interrupted = false;
while (rst.isAlive()) {
try {
LOG.info("Waiting on " + rst.getRegionServer().toString());
rst.join();
} catch (InterruptedException e) {
LOG.error("Interrupted while waiting for {} to finish. Retrying join", rst.getName(), e);
interrupted = true;
}
}
regionThreads.remove(rst);
if (interrupted) {
Thread.currentThread().interrupt();
}
return rst.getName();
} | 3.68 |
hadoop_SaslParticipant_createClientSaslParticipant | /**
* Creates a SaslParticipant wrapping a SaslClient.
*
* @param userName SASL user name
* @param saslProps properties of SASL negotiation
* @param callbackHandler for handling all SASL callbacks
* @return SaslParticipant wrapping SaslClient
* @throws SaslException for any error
*/
public static SaslParticipant createClientSaslParticipant(String userName,
Map<String, String> saslProps, CallbackHandler callbackHandler)
throws SaslException {
initializeSaslClientFactory();
return new SaslParticipant(
saslClientFactory.createSaslClient(new String[] {MECHANISM}, userName,
PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
} | 3.68 |
flink_DeserializationSchema_open | /**
* Initialization method for the schema. It is called before the actual working methods {@link
* #deserialize} and thus suitable for one time setup work.
*
* <p>The provided {@link InitializationContext} can be used to access additional features such
* as e.g. registering user metrics.
*
* @param context Contextual information that can be used during initialization.
*/
@PublicEvolving
default void open(InitializationContext context) throws Exception {} | 3.68 |
hbase_QuotaTableUtil_makeQuotaSnapshotGetForTable | /**
* Creates a {@link Get} which returns only {@link SpaceQuotaSnapshot} from the quota table for a
* specific table.
* @param tn table name to get from. Can't be null.
*/
public static Get makeQuotaSnapshotGetForTable(TableName tn) {
Get g = new Get(getTableRowKey(tn));
// Limit to "u:v" column
g.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY);
return g;
} | 3.68 |
flink_HiveParserTypeCheckCtx_getInputRR | /** @return the inputRR */
public HiveParserRowResolver getInputRR() {
return inputRR;
} | 3.68 |
pulsar_LinuxInfoUtils_isLinux | /**
* Determine whether the OS uses the Linux kernel.
* @return whether the OS uses the Linux kernel
*/
public static boolean isLinux() {
return SystemUtils.IS_OS_LINUX;
} | 3.68 |
hadoop_RecordCreatorFactory_setTtl | /**
* Set the TTL value for the records created by the factory.
*
* @param ttl the ttl value, in seconds.
*/
public static void setTtl(long ttl) {
RecordCreatorFactory.ttl = ttl;
} | 3.68 |
framework_Table_setMultiSelectTouchDetectionEnabled | /**
* Default behavior on touch-reporting devices is to switch from CTRL/SHIFT
* based multi-selection to simple mode, but you can use this method to
* explicitly disable the touch device detection. Thus you can keep using
* keyboard-based multi selection on hybrid devices that have both a touch
* screen and a keyboard.
*
* @param multiSelectTouchDetectionEnabled
* Whether to enable or disable touch screen detection
*/
public void setMultiSelectTouchDetectionEnabled(
boolean multiSelectTouchDetectionEnabled) {
this.multiSelectTouchDetectionEnabled = multiSelectTouchDetectionEnabled;
markAsDirty();
} | 3.68 |
shardingsphere-elasticjob_ElasticJobExecutorService_getWorkQueueSize | /**
* Get work queue size.
*
* @return work queue size
*/
public int getWorkQueueSize() {
return workQueue.size();
} | 3.68 |
framework_ConnectorMap_isConnector | /**
* Tests if the widget is the root widget of a {@link ComponentConnector}.
*
* @param widget
* The widget to test
* @return true if the widget is the root widget of a
* {@link ComponentConnector}, false otherwise
*/
public boolean isConnector(Widget widget) {
return getConnectorId(widget.getElement()) != null;
} | 3.68 |
hibernate-validator_SizeValidatorForArraysOfShort_isValid | /**
* Checks the number of entries in an array.
*
* @param array The array to validate.
* @param constraintValidatorContext context in which the constraint is evaluated.
*
* @return Returns {@code true} if the array is {@code null} or the number of entries in
* {@code array} is between the specified {@code min} and {@code max} values (inclusive),
* {@code false} otherwise.
*/
@Override
public boolean isValid(short[] array, ConstraintValidatorContext constraintValidatorContext) {
if ( array == null ) {
return true;
}
return array.length >= min && array.length <= max;
} | 3.68 |
pulsar_OwnershipCache_disableOwnership | /**
* Disable bundle in local cache and on zk.
* @deprecated This is a dangerous method which is currently only used for tests; it will occupy the ZK thread.
* Please switch to your own thread after calling this method.
*/
@Deprecated
public CompletableFuture<Void> disableOwnership(NamespaceBundle bundle) {
return updateBundleState(bundle, false)
.thenCompose(__ -> {
ResourceLock<NamespaceEphemeralData> lock = locallyAcquiredLocks.get(bundle);
if (lock == null) {
return CompletableFuture.completedFuture(null);
} else {
return lock.updateValue(selfOwnerInfoDisabled);
}
});
} | 3.68 |
hudi_Triple_toString | /**
* <p>
* Formats the receiver using the given format.
* </p>
*
* <p>
* This uses {@link java.util.Formattable} to perform the formatting. Three variables may be used to embed the left,
* middle and right elements. Use {@code %1$s} for the left element, {@code %2$s} for the middle and {@code %3$s} for the
* right element. The default format used by {@code toString()} is {@code (%1$s,%2$s,%3$s)}.
* </p>
*
* @param format the format string, optionally containing {@code %1$s}, {@code %2$s} and {@code %3$s}, not null
* @return the formatted string, not null
*/
public String toString(final String format) {
return String.format(format, getLeft(), getMiddle(), getRight());
} | 3.68 |
hbase_ResponseConverter_buildException | /** Returns a NameBytesPair of the exception name to the stringified version of the exception. */
public static NameBytesPair buildException(final Throwable t) {
NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder();
parameterBuilder.setName(t.getClass().getName());
parameterBuilder.setValue(ByteString.copyFromUtf8(StringUtils.stringifyException(t)));
return parameterBuilder.build();
} | 3.68 |
pulsar_CmdRead_run | /**
* Run the read command.
*
* @return 0 for success, < 0 otherwise
*/
public int run() throws PulsarClientException, IOException {
if (mainOptions.size() != 1) {
throw (new ParameterException("Please provide one and only one topic name."));
}
if (this.numMessagesToRead < 0) {
throw (new ParameterException("Number of messages should be zero or positive."));
}
String topic = this.mainOptions.get(0);
if (this.serviceURL.startsWith("ws")) {
return readFromWebSocket(topic);
} else {
return read(topic);
}
} | 3.68 |
querydsl_AbstractCollQuery_leftJoin | /**
* Define a left join from the Map typed path to the alias
*
* @param <P> type of expression
* @param target target of the join
* @param alias alias for the join target
* @return current object
*/
public <P> Q leftJoin(MapExpression<?,P> target, Path<P> alias) {
getMetadata().addJoin(JoinType.LEFTJOIN, createAlias(target, alias));
return queryMixin.getSelf();
} | 3.68 |
pulsar_MetadataStoreFactory_create | /**
* Create a new {@link MetadataStore} instance based on the given configuration.
*
* @param metadataURL
* the metadataStore URL
* @param metadataStoreConfig
* the configuration object
* @return a new {@link MetadataStore} instance
* @throws MetadataStoreException
* if the metadata store initialization fails
*/
public static MetadataStore create(String metadataURL, MetadataStoreConfig metadataStoreConfig)
throws MetadataStoreException {
return MetadataStoreFactoryImpl.create(metadataURL, metadataStoreConfig);
} | 3.68 |
framework_MenuBarTooltipsNearEdge_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Menu item tooltips should not abscure other menu items";
} | 3.68 |
flink_AbstractKubernetesStepDecorator_decorateFlinkPod | /**
* Apply transformations on the given FlinkPod in accordance with this feature. Note that we
* should return a FlinkPod that keeps all of the properties of the passed FlinkPod object.
*
* <p>So this is correct:
*
* <pre>{@code
* Pod decoratedPod = new PodBuilder(pod) // Keeps the original state
* ...
* .build()
*
* Container decoratedContainer = new ContainerBuilder(container) // Keeps the original state
* ...
* .build()
*
* FlinkPod decoratedFlinkPod = new FlinkPodBuilder(flinkPod) // Keeps the original state
* ...
* .build()
*
* }</pre>
*
* <p>And this is incorrect:
*
* <pre>{@code
* Pod decoratedPod = new PodBuilder() // Loses the original state
* ...
* .build()
*
* Container decoratedContainer = new ContainerBuilder() // Loses the original state
* ...
* .build()
*
* FlinkPod decoratedFlinkPod = new FlinkPodBuilder() // Loses the original state
* ...
* .build()
*
* }</pre>
*/
@Override
public FlinkPod decorateFlinkPod(FlinkPod flinkPod) {
return flinkPod;
} | 3.68 |
flink_AvroParquetRecordFormat_restoreReader | /**
* Restores the reader from a checkpointed position. This is in fact identical to creating a new
* reader, since only {@link CheckpointedPosition#NO_OFFSET} is supported as the {@code restoredOffset}.
*/
@Override
public Reader<E> restoreReader(
Configuration config,
FSDataInputStream stream,
long restoredOffset,
long fileLen,
long splitEnd)
throws IOException {
// current version does not support splitting.
checkNotSplit(fileLen, splitEnd);
checkArgument(
restoredOffset == CheckpointedPosition.NO_OFFSET,
"The restoredOffset should always be NO_OFFSET");
return createReader(config, stream, fileLen, splitEnd);
} | 3.68 |
flink_OperatorChain_getOperatorRecordsOutCounter | /**
* Get the numRecordsOut counter for the operator represented by the given config, and re-use
* the operator-level counter as the task-level numRecordsOut counter if this operator is at
* the end of the operator chain.
*
* <p>Return null if we should not use the numRecordsOut counter to track the records emitted by
* this operator.
*/
@Nullable
private Counter getOperatorRecordsOutCounter(
StreamTask<?, ?> containingTask, StreamConfig operatorConfig) {
ClassLoader userCodeClassloader = containingTask.getUserCodeClassLoader();
Class<StreamOperatorFactory<?>> streamOperatorFactoryClass =
operatorConfig.getStreamOperatorFactoryClass(userCodeClassloader);
// Do not use the numRecordsOut counter on output if this operator is SinkWriterOperator.
//
// Metric "numRecordsOut" is defined as the total number of records written to the
// external system in FLIP-33, but this metric is occupied in AbstractStreamOperator as the
// number of records sent to downstream operators, which is number of Committable batches
// sent to SinkCommitter. So we skip registering this metric on output and leave this metric
// to sink writer implementations to report.
try {
Class<?> sinkWriterFactoryClass =
userCodeClassloader.loadClass(SinkWriterOperatorFactory.class.getName());
if (sinkWriterFactoryClass.isAssignableFrom(streamOperatorFactoryClass)) {
return null;
}
} catch (ClassNotFoundException e) {
throw new StreamTaskException(
"Could not load SinkWriterOperatorFactory class from userCodeClassloader.", e);
}
InternalOperatorMetricGroup operatorMetricGroup =
containingTask
.getEnvironment()
.getMetricGroup()
.getOrAddOperator(
operatorConfig.getOperatorID(), operatorConfig.getOperatorName());
return operatorMetricGroup.getIOMetricGroup().getNumRecordsOutCounter();
} | 3.68 |
framework_Table_setCurrentPageFirstItemIndex | /**
* Setter for property currentPageFirstItem.
*
* @param newIndex
* the New value of property currentPageFirstItem.
*/
public void setCurrentPageFirstItemIndex(int newIndex) {
setCurrentPageFirstItemIndex(newIndex, true);
} | 3.68 |
flink_MemorySegment_getShortBigEndian | /**
* Reads a short integer value (16 bit, 2 bytes) from the given position, in big-endian byte
* order. This method's speed depends on the system's native byte order, and it is possibly
* slower than {@link #getShort(int)}. For most cases (such as transient storage in memory or
* serialization for I/O and network), it suffices to know that the byte order in which the
* value is written is the same as the one in which it is read, and {@link #getShort(int)} is
* the preferable choice.
*
* @param index The position from which the value will be read.
* @return The short value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
public short getShortBigEndian(int index) {
if (LITTLE_ENDIAN) {
return Short.reverseBytes(getShort(index));
} else {
return getShort(index);
}
} | 3.68 |
hbase_TableInputFormatBase_setRowFilter | /**
* Allows subclasses to set the {@link Filter} to be used.
*/
protected void setRowFilter(Filter rowFilter) {
this.rowFilter = rowFilter;
} | 3.68 |
dubbo_SlidingWindow_isPaneDeprecated | /**
* Checks if the specified pane is deprecated at the specified timestamp.
*
* @param timeMillis the specified time.
* @param pane the specified pane.
* @return true if the pane is deprecated; otherwise false.
*/
public boolean isPaneDeprecated(long timeMillis, final Pane<T> pane) {
// the pane is '[)'
return (timeMillis - pane.getStartInMs()) > intervalInMs;
} | 3.68 |
hadoop_DiskBalancerWorkStatus_getPlanFile | /**
* Returns planFile.
*
* @return String
*/
public String getPlanFile() {
return planFile;
} | 3.68 |
hbase_ChaosAgent_createIfZNodeNotExists | /**
* Checks if the given ZNode exists; if not, creates a PERSISTENT ZNode at that path.
* @param path Path to check for ZNode
*/
private void createIfZNodeNotExists(String path) {
try {
if (zk.exists(path, false) == null) {
createZNode(path, new byte[0]);
}
} catch (KeeperException | InterruptedException e) {
LOG.error("Error checking given node : " + path + " " + e);
}
} | 3.68 |
framework_VAbstractPopupCalendar_updateValue | /**
* Changes the current date, and updates the
* {@link VDateField#bufferedResolutions}, possibly
* {@link VDateField#sendBufferedValues()} to the server if needed.
*
* @param newDate
* the new {@code Date} to update
*/
@SuppressWarnings("deprecation")
public void updateValue(Date newDate) {
Date currentDate = getCurrentDate();
R resolution = getCurrentResolution();
if (currentDate == null || newDate.getTime() != currentDate.getTime()) {
setCurrentDate((Date) newDate.clone());
bufferedResolutions.put(calendar.getResolution(calendar::isYear),
newDate.getYear() + 1900);
if (!calendar.isYear(resolution)) {
bufferedResolutions.put(
calendar.getResolution(calendar::isMonth),
newDate.getMonth() + 1);
if (!calendar.isMonth(resolution)) {
bufferedResolutions.put(
calendar.getResolution(calendar::isDay),
newDate.getDate());
}
}
}
} | 3.68 |
hadoop_TFile_createScannerByByteRange | /**
* Get a scanner that covers a portion of TFile based on byte offsets.
*
* @param offset
* The beginning byte offset in the TFile.
* @param length
* The length of the region.
* @return The actual coverage of the returned scanner tries to match the
* specified byte-region but always round up to the compression
* block boundaries. It is possible that the returned scanner
* contains zero key-value pairs even if length is positive.
* @throws IOException raised on errors performing I/O.
*/
public Scanner createScannerByByteRange(long offset, long length) throws IOException {
return new Scanner(this, offset, offset + length);
} | 3.68 |
querydsl_DateTimeExpression_currentTimestamp | /**
* Create an expression representing the current time instant as a DateTimeExpression instance
*
* @return current timestamp
*/
public static <T extends Comparable> DateTimeExpression<T> currentTimestamp(Class<T> cl) {
return Expressions.dateTimeOperation(cl, Ops.DateTimeOps.CURRENT_TIMESTAMP);
} | 3.68 |
hbase_BlockCacheKey_heapSize | /**
* Strings have two bytes per character due to default Java Unicode encoding (hence length times
* 2).
*/
@Override
public long heapSize() {
return ClassSize.align(FIXED_OVERHEAD + ClassSize.STRING + 2 * hfileName.length());
} | 3.68 |
flink_IOManager_deleteChannel | /**
* Deletes the file underlying the given channel. If the channel is still open, this call may
* fail.
*
* @param channel The channel to be deleted.
*/
public static void deleteChannel(ID channel) {
if (channel != null) {
if (channel.getPathFile().exists() && !channel.getPathFile().delete()) {
LOG.warn("IOManager failed to delete temporary file {}", channel.getPath());
}
}
} | 3.68 |
flink_DeclarativeSlotManager_close | /**
* Closes the slot manager.
*
* @throws Exception if the close operation fails
*/
@Override
public void close() throws Exception {
LOG.info("Closing the slot manager.");
suspend();
} | 3.68 |
flink_DefaultConfigurableOptionsFactory_setLogFileNum | /**
* The maximum number of files RocksDB should keep for logging.
*
* @param logFileNum number of files to keep
* @return this options factory
*/
public DefaultConfigurableOptionsFactory setLogFileNum(int logFileNum) {
Preconditions.checkArgument(
logFileNum > 0, "Invalid configuration: Must keep at least one log file.");
configuredOptions.put(LOG_FILE_NUM.key(), String.valueOf(logFileNum));
return this;
} | 3.68 |
aws-saas-boost_KeycloakUserDataAccessLayer_updateUserRepresentation | // VisibleForTesting
static UserRepresentation updateUserRepresentation(SystemUser user, UserRepresentation keycloakUser) {
if (user != null) {
keycloakUser.setUsername(user.getUsername());
keycloakUser.setFirstName(user.getFirstName());
keycloakUser.setLastName(user.getLastName());
keycloakUser.setEnabled(user.getActive());
keycloakUser.setEmail(user.getEmail());
keycloakUser.setEmailVerified(Boolean.TRUE.equals(user.getEmailVerified()));
if (user.getCreated() == null) {
keycloakUser.setCreatedTimestamp(LocalDateTime.now().toInstant(ZoneOffset.UTC).toEpochMilli());
} else {
keycloakUser.setCreatedTimestamp(user.getCreated().toInstant(ZoneOffset.UTC).toEpochMilli());
}
// TODO should we attempt to map Cognito UserStatusType to Keycloak Required Actions?
if ("FORCE_CHANGE_PASSWORD".equals(user.getStatus())) {
keycloakUser.setRequiredActions(List.of("UPDATE_PASSWORD"));
}
}
return keycloakUser;
} | 3.68 |
flink_BinarySegmentUtils_setByte | /**
* Set byte in segments.
*
* @param segments target segments.
* @param offset value offset.
*/
public static void setByte(MemorySegment[] segments, int offset, byte value) {
if (inFirstSegment(segments, offset, 1)) {
segments[0].put(offset, value);
} else {
setByteMultiSegments(segments, offset, value);
}
} | 3.68 |
hbase_ChoreService_printChoreServiceDetails | /** Prints a summary of important details about the service. Used for debugging purposes */
private void printChoreServiceDetails(final String header) {
if (!LOG.isTraceEnabled()) {
return;
}
LinkedHashMap<String, String> output = new LinkedHashMap<>();
output.put(header, "");
output.put("ChoreService corePoolSize: ", Integer.toString(getCorePoolSize()));
output.put("ChoreService scheduledChores: ", Integer.toString(getNumberOfScheduledChores()));
output.put("ChoreService missingStartTimeCount: ",
Integer.toString(getNumberOfChoresMissingStartTime()));
for (Entry<String, String> entry : output.entrySet()) {
LOG.trace(entry.getKey() + entry.getValue());
}
} | 3.68 |
hbase_CacheConfig_shouldPrefetchOnOpen | /** Returns true if blocks should be prefetched into the cache on open, false if not */
public boolean shouldPrefetchOnOpen() {
return this.prefetchOnOpen;
} | 3.68 |