name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
druid_IPRange_getExtendedNetworkPrefix | /**
* Return the extended network prefix.
*
* @return Return the extended network prefix.
*/
public final int getExtendedNetworkPrefix() {
return extendedNetworkPrefix;
} | 3.68 |
framework_HierarchicalContainer_getChildren | /*
* Gets the IDs of the children of the specified Item. Don't add a JavaDoc
* comment here, we use the default documentation from implemented
* interface.
*/
@Override
public Collection<?> getChildren(Object itemId) {
LinkedList<Object> c;
if (filteredChildren != null) {
c = filteredChildren.get(itemId);
} else {
c = children.get(itemId);
}
if (c == null) {
return null;
}
return Collections.unmodifiableCollection(c);
} | 3.68 |
pulsar_LeastResourceUsageWithWeight_selectBroker | /**
* Find a suitable broker to assign the given bundle to.
* This method is not thread-safe.
*
* @param candidates The candidates to which the bundle may be assigned.
* @param bundleToAssign The data for the bundle to assign.
* @param loadData The load data from the leader broker.
* @param conf The service configuration.
* @return The name of the selected broker as it appears on ZooKeeper.
*/
@Override
public synchronized Optional<String> selectBroker(Set<String> candidates, BundleData bundleToAssign,
LoadData loadData,
ServiceConfiguration conf) {
if (candidates.isEmpty()) {
log.info("There are no available brokers as candidates at this point for bundle: {}", bundleToAssign);
return Optional.empty();
}
bestBrokers.clear();
// Maintain a list of all the best scoring brokers and then randomly
// select one of them at the end.
double totalUsage = 0.0d;
for (String broker : candidates) {
BrokerData brokerData = loadData.getBrokerData().get(broker);
double usageWithWeight = getMaxResourceUsageWithWeight(broker, brokerData, conf);
totalUsage += usageWithWeight;
}
final double avgUsage = totalUsage / candidates.size();
final double diffThreshold =
conf.getLoadBalancerAverageResourceUsageDifferenceThresholdPercentage() / 100.0;
candidates.forEach(broker -> {
Double avgResUsage = brokerAvgResourceUsageWithWeight.getOrDefault(broker, MAX_RESOURCE_USAGE);
if ((avgResUsage + diffThreshold <= avgUsage)) {
bestBrokers.add(broker);
}
});
if (bestBrokers.isEmpty()) {
// Assign randomly as all brokers are overloaded.
log.warn("Assign randomly as all {} brokers are overloaded.", candidates.size());
bestBrokers.addAll(candidates);
}
if (log.isDebugEnabled()) {
log.debug("Selected {} best brokers: {} from candidate brokers: {}", bestBrokers.size(), bestBrokers,
candidates);
}
return Optional.of(bestBrokers.get(ThreadLocalRandom.current().nextInt(bestBrokers.size())));
} | 3.68 |
framework_Page_getPage | /**
* Gets the page in which the uri has changed.
*
* @return the page in which the uri has changed
*/
public Page getPage() {
return (Page) getSource();
} | 3.68 |
flink_CheckpointStorageWorkerView_toFileMergingStorage | /**
* Return {@link org.apache.flink.runtime.state.filesystem.FsMergingCheckpointStorageAccess} if
* file merging is enabled; otherwise, return itself. File merging is supported by subclasses of
* {@link org.apache.flink.runtime.state.filesystem.AbstractFsCheckpointStorageAccess}.
*/
default CheckpointStorageWorkerView toFileMergingStorage(
FileMergingSnapshotManager mergingSnapshotManager, Environment environment)
throws IOException {
return this;
} | 3.68 |
morf_CreateDeployedViews_execute | /**
* @see org.alfasoftware.morf.upgrade.UpgradeStep#execute(org.alfasoftware.morf.upgrade.SchemaEditor, org.alfasoftware.morf.upgrade.DataEditor)
*/
@Override
public void execute(SchemaEditor schema, DataEditor data) {
schema.addTable(table("DeployedViews").
columns(
column("name", DataType.STRING, 30).primaryKey(),
column("hash", DataType.STRING, 64)
));
} | 3.68 |
hadoop_LoggingAuditor_serviceInit | /**
* Service init, look for jobID and attach as an attribute in log entries.
* This is where the warning span is created, so the relevant attributes
* (and filtering options) are applied.
* @param conf configuration
* @throws Exception failure
*/
@Override
protected void serviceInit(final Configuration conf) throws Exception {
super.serviceInit(conf);
rejectOutOfSpan = conf.getBoolean(
REJECT_OUT_OF_SPAN_OPERATIONS, false);
// attach the job ID if there is one in the configuration used
// to create this file.
String jobID = extractJobID(conf);
if (jobID != null) {
addAttribute(AuditConstants.PARAM_JOB_ID, jobID);
}
headerEnabled = getConfig().getBoolean(REFERRER_HEADER_ENABLED,
REFERRER_HEADER_ENABLED_DEFAULT);
filters = conf.getTrimmedStringCollection(REFERRER_HEADER_FILTER);
final CommonAuditContext currentContext = currentAuditContext();
warningSpan = new WarningSpan(OUTSIDE_SPAN,
currentContext, createSpanID(), null, null);
isMultipartUploadEnabled = conf.getBoolean(MULTIPART_UPLOADS_ENABLED,
DEFAULT_MULTIPART_UPLOAD_ENABLED);
} | 3.68 |
framework_CalendarTargetDetails_getDropTime | /**
* @return the date where the drop happened
*/
public Date getDropTime() {
if (hasDropTime) {
return (Date) getData("dropTime");
} else {
return (Date) getData("dropDay");
}
} | 3.68 |
hadoop_BlockStorageMovementNeeded_markScanCompleted | /**
* Mark the directory scan as completed.
*/
public synchronized void markScanCompleted() {
this.fullyScanned = true;
} | 3.68 |
graphhopper_BikeCommonPriorityParser_handlePriority | /**
* In this method we prefer cycleways or roads with designated bike access and avoid big roads
* or roads with trams or pedestrians.
*
* @return new priority based on priorityFromRelation and on the tags in ReaderWay.
*/
int handlePriority(ReaderWay way, double wayTypeSpeed, Integer priorityFromRelation) {
TreeMap<Double, PriorityCode> weightToPrioMap = new TreeMap<>();
if (priorityFromRelation == null)
weightToPrioMap.put(0d, UNCHANGED);
else
weightToPrioMap.put(110d, PriorityCode.valueOf(priorityFromRelation));
collect(way, wayTypeSpeed, weightToPrioMap);
// pick priority with biggest order value
return weightToPrioMap.lastEntry().getValue().getValue();
} | 3.68 |
flink_MiniCluster_terminateTaskManager | /**
* Terminates a TaskManager with the given index.
*
* <p>See {@link #startTaskManager()} to understand how TaskManagers are indexed. This method
* terminates a TaskManager with a given index but it does not clear the index. The index stays
* occupied for the lifetime of the MiniCluster and its TaskManager stays terminated. The index
* is not reused if more TaskManagers are started with {@link #startTaskManager()}.
*
* @param index index of the TaskManager to terminate
* @return {@link CompletableFuture} of the given TaskManager termination
*/
public CompletableFuture<Void> terminateTaskManager(int index) {
synchronized (lock) {
final TaskExecutor taskExecutor = taskManagers.get(index);
return taskExecutor.closeAsync();
}
} | 3.68 |
hbase_SnapshotReferenceUtil_verifyStoreFile | /**
* Verify the validity of the snapshot store file
* @param conf The current {@link Configuration} instance.
* @param fs {@link FileSystem}
* @param snapshotDir {@link Path} to the Snapshot directory of the snapshot to verify
* @param snapshot the {@link SnapshotDescription} of the snapshot to verify
* @param regionInfo {@link RegionInfo} of the region that contains the store file
* @param family family that contains the store file
* @param storeFile the store file to verify
* @throws CorruptedSnapshotException if the snapshot is corrupted
* @throws IOException if an error occurred while scanning the directory
*/
public static void verifyStoreFile(final Configuration conf, final FileSystem fs,
final Path snapshotDir, final SnapshotDescription snapshot, final RegionInfo regionInfo,
final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
TableName table = TableName.valueOf(snapshot.getTable());
String fileName = storeFile.getName();
Path refPath = null;
if (StoreFileInfo.isReference(fileName)) {
// If it is a reference file, check if the parent file is present in the snapshot
refPath = new Path(new Path(regionInfo.getEncodedName(), family), fileName);
refPath = StoreFileInfo.getReferredToFile(refPath);
String refRegion = refPath.getParent().getParent().getName();
refPath = HFileLink.createPath(table, refRegion, family, refPath.getName());
if (!HFileLink.buildFromHFileLinkPattern(conf, refPath).exists(fs)) {
throw new CorruptedSnapshotException(
"Missing parent hfile for: " + fileName + " path=" + refPath,
ProtobufUtil.createSnapshotDesc(snapshot));
}
if (storeFile.hasReference()) {
// We don't really need to look for the file on-disk
// we already have the Reference information embedded here.
return;
}
}
Path linkPath;
if (refPath != null && HFileLink.isHFileLink(refPath)) {
linkPath = new Path(family, refPath.getName());
} else if (HFileLink.isHFileLink(fileName)) {
linkPath = new Path(family, fileName);
} else {
linkPath = new Path(family,
HFileLink.createHFileLinkName(table, regionInfo.getEncodedName(), fileName));
}
// check if the linked file exists (in the archive, or in the table dir)
HFileLink link = null;
if (MobUtils.isMobRegionInfo(regionInfo)) {
// for mob region
link = HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf),
HFileArchiveUtil.getArchivePath(conf), linkPath);
} else {
// not mob region
link = HFileLink.buildFromHFileLinkPattern(conf, linkPath);
}
try {
FileStatus fstat = link.getFileStatus(fs);
if (storeFile.hasFileSize() && storeFile.getFileSize() != fstat.getLen()) {
String msg = "hfile: " + fileName + " size does not match with the expected one. "
+ " found=" + fstat.getLen() + " expected=" + storeFile.getFileSize();
LOG.error(msg);
throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
}
} catch (FileNotFoundException e) {
String msg = "Can't find hfile: " + fileName + " in the real (" + link.getOriginPath()
+ ") or archive (" + link.getArchivePath() + ") directory for the primary table.";
LOG.error(msg);
throw new CorruptedSnapshotException(msg, ProtobufUtil.createSnapshotDesc(snapshot));
}
} | 3.68 |
hbase_HFileReaderImpl_midKey | /**
* @return Midkey for this file. We work with block boundaries only so returned midkey is an
* approximation only.
*/
@Override
public Optional<Cell> midKey() throws IOException {
return Optional.ofNullable(dataBlockIndexReader.midkey(this));
} | 3.68 |
framework_GenericFontIcon_hashCode | /*
* (non-Javadoc)
*
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + codePoint;
result = prime * result
+ ((fontFamily == null) ? 0 : fontFamily.hashCode());
return result;
} | 3.68 |
hadoop_Utils_compareTo | /**
* Compare this version with another version.
*/
@Override
public int compareTo(Version that) {
if (major != that.major) {
return major - that.major;
}
return minor - that.minor;
} | 3.68 |
flink_TimestampsAndWatermarksTransformation_getInputType | /** Returns the {@code TypeInformation} for the elements of the input. */
public TypeInformation<IN> getInputType() {
return input.getOutputType();
} | 3.68 |
hadoop_QueuePriorityContainerCandidateSelector_isQueueSatisfied | /**
* Do we allow the demanding queue to preempt resources from other queues?
* A satisfied queue is not allowed to preempt resources from other queues.
* @param demandingQueue the queue demanding resources
* @param partition the node partition to check
* @return true if preemption from other queues is allowed, false otherwise
*/
private boolean isQueueSatisfied(String demandingQueue,
String partition) {
TempQueuePerPartition tq = preemptionContext.getQueueByPartition(
demandingQueue, partition);
if (null == tq) {
return false;
}
Resource guaranteed = tq.getGuaranteed();
Resource usedDeductReservd = Resources.subtract(tq.getUsed(),
tq.getReserved());
Resource markedToPreemptFromOtherQueue = toPreemptedFromOtherQueues.get(
demandingQueue, partition);
if (null == markedToPreemptFromOtherQueue) {
markedToPreemptFromOtherQueue = Resources.none();
}
// return Used - reserved + to-preempt-from-other-queue >= guaranteed
boolean flag = Resources.greaterThanOrEqual(rc, clusterResource,
Resources.add(usedDeductReservd, markedToPreemptFromOtherQueue),
guaranteed);
return flag;
} | 3.68 |
hbase_ServerMetrics_getVersion | /** Returns the string type version of a regionserver. */
default String getVersion() {
return "0.0.0";
} | 3.68 |
hbase_FSTableDescriptors_getTableDir | /**
* Return the table directory in HDFS
*/
private Path getTableDir(TableName tableName) {
return CommonFSUtils.getTableDir(rootdir, tableName);
} | 3.68 |
framework_VLoadingIndicator_setSecondDelay | /**
* Sets the delay (in ms) which must pass before the loading indicator moves
* to its "second" state.
*
* @param secondDelay
* The delay (in ms) until the loading indicator moves into its
* "second" state. Counted from when {@link #trigger()} is
* called.
*/
public void setSecondDelay(int secondDelay) {
this.secondDelay = secondDelay;
} | 3.68 |
hadoop_AMRMProxyService_stopApplication | /**
* Shuts down the request processing pipeline for the specified application.
*
* @param applicationId application id
*/
protected void stopApplication(ApplicationId applicationId) {
this.metrics.incrRequestCount();
Preconditions.checkArgument(applicationId != null, "applicationId is null");
RequestInterceptorChainWrapper pipeline =
this.applPipelineMap.remove(applicationId);
boolean isStopSuccess = true;
long startTime = clock.getTime();
if (pipeline == null) {
LOG.info("No interceptor pipeline for application {},"
+ " likely because its AM is not run in this node.", applicationId);
isStopSuccess = false;
} else {
// Remove the appAttempt in AMRMTokenSecretManager
this.secretManager.applicationMasterFinished(pipeline.getApplicationAttemptId());
LOG.info("Stopping the request processing pipeline for application: {}.", applicationId);
try {
pipeline.getRootInterceptor().shutdown();
} catch (Throwable ex) {
LOG.warn("Failed to shutdown the request processing pipeline for app: {}.",
applicationId, ex);
isStopSuccess = false;
}
// Remove the app context from NMSS after the interceptors are shutdown
if (this.nmContext.getNMStateStore() != null) {
try {
this.nmContext.getNMStateStore()
.removeAMRMProxyAppContext(pipeline.getApplicationAttemptId());
} catch (IOException e) {
LOG.error("Error removing AMRMProxy application context for {}.",
applicationId, e);
isStopSuccess = false;
}
}
}
if (isStopSuccess) {
long endTime = clock.getTime();
this.metrics.succeededAppStopRequests(endTime - startTime);
} else {
this.metrics.incrFailedAppStopRequests();
}
} | 3.68 |
hadoop_ActiveAuditManagerS3A_onExecutionFailure | /**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void onExecutionFailure(Context.FailedExecution context,
ExecutionAttributes executionAttributes) {
span.onExecutionFailure(context, executionAttributes);
} | 3.68 |
hbase_Get_setTimeRange | /**
* Get versions of columns only within the specified timestamp range, [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @return this for invocation chaining
*/
public Get setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = TimeRange.between(minStamp, maxStamp);
return this;
} | 3.68 |
flink_DynamicSourceUtils_createRequiredMetadataColumns | /**
* Returns a list of required metadata columns. Ordered by the iteration order of {@link
* SupportsReadingMetadata#listReadableMetadata()}.
*
* <p>This method assumes that source and schema have been validated via {@link
* #prepareDynamicSource(String, ResolvedCatalogTable, DynamicTableSource, boolean,
* ReadableConfig, List)}.
*/
public static List<MetadataColumn> createRequiredMetadataColumns(
ResolvedSchema schema, DynamicTableSource source) {
final Map<String, MetadataColumn> metadataKeysToMetadataColumns =
createMetadataKeysToMetadataColumnsMap(schema);
final Map<String, DataType> metadataMap = extractMetadataMap(source);
// reorder the column
return metadataMap.keySet().stream()
.filter(metadataKeysToMetadataColumns::containsKey)
.map(metadataKeysToMetadataColumns::get)
.collect(Collectors.toList());
} | 3.68 |
AreaShop_Utils_getDurationFromSecondsOrString | /**
* Get a setting from the config that can be either a number indicating seconds
* or a string indicating a duration.
* @param path Path of the setting to read
* @return milliseconds that the setting indicates
*/
public static long getDurationFromSecondsOrString(String path) {
if(config.isLong(path) || config.isInt(path)) {
long setting = config.getLong(path);
if(setting != -1) {
setting *= 1000;
}
return setting;
} else {
return durationStringToLong(config.getString(path));
}
} | 3.68 |
framework_VCalendarAction_getActionStartDate | /**
* Get the date and time when the action starts.
*
* @return the date and time when the action starts
*/
public Date getActionStartDate() {
return actionStartDate;
} | 3.68 |
open-banking-gateway_QueryHeadersMapperTemplate_forExecution | /**
* Converts context object into object that can be used for ASPSP API call.
* @param context Context to convert
* @return Object that can be used with {@code Xs2aAdapter} to perform ASPSP API calls
*/
public ValidatedQueryHeaders<Q, H> forExecution(C context) {
return new ValidatedQueryHeaders<>(
toQuery.map(context),
toHeaders.map(context)
);
} | 3.68 |
hadoop_FindOptions_getMinDepth | /**
* Returns the minimum depth for applying expressions.
*
* @return min depth
*/
public int getMinDepth() {
return this.minDepth;
} | 3.68 |
hbase_HFileBlockIndex_getNonRootIndexedKey | /**
* The indexed key at the ith position in the nonRootIndex. The position starts at 0.
* @param i the ith position
* @return The indexed key at the ith position in the nonRootIndex.
*/
static byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) {
int numEntries = nonRootIndex.getInt(0);
if (i < 0 || i >= numEntries) {
return null;
}
// Entries start after the number of entries and the secondary index.
// The secondary index takes numEntries + 1 ints.
int entriesOffset = Bytes.SIZEOF_INT * (numEntries + 2);
// Targetkey's offset relative to the end of secondary index
int targetKeyRelOffset = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 1));
// The offset of the target key in the blockIndex buffer
int targetKeyOffset = entriesOffset // Skip secondary index
+ targetKeyRelOffset // Skip all entries until mid
+ SECONDARY_INDEX_ENTRY_OVERHEAD; // Skip offset and on-disk-size
// We subtract the two consecutive secondary index elements, which
// gives us the size of the whole (offset, onDiskSize, key) tuple. We
// then need to subtract the overhead of offset and onDiskSize.
int targetKeyLength = nonRootIndex.getInt(Bytes.SIZEOF_INT * (i + 2)) - targetKeyRelOffset
- SECONDARY_INDEX_ENTRY_OVERHEAD;
// TODO check whether we can make BB backed Cell here? So can avoid bytes copy.
return nonRootIndex.toBytes(targetKeyOffset, targetKeyLength);
} | 3.68 |
AreaShop_SetteleportCommand_canUse | /**
* Check if a person can set the teleport location of the region.
* @param person The person to check
* @param region The region to check for
* @return true if the person can set the teleport location, otherwise false
*/
public static boolean canUse(CommandSender person, GeneralRegion region) {
if(!(person instanceof Player)) {
return false;
}
Player player = (Player)person;
return player.hasPermission("areashop.setteleportall")
|| region.isOwner(player) && player.hasPermission("areashop.setteleport");
} | 3.68 |
flink_CoGroupedStreams_equalTo | /**
* Specifies a {@link KeySelector} for elements from the second input with explicit type
* information for the key type.
*
* @param keySelector The KeySelector to be used for extracting the key for partitioning.
* @param keyType The type information describing the key type.
*/
public EqualTo equalTo(KeySelector<T2, KEY> keySelector, TypeInformation<KEY> keyType) {
Preconditions.checkNotNull(keySelector);
Preconditions.checkNotNull(keyType);
if (!keyType.equals(this.keyType)) {
throw new IllegalArgumentException(
"The keys for the two inputs are not equal: "
+ "first key = "
+ this.keyType
+ " , second key = "
+ keyType);
}
return new EqualTo(input2.clean(keySelector));
} | 3.68 |
hadoop_PeriodicService_getErrorCount | /**
* Get how many times we failed to run the periodic service.
*
* @return Times we failed to run the periodic service.
*/
protected long getErrorCount() {
return this.errorCount;
} | 3.68 |
hbase_StoreScanner_getEstimatedNumberOfKvsScanned | /** Returns The estimated number of KVs seen by this scanner (includes some skipped KVs). */
public long getEstimatedNumberOfKvsScanned() {
return this.kvsScanned;
} | 3.68 |
framework_TableScroll_initProperties | // set up the properties (columns)
private void initProperties(Table table) {
for (int i = 0; i < COLUMNS; i++) {
table.addContainerProperty("property" + i, String.class,
"some value");
}
} | 3.68 |
hbase_Scan_hasFilter | /** Returns true if a filter has been specified, false if not */
public boolean hasFilter() {
return filter != null;
} | 3.68 |
framework_AbstractRemoteDataSource_setRow | /**
* A method for the data source to update the row data.
*
* @param row
* the updated row object
*/
public void setRow(final T row) {
this.row = row;
assert getRowKey(row).equals(key) : "The old key does not "
+ "equal the new key for the given row (old: " + key
+ ", new :" + getRowKey(row) + ")";
} | 3.68 |
flink_BlobServer_deletePermanent | /**
* Delete the uploaded data with the given {@link JobID} and {@link PermanentBlobKey}.
*
* @param jobId ID of the job this blob belongs to
* @param key the key of this blob
*/
@Override
public boolean deletePermanent(JobID jobId, PermanentBlobKey key) {
return deleteInternal(jobId, key);
} | 3.68 |
MagicPlugin_MagicController_initialize | /*
* Saving and loading
*/
public void initialize() {
warpController = new WarpController(this);
kitController = new KitController(this);
crafting = new CraftingController(this);
mobs = new MobController(this);
items = new ItemController(this);
enchanting = new EnchantingController(this);
anvil = new AnvilController(this);
blockController = new BlockController(this);
hangingController = new HangingController(this);
entityController = new EntityController(this);
playerController = new PlayerController(this);
inventoryController = new InventoryController(this);
explosionController = new ExplosionController(this);
requirementsController = new RequirementsController(this);
worldController = new WorldController(this);
arenaController = new ArenaController(this);
arenaController.start();
if (CompatibilityLib.hasStatistics() && !CompatibilityLib.hasJumpEvent()) {
jumpController = new JumpController(this);
}
File examplesFolder = new File(getPlugin().getDataFolder(), "examples");
examplesFolder.mkdirs();
File urlMapFile = getDataFile(URL_MAPS_FILE);
File imageCache = new File(dataFolder, "imagemapcache");
imageCache.mkdirs();
maps = new MapController(this, urlMapFile, imageCache);
// Initialize EffectLib.
if (com.elmakers.mine.bukkit.effect.EffectPlayer.initialize(plugin, getLogger())) {
getLogger().info("EffectLib initialized");
} else {
getLogger().warning("Failed to initialize EffectLib");
}
// Pre-create schematic folder
File magicSchematicFolder = new File(plugin.getDataFolder(), "schematics");
magicSchematicFolder.mkdirs();
// One-time migration of legacy configurations
migrateConfig("enchanting", "paths");
migrateConfig("automata", "blocks");
migrateDataFile("automata", "blocks");
// Ready to load
load();
resourcePacks.startResourcePackChecks();
} | 3.68 |
flink_StreamTaskActionExecutor_synchronizedExecutor | /** Returns an ExecutionDecorator that synchronizes each invocation on a given object. */
static SynchronizedStreamTaskActionExecutor synchronizedExecutor(Object mutex) {
return new SynchronizedStreamTaskActionExecutor(mutex);
} | 3.68 |
hbase_HRegionServer_getMasterAddressTracker | /** Returns Master address tracker instance. */
public MasterAddressTracker getMasterAddressTracker() {
return this.masterAddressTracker;
} | 3.68 |
Activiti_ReflectUtil_getSetter | /**
* Returns the setter-method for the given field name or null if no setter exists.
*/
public static Method getSetter(String fieldName, Class<?> clazz, Class<?> fieldType) {
String setterName = "set" + Character.toTitleCase(fieldName.charAt(0)) + fieldName.substring(1, fieldName.length());
try {
// Using getMethods(), getMethod(...) expects exact parameter type
// matching and ignores inheritance-tree.
Method[] methods = clazz.getMethods();
for (Method method : methods) {
if (method.getName().equals(setterName)) {
Class<?>[] paramTypes = method.getParameterTypes();
if (paramTypes != null && paramTypes.length == 1 && paramTypes[0].isAssignableFrom(fieldType)) {
return method;
}
}
}
return null;
} catch (SecurityException e) {
throw new ActivitiException("Not allowed to access method " + setterName + " on class " + clazz.getCanonicalName());
}
} | 3.68 |
hbase_FamilyFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
FilterProtos.FamilyFilter.Builder builder = FilterProtos.FamilyFilter.newBuilder();
builder.setCompareFilter(super.convert());
return builder.build().toByteArray();
} | 3.68 |
hbase_BloomFilterMetrics_getRequestsCount | /** Returns Current value for bloom requests count */
public long getRequestsCount() {
return requests.sum();
} | 3.68 |
hbase_ClientModeStrategy_aggregateRecordsAndAddDistinct | /**
* Aggregate the records and count the unique values for the given distinctField
* @param records records to be processed
* @param groupBy Field on which group by needs to be done
* @param distinctField Field whose unique values needs to be counted
* @param uniqueCountAssignedTo a target field to which the unique count is assigned to
* @return aggregated records
*/
List<Record> aggregateRecordsAndAddDistinct(List<Record> records, Field groupBy,
Field distinctField, Field uniqueCountAssignedTo) {
List<Record> result = new ArrayList<>();
records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).values().forEach(val -> {
Set<FieldValue> distinctValues = new HashSet<>();
Map<Field, FieldValue> map = new HashMap<>();
for (Record record : val) {
for (Map.Entry<Field, FieldValue> field : record.entrySet()) {
if (distinctField.equals(field.getKey())) {
// We will not add the field whose distinct count is required to the new record
distinctValues.add(record.get(distinctField));
} else {
if (field.getKey().getFieldValueType() == FieldValueType.STRING) {
map.put(field.getKey(), field.getValue());
} else {
if (map.get(field.getKey()) == null) {
map.put(field.getKey(), field.getValue());
} else {
map.put(field.getKey(), map.get(field.getKey()).plus(field.getValue()));
}
}
}
}
}
// Add unique count field
map.put(uniqueCountAssignedTo, uniqueCountAssignedTo.newValue(distinctValues.size()));
result.add(
Record.ofEntries(map.entrySet().stream().map(k -> Record.entry(k.getKey(), k.getValue()))));
});
return result;
} | 3.68 |
flink_JobGraph_getSavepointRestoreSettings | /**
* Returns the configured savepoint restore setting.
*
* @return The configured savepoint restore settings.
*/
public SavepointRestoreSettings getSavepointRestoreSettings() {
return savepointRestoreSettings;
} | 3.68 |
hadoop_PendingSet_getVersion | /** @return the version marker. */
public int getVersion() {
return version;
} | 3.68 |
framework_VAbstractPopupCalendar_setTextFieldTabIndex | /**
* Set correct tab index for disabled text field in IE as the value set in
* setTextFieldEnabled(...) gets overridden in
* TextualDateConnection.updateFromUIDL(...).
*
* @since 7.3.1
*/
public void setTextFieldTabIndex() {
if (BrowserInfo.get().isIE() && !textFieldEnabled) {
// index needs to be -2 because FocusWidget updates -1 to 0 onAttach
text.setTabIndex(-2);
}
} | 3.68 |
hudi_HoodieTableConfig_getIndexDefinitionPath | /**
* @return the index definition path.
*/
public Option<String> getIndexDefinitionPath() {
return Option.ofNullable(getString(INDEX_DEFINITION_PATH));
} | 3.68 |
hadoop_TrustedChannelResolver_isTrusted | /**
* Identify boolean value indicating whether a channel is trusted or not.
* @param peerAddress address of the peer
* @return true if the channel is trusted and false otherwise.
*/
public boolean isTrusted(InetAddress peerAddress) {
return false;
} | 3.68 |
flink_TableResultImpl_build | /** Returns a {@link TableResult} instance. */
public TableResultInternal build() {
if (printStyle == null) {
printStyle = PrintStyle.rawContent(resultProvider.getRowDataStringConverter());
}
return new TableResultImpl(
jobClient, resolvedSchema, resultKind, resultProvider, printStyle);
} | 3.68 |
streampipes_AdapterDescription_getCorrespondingServiceGroup | /**
* @deprecated check if the service group can be removed as a single pipeline element
* can correspond to different service groups
*/
@Deprecated
public String getCorrespondingServiceGroup() {
return correspondingServiceGroup;
} | 3.68 |
dubbo_ServiceConfig_isOnlyInJvm | /**
* Determine whether the service is exported only within the JVM (injvm protocol).
*
* @return true if only the local (injvm) protocol is configured, false otherwise
*/
private boolean isOnlyInJvm() {
return getProtocols().size() == 1
&& LOCAL_PROTOCOL.equalsIgnoreCase(getProtocols().get(0).getName());
} | 3.68 |
flink_ApplicationDispatcherBootstrap_unwrapJobResultException | /**
* If the given {@link JobResult} indicates success, this passes through the {@link JobResult}.
* Otherwise, this returns a future that is finished exceptionally (potentially with an
* exception from the {@link JobResult}).
*/
private CompletableFuture<JobResult> unwrapJobResultException(
final CompletableFuture<JobResult> jobResult) {
return jobResult.thenApply(
result -> {
if (result.isSuccess()) {
return result;
}
throw new CompletionException(
UnsuccessfulExecutionException.fromJobResult(
result, application.getUserCodeClassLoader()));
});
} | 3.68 |
hbase_MobUtils_hasMobReferenceTag | /**
* Whether the tag list has a mob reference tag.
* @param tags The tag list.
* @return True if the list has a mob reference tag, false if it doesn't.
*/
public static boolean hasMobReferenceTag(List<Tag> tags) {
if (!tags.isEmpty()) {
for (Tag tag : tags) {
if (tag.getType() == TagType.MOB_REFERENCE_TAG_TYPE) {
return true;
}
}
}
return false;
} | 3.68 |
hadoop_AbstractManifestData_validateCollectionClass | /**
* Verify that all instances in a collection are of the given class.
* @param it iterator
* @param classname classname to require
* @throws IOException on a failure
*/
void validateCollectionClass(Iterable it, Class classname)
throws IOException {
for (Object o : it) {
verify(o.getClass().equals(classname),
"Collection element is not a %s: %s", classname, o.getClass());
}
} | 3.68 |
flink_MultipleParameterTool_fromMultiMap | /**
* Returns {@link MultipleParameterTool} for the given multi map.
*
* @param multiMap A map of arguments. Key is String and value is a Collection.
* @return A {@link MultipleParameterTool}
*/
public static MultipleParameterTool fromMultiMap(Map<String, Collection<String>> multiMap) {
Preconditions.checkNotNull(multiMap, "Unable to initialize from empty map");
return new MultipleParameterTool(multiMap);
} | 3.68 |
hadoop_S3ClientFactory_withTransferManagerExecutor | /**
* Set the executor that the transfer manager will use to execute background tasks.
* @param value new value
* @return the builder
*/
public S3ClientCreationParameters withTransferManagerExecutor(
final Executor value) {
transferManagerExecutor = value;
return this;
} | 3.68 |
hadoop_StoreContextBuilder_setAuditor | /**
* Set builder value.
* @param value new value
* @return the builder
*/
public StoreContextBuilder setAuditor(
final AuditSpanSource<AuditSpanS3A> value) {
auditor = value;
return this;
} | 3.68 |
framework_WebBrowser_isIE | /**
* Tests whether the user is using Internet Explorer.
*
* @return true if the user is using Internet Explorer, false if the user is
* not using Internet Explorer or if no information on the browser
* is present
*/
public boolean isIE() {
if (browserDetails == null) {
return false;
}
return browserDetails.isIE();
} | 3.68 |
hbase_LockManager_acquire | /**
* Acquire the lock, waiting indefinitely until the lock is released or the thread is
* interrupted.
* @throws InterruptedException If current thread is interrupted while waiting for the lock
*/
public boolean acquire() throws InterruptedException {
return tryAcquire(0);
} | 3.68 |
flink_LocalBufferPool_getEstimatedNumberOfRequestedMemorySegments | /**
* Estimates the number of requested buffers.
*
* @return the same value as {@link #getMaxNumberOfMemorySegments()} for bounded pools. For
* unbounded pools it returns an approximation based upon {@link
* #getNumberOfRequiredMemorySegments()}
*/
public int getEstimatedNumberOfRequestedMemorySegments() {
if (maxNumberOfMemorySegments < NetworkBufferPool.UNBOUNDED_POOL_SIZE) {
return maxNumberOfMemorySegments;
} else {
return getNumberOfRequiredMemorySegments() * 2;
}
} | 3.68 |
hbase_ClassSize_estimateBaseFromCoefficients | /**
* Estimate the static space taken up by a class instance given the coefficients returned by
* getSizeCoefficients.
* @param coeff the coefficients
* @param debug debug flag
* @return the size estimate, in bytes
*/
private static long estimateBaseFromCoefficients(int[] coeff, boolean debug) {
long prealign_size = (long) OBJECT + coeff[0] + coeff[2] * REFERENCE;
// Round up to a multiple of 8
long size = align(prealign_size) + align(coeff[1] * ARRAY);
if (debug) {
if (LOG.isDebugEnabled()) {
LOG.debug("Primitives=" + coeff[0] + ", arrays=" + coeff[1] + ", references=" + coeff[2]
+ ", refSize " + REFERENCE + ", size=" + size + ", prealign_size=" + prealign_size);
}
}
return size;
} | 3.68 |
hbase_PrivateCellUtil_estimatedSerializedSizeOf | /**
* Estimate based on keyvalue's serialization format in the RPC layer. Note that there is an extra
* SIZEOF_INT added to the size here that indicates the actual length of the cell for cases where
* cells are serialized in a contiguous format (for example, in RPCs).
* @return Estimate of the <code>cell</code> size in bytes plus an extra SIZEOF_INT indicating the
* actual cell length.
*/
public static int estimatedSerializedSizeOf(final Cell cell) {
return cell.getSerializedSize() + Bytes.SIZEOF_INT;
} | 3.68 |
hbase_AsyncAdmin_mergeSwitch | /**
* Turn the Merge switch on or off.
* @param enabled enabled or not
* @return Previous switch value wrapped by a {@link CompletableFuture}
*/
default CompletableFuture<Boolean> mergeSwitch(boolean enabled) {
return mergeSwitch(enabled, false);
} | 3.68 |
pulsar_ReaderConfiguration_getCryptoKeyReader | /**
* @return the CryptoKeyReader
*/
public CryptoKeyReader getCryptoKeyReader() {
return conf.getCryptoKeyReader();
} | 3.68 |
framework_TouchScrollDelegate_detectScrolledElement | /**
* Detects if a touch happens on a predefined element and the element has
* something to scroll.
*
* @param touch the touch event to check
* @return true if a scrollable element with overflowing content was found, false otherwise
*/
private boolean detectScrolledElement(Touch touch) {
Element target = touch.getTarget().cast();
for (Element el : scrollableElements) {
if (el.isOrHasChild(target)
&& el.getScrollHeight() > el.getClientHeight()) {
scrolledElement = el;
layers = getElements(scrolledElement);
return true;
}
}
return false;
} | 3.68 |
hadoop_FutureIOSupport_eval | /**
* Evaluate a CallableRaisingIOE in the current thread,
* converting IOEs to RTEs and propagating.
* See {@link FutureIO#eval(CallableRaisingIOE)}.
*
* @param callable callable to invoke
* @param <T> Return type.
* @return the evaluated result.
* @throws UnsupportedOperationException fail fast if unsupported
* @throws IllegalArgumentException invalid argument
*/
public static <T> CompletableFuture<T> eval(
CallableRaisingIOE<T> callable) {
return FutureIO.eval(callable);
} | 3.68 |
hbase_MetaFixer_fixHoles | /**
* If there is a hole, it papers it over by adding a region in the filesystem and to hbase:meta.
* Does not assign.
*/
void fixHoles(CatalogJanitorReport report) {
final List<Pair<RegionInfo, RegionInfo>> holes = report.getHoles();
if (holes.isEmpty()) {
LOG.info("CatalogJanitor Report contains no holes to fix. Skipping.");
return;
}
LOG.info("Identified {} region holes to fix. Detailed fixup progress logged at DEBUG.",
holes.size());
final List<RegionInfo> newRegionInfos = createRegionInfosForHoles(holes);
final List<RegionInfo> newMetaEntries = createMetaEntries(masterServices, newRegionInfos);
final TransitRegionStateProcedure[] assignProcedures =
masterServices.getAssignmentManager().createRoundRobinAssignProcedures(newMetaEntries);
masterServices.getMasterProcedureExecutor().submitProcedures(assignProcedures);
LOG.info("Scheduled {}/{} new regions for assignment.", assignProcedures.length, holes.size());
} | 3.68 |
hudi_KafkaConnectUtils_getDefaultHadoopConf | /**
* Returns the default Hadoop Configuration.
*
* @return the default Hadoop configuration
*/
public static Configuration getDefaultHadoopConf(KafkaConnectConfigs connectConfigs) {
Configuration hadoopConf = new Configuration();
// add hadoop config files
if (!StringUtils.isNullOrEmpty(connectConfigs.getHadoopConfDir())
|| !StringUtils.isNullOrEmpty(connectConfigs.getHadoopConfHome())) {
try {
List<Path> configFiles = getHadoopConfigFiles(connectConfigs.getHadoopConfDir(),
connectConfigs.getHadoopConfHome());
configFiles.forEach(f ->
hadoopConf.addResource(new org.apache.hadoop.fs.Path(f.toAbsolutePath().toUri())));
} catch (Exception e) {
throw new HoodieException("Failed to read hadoop configuration!", e);
}
} else {
DEFAULT_HADOOP_CONF_FILES.forEach(f ->
hadoopConf.addResource(new org.apache.hadoop.fs.Path(f.toAbsolutePath().toUri())));
}
connectConfigs.getProps().keySet().stream().filter(prop -> {
// In order to prevent printing unnecessary warn logs, here filter out the hoodie
// configuration items before passing to hadoop/hive configs
return !prop.toString().startsWith(HOODIE_CONF_PREFIX);
}).forEach(prop -> {
hadoopConf.set(prop.toString(), connectConfigs.getProps().get(prop.toString()).toString());
});
return hadoopConf;
} | 3.68 |
hadoop_ServiceLauncher_verifyConfigurationFilesExist | /**
* Verify that all the specified filenames exist.
* @param filenames a list of files
* @throws ServiceLaunchException if a file is not found
*/
protected void verifyConfigurationFilesExist(String[] filenames) {
if (filenames == null) {
return;
}
for (String filename : filenames) {
File file = new File(filename);
LOG.debug("Conf file {}", file.getAbsolutePath());
if (!file.exists()) {
// no configuration file
throw new ServiceLaunchException(EXIT_NOT_FOUND,
ARG_CONF_PREFIXED + ": configuration file not found: %s",
file.getAbsolutePath());
}
}
} | 3.68 |
flink_DecimalData_isCompact | /** Returns whether the decimal value is small enough to be stored in a long. */
public static boolean isCompact(int precision) {
return precision <= MAX_COMPACT_PRECISION;
} | 3.68 |
hudi_ArrayColumnReader_setChildrenInfo | /**
* The lengths & offsets are initialized with a default size (1024); they should be set to the
* actual size according to the element number.
*/
private void setChildrenInfo(HeapArrayVector lcv, int itemNum, int elementNum) {
lcv.setSize(itemNum);
long[] lcvLength = new long[elementNum];
long[] lcvOffset = new long[elementNum];
System.arraycopy(lcv.lengths, 0, lcvLength, 0, elementNum);
System.arraycopy(lcv.offsets, 0, lcvOffset, 0, elementNum);
lcv.lengths = lcvLength;
lcv.offsets = lcvOffset;
} | 3.68 |
flink_FunctionIdentifier_asSummaryString | /** Returns a string that summarizes this instance for printing to a console or log. */
public String asSummaryString() {
if (objectIdentifier != null) {
return String.join(
".",
objectIdentifier.getCatalogName(),
objectIdentifier.getDatabaseName(),
objectIdentifier.getObjectName());
} else {
return functionName;
}
} | 3.68 |
hbase_AbstractFSWAL_atHeadOfRingBufferEventHandlerAppend | /**
* Exposed for testing only. Used for tricks like halting the ring buffer appending.
*/
protected void atHeadOfRingBufferEventHandlerAppend() {
// Noop
} | 3.68 |
hbase_RawCell_cloneTags | /**
* Allows cloning the tags in the cell to a new byte[]
* @return the byte[] having the tags
*/
default byte[] cloneTags() {
return PrivateCellUtil.cloneTags(this);
} | 3.68 |
pulsar_CustomCommandFactoryProvider_createCustomCommandFactories | /**
* create a Command Factory.
*/
public static List<CustomCommandFactory> createCustomCommandFactories(
Properties conf) throws IOException {
String names = conf.getProperty("customCommandFactories", "");
List<CustomCommandFactory> result = new ArrayList<>();
if (names.isEmpty()) {
// early exit
return result;
}
String directory = conf.getProperty("cliExtensionsDirectory", "cliextensions");
String narExtractionDirectory = NarClassLoader.DEFAULT_NAR_EXTRACTION_DIR;
CustomCommandFactoryDefinitions definitions = searchForCustomCommandFactories(directory,
narExtractionDirectory);
for (String name : names.split(",")) {
CustomCommandFactoryMetaData metaData = definitions.getFactories().get(name);
if (null == metaData) {
throw new RuntimeException("No factory is found for name `" + name
+ "`. Available names are : " + definitions.getFactories());
}
CustomCommandFactory factory = load(metaData, narExtractionDirectory);
if (factory != null) {
result.add(factory);
}
log.debug("Successfully loaded command factory for name `{}`", name);
}
return result;
} | 3.68 |
flink_DynamicPartitionPruningUtils_isNewSource | /** Returns true if the source is FLIP-27 source, else false. */
private static boolean isNewSource(ScanTableSource scanTableSource) {
ScanTableSource.ScanRuntimeProvider provider =
scanTableSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
if (provider instanceof SourceProvider) {
return true;
} else if (provider instanceof TransformationScanProvider) {
Transformation<?> transformation =
((TransformationScanProvider) provider)
.createTransformation(name -> Optional.empty());
return transformation instanceof SourceTransformation;
} else if (provider instanceof DataStreamScanProvider) {
// Assume that the DataStreamScanProvider of sources that support dynamic filtering will use
// the new Source. This is not reliable and should be checked.
// TODO FLINK-28864 check if the source used by the DataStreamScanProvider is
// actually a new source. This situation will not generate wrong result because it's
// handled when translating BatchTableSourceScan. The only effect is the physical
// plan and the exec node plan have DPP nodes, but they do not work in runtime.
return true;
}
// TODO supports more
return false;
} | 3.68 |
framework_BeanItem_getBean | /**
* Gets the underlying JavaBean object.
*
* @return the bean object.
*/
public BT getBean() {
return bean;
} | 3.68 |
hadoop_AddMountAttributes_getMountTableForAddRequest | /**
* Create a new mount table object from the given mount point and update its attributes.
*
* @param mountSrc mount point src.
* @return MountTable object with updated attributes.
* @throws IOException If mount table instantiation fails.
*/
private MountTable getMountTableForAddRequest(String mountSrc) throws IOException {
Map<String, String> destMap = new LinkedHashMap<>();
for (String ns : this.getNss()) {
destMap.put(ns, this.getDest());
}
MountTable newEntry = MountTable.newInstance(mountSrc, destMap);
updateCommonAttributes(newEntry);
return newEntry;
} | 3.68 |
framework_VNotification_showError | /**
* Shows an error notification and redirects the user to the given URL when
* she clicks on the notification.
*
* If both message and caption are null, redirects the user to the url
* immediately
*
* @since 7.5.1
* @param connection
* A reference to the ApplicationConnection
* @param caption
* The caption for the error or null to exclude the caption
* @param message
* The message for the error or null to exclude the message
* @param details
* A details message or null to exclude the details
* @param url
* A url to redirect to after the user clicks the error
* notification
*/
public static void showError(ApplicationConnection connection,
String caption, String message, String details, String url) {
StringBuilder html = new StringBuilder();
if (caption != null) {
html.append("<h1 class='");
html.append(getDependentStyle(connection, CAPTION));
html.append("'>");
html.append(caption);
html.append("</h1>");
}
if (message != null) {
html.append("<p class='");
html.append(getDependentStyle(connection, DESCRIPTION));
html.append("'>");
html.append(message);
html.append("</p>");
}
if (html.length() != 0) {
// Add error description
if (details != null) {
html.append("<p class='");
html.append(getDependentStyle(connection, DETAILS));
html.append("'>");
html.append("<i style=\"font-size:0.7em\">");
html.append(details);
html.append("</i></p>");
}
VNotification n = VNotification.createNotification(1000 * 60 * 45,
connection.getUIConnector().getWidget());
n.addEventListener(new NotificationRedirect(url));
n.show(html.toString(), VNotification.CENTERED_TOP,
VNotification.STYLE_SYSTEM);
} else {
WidgetUtil.redirect(url);
}
} | 3.68 |
hadoop_AccessTokenTimer_setExpiresInMSSinceEpoch | /**
* Set when the access token will expire in milliseconds from epoch,
* as required by the WebHDFS configuration. This is a bit hacky and lame.
*
* @param expiresInMSSinceEpoch Access time expiration in ms since epoch.
*/
public void setExpiresInMSSinceEpoch(String expiresInMSSinceEpoch){
this.nextRefreshMSSinceEpoch = Long.parseLong(expiresInMSSinceEpoch);
} | 3.68 |
open-banking-gateway_EncryptionWithInitVectorOper_decryption | /**
* Decryption cipher
* @param keyWithIv Symmetric key and initialization vector
* @return Symmetric decryption cipher
*/
@SneakyThrows
public Cipher decryption(SecretKeyWithIv keyWithIv) {
Cipher cipher = Cipher.getInstance(encSpec.getCipherAlgo());
cipher.init(
Cipher.DECRYPT_MODE, keyWithIv.getSecretKey(),
new IvParameterSpec(keyWithIv.getIv())
);
return cipher;
} | 3.68 |
hudi_StreamerUtil_isValidFile | /**
* Returns whether the given file is in valid hoodie format.
* For example, filtering out the empty or corrupt files.
*/
public static boolean isValidFile(FileStatus fileStatus) {
final String extension = FSUtils.getFileExtension(fileStatus.getPath().toString());
if (PARQUET.getFileExtension().equals(extension)) {
return fileStatus.getLen() > ParquetFileWriter.MAGIC.length;
}
if (ORC.getFileExtension().equals(extension)) {
return fileStatus.getLen() > OrcFile.MAGIC.length();
}
if (HOODIE_LOG.getFileExtension().equals(extension)) {
return fileStatus.getLen() > HoodieLogFormat.MAGIC.length;
}
return fileStatus.getLen() > 0;
} | 3.68 |
flink_TaskStateSnapshot_putSubtaskStateByOperatorID | /**
* Maps the given operator id to the given subtask state. Returns the subtask state of a
* previous mapping, if such a mapping existed or null otherwise.
*/
public OperatorSubtaskState putSubtaskStateByOperatorID(
@Nonnull OperatorID operatorID, @Nonnull OperatorSubtaskState state) {
return subtaskStatesByOperatorID.put(operatorID, Preconditions.checkNotNull(state));
} | 3.68 |
framework_GridSingleSelect_setUserSelectionAllowed | /**
* Sets whether the user is allowed to change the selection.
* <p>
* The check is done only for the client side actions. It doesn't affect
* selection requests sent from the server side.
*
* @param allowed
* <code>true</code> if the user is allowed to change the
* selection, <code>false</code> otherwise
*/
public void setUserSelectionAllowed(boolean allowed) {
model.setUserSelectionAllowed(allowed);
} | 3.68 |
framework_ComponentDetail_setTooltipInfo | /**
* @param tooltipInfo
* the tooltipInfo to set
*/
public void setTooltipInfo(TooltipInfo tooltipInfo) {
this.tooltipInfo = tooltipInfo;
} | 3.68 |
pulsar_TopicsBase_publishMessagesToPartition | // Publish message to single partition of a partitioned topic.
protected void publishMessagesToPartition(AsyncResponse asyncResponse, ProducerMessages request,
boolean authoritative, int partition) {
if (topicName.isPartitioned()) {
asyncResponse.resume(new RestException(Status.BAD_REQUEST, "Topic name can't contain "
+ "'-partition-' suffix."));
}
String topic = topicName.getPartitionedTopicName();
try {
// If broker owns the partition then proceed to publish message, else do look up.
if ((pulsar().getBrokerService().getOwningTopics().containsKey(topic)
&& pulsar().getBrokerService().getOwningTopics().get(topic)
.contains(partition))
|| !findOwnerBrokerForTopic(authoritative, asyncResponse)) {
addOrGetSchemaForTopic(getSchemaData(request.getKeySchema(), request.getValueSchema()),
request.getSchemaVersion() == -1 ? null : new LongSchemaVersion(request.getSchemaVersion()))
.thenAccept(schemaMeta -> {
// Both schema version and schema data are necessary.
if (schemaMeta.getLeft() != null && schemaMeta.getRight() != null) {
internalPublishMessagesToPartition(topicName, request, partition, asyncResponse,
AutoConsumeSchema.getSchema(schemaMeta.getLeft().toSchemaInfo()),
schemaMeta.getRight());
} else {
asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR,
"Fail to add or retrieve schema."));
}
}).exceptionally(e -> {
if (log.isDebugEnabled()) {
log.debug("Fail to publish message to single partition: " + e.getLocalizedMessage());
}
asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR, "Fail to publish message "
+ "to single partition: "
+ e.getMessage()));
return null;
});
}
} catch (Exception e) {
asyncResponse.resume(new RestException(Status.INTERNAL_SERVER_ERROR, "Fail to publish message: "
+ e.getMessage()));
}
} | 3.68 |
streampipes_JdbcClient_closeAll | /**
* Closes all open connections and statements of JDBC
*/
protected void closeAll() {
boolean error = false;
try {
if (this.statementHandler.statement != null) {
this.statementHandler.statement.close();
this.statementHandler.statement = null;
}
} catch (SQLException e) {
error = true;
LOG.warn("Exception when closing the statement: " + e.getMessage());
}
try {
if (connection != null) {
connection.close();
connection = null;
}
} catch (SQLException e) {
error = true;
LOG.warn("Exception when closing the connection: " + e.getMessage());
}
try {
if (this.statementHandler.preparedStatement != null) {
this.statementHandler.preparedStatement.close();
this.statementHandler.preparedStatement = null;
}
} catch (SQLException e) {
error = true;
LOG.warn("Exception when closing the prepared statement: " + e.getMessage());
}
if (!error) {
LOG.info("Shutdown all connections successfully.");
}
} | 3.68 |
flink_StreamPhysicalPythonCorrelateRule_findTableFunction | // find only calc and table function
private boolean findTableFunction(FlinkLogicalCalc calc) {
RelNode child = ((RelSubset) calc.getInput()).getOriginal();
if (child instanceof FlinkLogicalTableFunctionScan) {
FlinkLogicalTableFunctionScan scan = (FlinkLogicalTableFunctionScan) child;
return PythonUtil.isPythonCall(scan.getCall(), null);
} else if (child instanceof FlinkLogicalCalc) {
FlinkLogicalCalc childCalc = (FlinkLogicalCalc) child;
return findTableFunction(childCalc);
}
return false;
} | 3.68 |
framework_ServerRpcQueue_showLoadingIndicator | /**
* Checks if a loading indicator should be shown when the RPCs have been
* sent to the server and we are waiting for a response.
*
* @return true if a loading indicator should be shown, false otherwise
*/
public boolean showLoadingIndicator() {
for (MethodInvocation invocation : getAll()) {
if (isLegacyVariableChange(invocation)
|| isJavascriptRpc(invocation)) {
// Always show loading indicator for legacy requests
return true;
} else {
Type type = new Type(invocation.getInterfaceName(), null);
Method method = type.getMethod(invocation.getMethodName());
if (!TypeDataStore.isNoLoadingIndicator(method)) {
return true;
}
}
}
return false;
} | 3.68 |
hbase_DigestSaslServerAuthenticationProvider_handle | /** {@inheritDoc} */
@Override
public void handle(Callback[] callbacks) throws InvalidToken, UnsupportedCallbackException {
NameCallback nc = null;
PasswordCallback pc = null;
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof AuthorizeCallback) {
ac = (AuthorizeCallback) callback;
} else if (callback instanceof NameCallback) {
nc = (NameCallback) callback;
} else if (callback instanceof PasswordCallback) {
pc = (PasswordCallback) callback;
} else if (callback instanceof RealmCallback) {
continue; // realm is ignored
} else {
throw new UnsupportedCallbackException(callback, "Unrecognized SASL DIGEST-MD5 Callback");
}
}
if (pc != null) {
TokenIdentifier tokenIdentifier =
HBaseSaslRpcServer.getIdentifier(nc.getDefaultName(), secretManager);
attemptingUser.set(tokenIdentifier.getUser());
char[] password = getPassword(tokenIdentifier);
if (LOG.isTraceEnabled()) {
LOG.trace("SASL server DIGEST-MD5 callback: setting password for client: {}",
tokenIdentifier.getUser());
}
pc.setPassword(password);
}
if (ac != null) {
// The authentication ID is the identifier (username) of the user who authenticated via
// SASL (the one who provided credentials). The authorization ID is who the remote user
// "asked" to be once they authenticated. This is akin to the UGI/JAAS "doAs" notion, e.g.
// authentication ID is the "real" user and authorization ID is the "proxy" user.
//
// For DelegationTokens: we do not expect any remote user with a delegation token to execute
// any RPCs as a user other than themselves. We disallow all cases where the real user
// does not match who the remote user wants to execute a request as someone else.
String authenticatedUserId = ac.getAuthenticationID();
String userRequestedToExecuteAs = ac.getAuthorizationID();
if (authenticatedUserId.equals(userRequestedToExecuteAs)) {
ac.setAuthorized(true);
if (LOG.isTraceEnabled()) {
String username = HBaseSaslRpcServer
.getIdentifier(userRequestedToExecuteAs, secretManager).getUser().getUserName();
LOG.trace(
"SASL server DIGEST-MD5 callback: setting " + "canonicalized client ID: " + username);
}
ac.setAuthorizedID(userRequestedToExecuteAs);
} else {
ac.setAuthorized(false);
}
}
} | 3.68 |
hbase_UnsafeAccess_putLong | /**
* Put a long value out to the specified BB position in big-endian format.
* @param buf the byte buffer
* @param offset position in the buffer
* @param val long to write out
* @return incremented offset
*/
public static int putLong(ByteBuffer buf, int offset, long val) {
if (LITTLE_ENDIAN) {
val = Long.reverseBytes(val);
}
if (buf.isDirect()) {
HBasePlatformDependent.putLong(directBufferAddress(buf) + offset, val);
} else {
HBasePlatformDependent.putLong(buf.array(),
BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset() + offset, val);
}
return offset + Bytes.SIZEOF_LONG;
} | 3.68 |
hadoop_TimelineReaderClient_createTimelineReaderClient | /**
* Create a new instance of Timeline Reader Client.
*
* @return instance of Timeline Reader Client.
*/
@InterfaceAudience.Public
public static TimelineReaderClient createTimelineReaderClient() {
return new TimelineReaderClientImpl();
} | 3.68 |
flink_Channel_setReplicationFactor | /**
* Sets the replication factor of the connection.
*
* @param factor The replication factor of the connection.
*/
public void setReplicationFactor(int factor) {
this.replicationFactor = factor;
} | 3.68 |
flink_SocketStreamIterator_getPort | /**
* Returns the port on which the iterator is getting the data. (Used internally.)
*
* @return The port
*/
public int getPort() {
return socket.getLocalPort();
} | 3.68 |
querydsl_JTSGeometryExpressions_extent | /**
* Returns the bounding box that bounds rows of geometries.
*
* @param collection geometry collection
* @return geometry collection
*/
public static JTSGeometryExpression<?> extent(Expression<? extends GeometryCollection> collection) {
return geometryOperation(SpatialOps.EXTENT, collection);
} | 3.68 |
morf_SqlDialect_getSchemaName | /**
* Returns the database schema name. May be null.
* @return The schema name
*/
public String getSchemaName() {
return schemaName;
} | 3.68 |
framework_FreeformQuery_containsRowWithKey | /**
* This implementation of the containsRowWithKey method rewrites existing
* WHERE clauses in the query string. The logic is, however, not very
* complex and sometimes can do the Wrong Thing<sup>TM</sup>. For the
* situations where this logic is not enough, you can implement the
* getContainsRowQueryString method in FreeformQueryDelegate and this will
* be used instead of the logic.
*
* @see FreeformQueryDelegate#getContainsRowQueryString(Object...)
*
*/
@Override
@SuppressWarnings("deprecation")
public boolean containsRowWithKey(Object... keys) throws SQLException {
String query = null;
boolean contains = false;
if (delegate != null) {
if (delegate instanceof FreeformStatementDelegate) {
try {
StatementHelper sh = ((FreeformStatementDelegate) delegate)
.getContainsRowQueryStatement(keys);
PreparedStatement pstmt = null;
ResultSet rs = null;
Connection c = getConnection();
try {
pstmt = c.prepareStatement(sh.getQueryString());
sh.setParameterValuesToStatement(pstmt);
rs = pstmt.executeQuery();
contains = rs.next();
return contains;
} finally {
releaseConnection(c, pstmt, rs);
}
} catch (UnsupportedOperationException e) {
// Statement generation not supported, continue...
}
}
try {
query = delegate.getContainsRowQueryString(keys);
} catch (UnsupportedOperationException e) {
query = modifyWhereClause(keys);
}
} else {
query = modifyWhereClause(keys);
}
Statement statement = null;
ResultSet rs = null;
Connection conn = getConnection();
try {
statement = conn.createStatement();
rs = statement.executeQuery(query);
contains = rs.next();
} finally {
releaseConnection(conn, statement, rs);
}
return contains;
} | 3.68 |
flink_MemorySegment_getInt | /**
* Reads an int value (32bit, 4 bytes) from the given position, in the system's native byte
* order. This method offers the best speed for integer reading and should be used unless a
* specific byte order is required. In most cases, it suffices to know that the byte order in
* which the value is written is the same as the one in which it is read (such as transient
* storage in memory, or serialization for I/O and network), making this method the preferable
* choice.
*
* @param index The position from which the value will be read.
* @return The int value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public int getInt(int index) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 4) {
return UNSAFE.getInt(heapMemory, pos);
} else if (address > addressLimit) {
throw new IllegalStateException("segment has been freed");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
} | 3.68 |
Activiti_TreeBuilderException_getExpression | /**
* @return the expression string
*/
public String getExpression() {
return expression;
} | 3.68 |