name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
---|---|---|
framework_GridLayout_overlaps | /**
* Tests if this Area overlaps with another Area.
*
* @param other
* the other Area that is to be tested for overlap with this
* area
* @return <code>true</code> if <code>other</code> area overlaps with
* this one, <code>false</code> if it does not.
*/
public boolean overlaps(Area other) {
return componentsOverlap(childData, other.childData);
} | 3.68 |
flink_HeapPriorityQueue_clear | /** Clears the queue. */
public void clear() {
final int arrayOffset = getHeadElementIndex();
Arrays.fill(queue, arrayOffset, arrayOffset + size, null);
size = 0;
} | 3.68 |
flink_TieredStorageNettyServiceImpl_trySetConsumer | /**
* Try to set data consumer.
*
* @return a future that provides the netty connection reader once it has been created, or empty if
* the registration already has a consumer.
*/
public Optional<CompletableFuture<NettyConnectionReader>> trySetConsumer() {
if (!isReaderSet()) {
this.readerFuture = new CompletableFuture<>();
return Optional.of(readerFuture);
}
tryCreateNettyConnectionReader();
return Optional.empty();
} | 3.68 |
pulsar_ResourceGroupService_getRGUsage | // Visibility for testing.
protected BytesAndMessagesCount getRGUsage(String rgName, ResourceGroupMonitoringClass monClass,
ResourceGroupUsageStatsType statsType) throws PulsarAdminException {
final ResourceGroup rg = this.getResourceGroupInternal(rgName);
if (rg != null) {
switch (statsType) {
default:
String errStr = "Unsupported statsType: " + statsType;
throw new PulsarAdminException(errStr);
case Cumulative:
return rg.getLocalUsageStatsCumulative(monClass);
case LocalSinceLastReported:
return rg.getLocalUsageStats(monClass);
case ReportFromTransportMgr:
return rg.getLocalUsageStatsFromBrokerReports(monClass);
}
}
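// Unknown resource group: return sentinel counts of -1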
BytesAndMessagesCount retCount = new BytesAndMessagesCount();
retCount.bytes = -1;
retCount.messages = -1;
return retCount;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWhereExists | /**
* Tests a select with an exists check.
*/
@Test
public void testSelectWhereExists() {
SelectStatement existsStatement = new SelectStatement().from(new TableReference(TEST_TABLE))
.where(isNotNull(new FieldReference(INT_FIELD)));
SelectStatement stmt = new SelectStatement().from(new TableReference(ALTERNATE_TABLE))
.where(exists(existsStatement));
String expectedSql = "SELECT * FROM " + tableName(ALTERNATE_TABLE) + " WHERE (EXISTS (SELECT * FROM " + tableName(TEST_TABLE) + " WHERE (intField IS NOT NULL)))";
assertEquals("Select with exists check", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
flink_InPlaceMutableHashTable_emitAndReset | /**
* Emits all elements currently held by the table to the collector, and resets the table.
* The table will have the same number of buckets as before the reset, to avoid doing
* resizes again.
*/
public void emitAndReset() throws IOException {
final int oldNumBucketSegments = bucketSegments.length;
emit();
close();
open(oldNumBucketSegments);
} | 3.68 |
framework_Slider_setMax | /**
* Sets the maximum slider value. If the current value of the slider is
* larger than this, the value is set to the new maximum.
*
* @param max
* The new maximum slider value
*/
public void setMax(double max) {
double roundedMax = getRoundedValue(max);
getState().maxValue = roundedMax;
if (getMin() > roundedMax) {
getState().minValue = roundedMax;
}
if (getValue() > roundedMax) {
setValue(roundedMax);
}
} | 3.68 |
flink_ExecutionEnvironment_fromCollection | /**
* Creates a DataSet from the given iterator. Because the iterator will remain unmodified until
* the actual execution happens, the type of data returned by the iterator must be given
* explicitly in the form of the type information. This method is useful for cases where the
* type is generic. In that case, the type class (as given in {@link #fromCollection(Iterator,
* Class)}) does not supply all type information.
*
* <p>Note that this operation will result in a non-parallel data source, i.e. a data source
* with a parallelism of one.
*
* @param data The collection of elements to create the data set from.
* @param type The TypeInformation for the produced data set.
* @return A DataSet representing the elements in the iterator.
* @see #fromCollection(Iterator, Class)
*/
public <X> DataSource<X> fromCollection(Iterator<X> data, TypeInformation<X> type) {
return new DataSource<>(
this, new IteratorInputFormat<>(data), type, Utils.getCallLocationName());
} | 3.68 |
hadoop_ServletUtils_getParameter | /**
* Extract a query string parameter without triggering http parameters
* processing by the servlet container.
*
* @param request the request
* @param name the parameter to get the value.
* @return the parameter value, or <code>NULL</code> if the parameter is not
* defined.
* @throws IOException thrown if there was an error parsing the query string.
*/
public static String getParameter(HttpServletRequest request, String name)
throws IOException {
String queryString = request.getQueryString();
if (queryString == null) {
return null;
}
List<NameValuePair> list = URLEncodedUtils.parse(queryString, StandardCharsets.UTF_8);
if (list != null) {
for (NameValuePair nv : list) {
if (name.equals(nv.getName())) {
return nv.getValue();
}
}
}
return null;
} | 3.68 |
hudi_WriteProfile_cleanMetadataCache | /**
* Removes overdue metadata from the cache, i.e. entries whose instant is not among the given {@code instants}.
*/
private void cleanMetadataCache(Stream<HoodieInstant> instants) {
Set<String> timestampSet = instants.map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
this.metadataCache.keySet().retainAll(timestampSet);
} | 3.68 |
flink_SplitDataProperties_splitsGroupedBy | /**
* Defines that the data within an input split is grouped on the fields defined by the field
* expressions. Multiple field expressions must be separated by the semicolon ';' character. All
* records sharing the same key (combination) must be subsequently emitted by the input format
* for each input split.
*
* <p><b> IMPORTANT: Providing wrong information with SplitDataProperties can cause wrong
* results! </b>
*
* @param groupFields The field expressions of the grouping keys.
* @return This SplitDataProperties object.
*/
public SplitDataProperties<T> splitsGroupedBy(String groupFields) {
if (groupFields == null) {
throw new InvalidProgramException("GroupFields may not be null.");
}
String[] groupKeysA = groupFields.split(";");
if (groupKeysA.length == 0) {
throw new InvalidProgramException("GroupFields may not be empty.");
}
if (this.splitOrdering != null) {
throw new InvalidProgramException("DataSource may either be grouped or sorted.");
}
this.splitGroupKeys = getAllFlatKeys(groupKeysA);
return this;
} | 3.68 |
hadoop_ListResultEntrySchema_withPermissions | /**
* Set the permissions value.
*
* @param permissions the permissions value to set
* @return the ListEntrySchema object itself.
*/
public ListResultEntrySchema withPermissions(final String permissions) {
this.permissions = permissions;
return this;
} | 3.68 |
flink_CheckpointConfig_getCheckpointInterval | /**
* Gets the interval in which checkpoints are periodically scheduled.
*
* <p>This setting defines the base interval. Checkpoint triggering may be delayed by the
* settings {@link #getMaxConcurrentCheckpoints()} and {@link #getMinPauseBetweenCheckpoints()}.
*
* @return The checkpoint interval, in milliseconds.
*/
public long getCheckpointInterval() {
return configuration
.getOptional(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL)
.map(Duration::toMillis)
.orElse(-1L);
} | 3.68 |
hbase_TableSnapshotInputFormatImpl_setInput | /**
* Configures the job to use TableSnapshotInputFormat to read from a snapshot.
* @param conf the job to configure
* @param snapshotName the name of the snapshot to read from
* @param restoreDir a temporary directory to restore the snapshot into. Current user
* should have write permissions to this directory, and this should not
* be a subdirectory of rootdir. After the job is finished, restoreDir
* can be deleted.
* @param splitAlgo SplitAlgorithm to be used when generating InputSplits
* @param numSplitsPerRegion how many input splits to generate per one region
* @throws IOException if an error occurs
*/
public static void setInput(Configuration conf, String snapshotName, Path restoreDir,
RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
conf.set(SNAPSHOT_NAME_KEY, snapshotName);
if (numSplitsPerRegion < 1) {
throw new IllegalArgumentException(
"numSplits must be >= 1, " + "illegal numSplits : " + numSplitsPerRegion);
}
if (splitAlgo == null && numSplitsPerRegion > 1) {
throw new IllegalArgumentException("Split algo can't be null when numSplits > 1");
}
if (splitAlgo != null) {
conf.set(SPLIT_ALGO, splitAlgo.getClass().getName());
}
conf.setInt(NUM_SPLITS_PER_REGION, numSplitsPerRegion);
Path rootDir = CommonFSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
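// Restore the snapshot into a unique, randomly named subdirectory of restoreDir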
restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
conf.set(RESTORE_DIR_KEY, restoreDir.toString());
} | 3.68 |
flink_WindowedStream_sideOutputLateData | /**
* Send late arriving data to the side output identified by the given {@link OutputTag}. Data is
* considered late after the watermark has passed the end of the window plus the allowed
* lateness set using {@link #allowedLateness(Time)}.
*
* <p>You can get the stream of late data using {@link
* SingleOutputStreamOperator#getSideOutput(OutputTag)} on the {@link
* SingleOutputStreamOperator} resulting from the windowed operation with the same {@link
* OutputTag}.
*/
@PublicEvolving
public WindowedStream<T, K, W> sideOutputLateData(OutputTag<T> outputTag) {
outputTag = input.getExecutionEnvironment().clean(outputTag);
builder.sideOutputLateData(outputTag);
return this;
} | 3.68 |
hudi_SchemaEvolutionContext_doEvolutionForRealtimeInputFormat | /**
* Do schema evolution for RealtimeInputFormat.
*
* @param realtimeRecordReader recordReader for RealtimeInputFormat.
* @throws Exception if the schema evolution fails
*/
public void doEvolutionForRealtimeInputFormat(AbstractRealtimeRecordReader realtimeRecordReader) throws Exception {
if (!(split instanceof RealtimeSplit)) {
LOG.warn(String.format("expect realtime split for mor table, but find other type split %s", split));
return;
}
if (internalSchemaOption.isPresent()) {
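// Prune the internal schema down to the columns required by this query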
Schema tableAvroSchema = new TableSchemaResolver(metaClient).getTableAvroSchema();
List<String> requiredColumns = getRequireColumn(job);
InternalSchema prunedInternalSchema = InternalSchemaUtils.pruneInternalSchema(internalSchemaOption.get(),
requiredColumns);
// Add partitioning fields to writer schema for resulting row to contain null values for these fields
String partitionFields = job.get(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS, "");
List<String> partitioningFields = partitionFields.length() > 0 ? Arrays.stream(partitionFields.split("/")).collect(Collectors.toList())
: new ArrayList<>();
Schema writerSchema = AvroInternalSchemaConverter.convert(internalSchemaOption.get(), tableAvroSchema.getName());
writerSchema = HoodieRealtimeRecordReaderUtils.addPartitionFields(writerSchema, partitioningFields);
Map<String, Schema.Field> schemaFieldsMap = HoodieRealtimeRecordReaderUtils.getNameToFieldMap(writerSchema);
// we should get HoodieParquetInputFormat#HIVE_TMP_COLUMNS, since serdeConstants#LIST_COLUMNS may be changed by HoodieParquetInputFormat#setColumnNameList
Schema hiveSchema = realtimeRecordReader.constructHiveOrderedSchema(writerSchema, schemaFieldsMap, job.get(HIVE_TMP_COLUMNS));
Schema readerSchema = AvroInternalSchemaConverter.convert(prunedInternalSchema, tableAvroSchema.getName());
// setUp evolution schema
realtimeRecordReader.setWriterSchema(writerSchema);
realtimeRecordReader.setReaderSchema(readerSchema);
realtimeRecordReader.setHiveSchema(hiveSchema);
internalSchemaOption = Option.of(prunedInternalSchema);
RealtimeSplit realtimeSplit = (RealtimeSplit) split;
LOG.info(String.format("About to read compacted logs %s for base split %s, projecting cols %s",
realtimeSplit.getDeltaLogPaths(), realtimeSplit.getPath(), requiredColumns));
}
} | 3.68 |
flink_StatsSummary_getCount | /**
* Returns the count of all seen values.
*
* @return Count of all values.
*/
public long getCount() {
return count;
} | 3.68 |
flink_BinaryInMemorySortBuffer_isEmpty | /**
* Checks whether the buffer is empty.
*
* @return True, if no record is contained, false otherwise.
*/
public boolean isEmpty() {
return this.numRecords == 0;
} | 3.68 |
hadoop_LocatedBlockBuilder_newLocatedBlock | // return new block so tokens can be set
LocatedBlock newLocatedBlock(ExtendedBlock eb,
DatanodeStorageInfo[] storage,
long pos, boolean isCorrupt) {
LocatedBlock blk =
BlockManager.newLocatedBlock(eb, storage, pos, isCorrupt);
return blk;
} | 3.68 |
hbase_RegionInfo_isFirst | /** Returns true if this is the first Region in the Table. */
default boolean isFirst() {
return Bytes.equals(getStartKey(), HConstants.EMPTY_START_ROW);
} | 3.68 |
dubbo_ParamParserManager_consumerParamParse | /**
* Consumer design description:
* <p>
* Object[] args = new Object[0];
* List<Object> argsList = new ArrayList<>();
* <p>
* setValueByIndex(int index, Object value);
* <p>
* args = argsList.toArray(new Object[0]);
*/
public static void consumerParamParse(ConsumerParseContext parseContext) {
List<ArgInfo> argInfos = parseContext.getArgInfos();
for (int i = 0; i < argInfos.size(); i++) {
for (BaseConsumerParamParser paramParser : consumerParamParsers) {
ArgInfo argInfoByIndex = parseContext.getArgInfoByIndex(i);
if (!paramParser.paramTypeMatch(argInfoByIndex)) {
continue;
}
paramParser.parse(parseContext, argInfoByIndex);
}
}
// TODO add param require or default
} | 3.68 |
hudi_DateTimeUtils_singular | /**
* @param label the original label
* @return the singular format of the original label
*/
private static String[] singular(String label) {
return new String[] {label};
} | 3.68 |
dubbo_MeshRuleRouter_getDubboRouteDestination | /**
* Match route detail (by params)
*/
protected List<DubboRouteDestination> getDubboRouteDestination(DubboRoute dubboRoute, Invocation invocation) {
List<DubboRouteDetail> dubboRouteDetailList = dubboRoute.getRoutedetail();
if (CollectionUtils.isNotEmpty(dubboRouteDetailList)) {
for (DubboRouteDetail dubboRouteDetail : dubboRouteDetailList) {
List<DubboMatchRequest> matchRequestList = dubboRouteDetail.getMatch();
if (CollectionUtils.isEmpty(matchRequestList)) {
return dubboRouteDetail.getRoute();
}
if (matchRequestList.stream()
.allMatch(request -> request.isMatch(invocation, sourcesLabels, tracingContextProviders))) {
return dubboRouteDetail.getRoute();
}
}
}
return null;
} | 3.68 |
querydsl_GeometryExpression_relate | /**
* Returns 1 (TRUE) if this geometric object is spatially related to anotherGeometry by testing
* for intersections between the interior, boundary and exterior of the two geometric objects
* as specified by the values in the intersectionPatternMatrix. This returns FALSE if all the
* tested intersections are empty except exterior (this) intersect exterior (another).
*
* @param geometry other geometry
* @param matrix matrix
* @return true, if this geometry is spatially related to the other
*/
public BooleanExpression relate(Expression<? extends Geometry> geometry, String matrix) {
return Expressions.booleanOperation(SpatialOps.RELATE, mixin, geometry, ConstantImpl.create(matrix));
} | 3.68 |
pulsar_ProducerImpl_applyCompression | /**
* Compress the payload if compression is configured.
* @param payload the payload to compress
* @return a new payload
*/
private ByteBuf applyCompression(ByteBuf payload) {
ByteBuf compressedPayload = compressor.encode(payload);
payload.release();
return compressedPayload;
} | 3.68 |
framework_ColorPickerHistory_getHistory | /**
* Gets the history.
*
* @return the history
*/
public List<Color> getHistory() {
ArrayBlockingQueue<Color> colorHistory = getColorHistory();
Color[] array = colorHistory.toArray(new Color[colorHistory.size()]);
return Collections.unmodifiableList(Arrays.asList(array));
} | 3.68 |
hadoop_S3APrefetchingInputStream_close | /**
* Closes this stream and releases all acquired resources.
*
* @throws IOException if there is an IO error during this operation.
*/
@Override
public synchronized void close() throws IOException {
if (inputStream != null) {
inputStream.close();
inputStream = null;
super.close();
}
} | 3.68 |
hbase_MasterObserver_preSwitchRpcThrottle | /**
* Called before switching rpc throttle enabled state.
* @param ctx the coprocessor instance's environment
* @param enable the rpc throttle value
*/
default void preSwitchRpcThrottle(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final boolean enable) throws IOException {
} | 3.68 |
flink_BroadcastConnectedStream_process | /**
* Assumes as inputs a {@link BroadcastStream} and a non-keyed {@link DataStream} and applies
* the given {@link BroadcastProcessFunction} on them, thereby creating a transformed output
* stream.
*
* @param function The {@link BroadcastProcessFunction} that is called for each element in the
* stream.
* @param outTypeInfo The type of the output elements.
* @param <OUT> The type of the output elements.
* @return The transformed {@link DataStream}.
*/
@PublicEvolving
public <OUT> SingleOutputStreamOperator<OUT> process(
final BroadcastProcessFunction<IN1, IN2, OUT> function,
final TypeInformation<OUT> outTypeInfo) {
Preconditions.checkNotNull(function);
Preconditions.checkArgument(
!(nonBroadcastStream instanceof KeyedStream),
"A BroadcastProcessFunction can only be used on a non-keyed stream.");
return transform(function, outTypeInfo);
} | 3.68 |
framework_RowItem_addItemProperty | /**
* Adding properties is not supported. Properties are generated by
* SQLContainer.
*/
@Override
public boolean addItemProperty(Object id, Property property)
throws UnsupportedOperationException {
throw new UnsupportedOperationException();
} | 3.68 |
flink_FlinkDatabaseMetaData_isCatalogAtStart | /** Catalog name appears at the start of full name. */
@Override
public boolean isCatalogAtStart() throws SQLException {
return true;
} | 3.68 |
hadoop_DiskBalancerWorkStatus_setSourcePath | /**
* Sets the Source Path.
*
* @param sourcePath - Volume Path.
*/
public void setSourcePath(String sourcePath) {
this.sourcePath = sourcePath;
} | 3.68 |
hadoop_ColumnRWHelper_readResult | /**
* Get the latest version of this specified column. Note: this call clones the
* value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
*
* @param result Cannot be null
* @param columnPrefix column prefix to read from
* @param qualifier column qualifier. Nothing gets read when null.
* @return result object (can be cast to whatever object was written to) or
* null when specified column qualifier for this prefix doesn't exist
* in the result.
* @throws IOException if there is any exception encountered while reading
* result.
*/
public static Object readResult(Result result, ColumnPrefix<?> columnPrefix,
String qualifier) throws IOException {
byte[] columnQualifier = ColumnHelper.getColumnQualifier(
columnPrefix.getColumnPrefixInBytes(), qualifier);
return readResult(
result, columnPrefix.getColumnFamilyBytes(),
columnQualifier, columnPrefix.getValueConverter());
} | 3.68 |
hudi_CompactionAdminClient_unscheduleCompactionFileId | /**
* Remove a fileId from pending compaction. Removes the associated compaction operation and rename delta-files that
* were generated for that file-id after the compaction operation was scheduled.
*
* This operation MUST be executed with compactions and writer turned OFF.
*
* @param fgId FileGroupId to be unscheduled
* @param skipValidation Skip validation
* @param dryRun Dry Run Mode
*/
public List<RenameOpResult> unscheduleCompactionFileId(HoodieFileGroupId fgId, boolean skipValidation, boolean dryRun)
throws Exception {
HoodieTableMetaClient metaClient = createMetaClient(false);
if (!dryRun) {
// Ready to remove this file-Id from compaction request
Pair<String, HoodieCompactionOperation> compactionOperationWithInstant =
CompactionUtils.getAllPendingCompactionOperations(metaClient).get(fgId);
HoodieCompactionPlan plan =
CompactionUtils.getCompactionPlan(metaClient, compactionOperationWithInstant.getKey());
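// Rebuild the compaction plan's operation list, dropping the entry for the file group being unscheduled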
List<HoodieCompactionOperation> newOps = plan.getOperations().stream().filter(
op -> (!op.getFileId().equals(fgId.getFileId())) && (!op.getPartitionPath().equals(fgId.getPartitionPath())))
.collect(Collectors.toList());
if (newOps.size() == plan.getOperations().size()) {
return new ArrayList<>();
}
HoodieCompactionPlan newPlan =
HoodieCompactionPlan.newBuilder().setOperations(newOps).setExtraMetadata(plan.getExtraMetadata()).build();
HoodieInstant inflight =
new HoodieInstant(State.INFLIGHT, COMPACTION_ACTION, compactionOperationWithInstant.getLeft());
Path inflightPath = new Path(metaClient.getMetaPath(), inflight.getFileName());
if (metaClient.getFs().exists(inflightPath)) {
// revert if in inflight state
metaClient.getActiveTimeline().revertInstantFromInflightToRequested(inflight);
}
// Overwrite compaction plan with updated info
metaClient.getActiveTimeline().saveToCompactionRequested(
new HoodieInstant(State.REQUESTED, COMPACTION_ACTION, compactionOperationWithInstant.getLeft()),
TimelineMetadataUtils.serializeCompactionPlan(newPlan), true);
}
return new ArrayList<>();
} | 3.68 |
hadoop_MoveStep_getVolumeSetID | /**
* Gets a volume Set ID.
*
* @return String
*/
@Override
public String getVolumeSetID() {
return volumeSetID;
} | 3.68 |
hudi_Lazy_lazily | /**
* Lazily executes the provided {@code initializer} with "exactly once" semantics to instantiate
* a value of type {@link T}, which is subsequently held by the returned instance of
* {@link Lazy}
*/
public static <T> Lazy<T> lazily(Supplier<T> initializer) {
return new Lazy<>(initializer);
} | 3.68 |
Activiti_CachingAndArtifactsManager_updateCachingAndArtifacts | /**
* Ensures that the process definition is cached in the appropriate places, including the
* deployment's collection of deployed artifacts and the deployment manager's cache, as well
* as caching any ProcessDefinitionInfos.
*/
public void updateCachingAndArtifacts(ParsedDeployment parsedDeployment) {
CommandContext commandContext = Context.getCommandContext();
final ProcessEngineConfigurationImpl processEngineConfiguration = Context.getProcessEngineConfiguration();
DeploymentCache<ProcessDefinitionCacheEntry> processDefinitionCache
= processEngineConfiguration.getDeploymentManager().getProcessDefinitionCache();
DeploymentEntity deployment = parsedDeployment.getDeployment();
for (ProcessDefinitionEntity processDefinition : parsedDeployment.getAllProcessDefinitions()) {
BpmnModel bpmnModel = parsedDeployment.getBpmnModelForProcessDefinition(processDefinition);
Process process = parsedDeployment.getProcessModelForProcessDefinition(processDefinition);
ProcessDefinitionCacheEntry cacheEntry = new ProcessDefinitionCacheEntry(processDefinition, bpmnModel, process);
processDefinitionCache.add(processDefinition.getId(), cacheEntry);
addDefinitionInfoToCache(processDefinition, processEngineConfiguration, commandContext);
// Add to deployment for further usage
deployment.addDeployedArtifact(processDefinition);
}
} | 3.68 |
flink_FactoryUtil_discoverEncodingFormat | /**
* Discovers a {@link EncodingFormat} of the given type using the given option as factory
* identifier.
*/
public <I, F extends EncodingFormatFactory<I>> EncodingFormat<I> discoverEncodingFormat(
Class<F> formatFactoryClass, ConfigOption<String> formatOption) {
return discoverOptionalEncodingFormat(formatFactoryClass, formatOption)
.orElseThrow(
() ->
new ValidationException(
String.format(
"Could not find required sink format '%s'.",
formatOption.key())));
} | 3.68 |
hbase_ReflectedFunctionCache_getAndCallByName | /**
* Get and execute the Function for the given className, passing the argument to the function and
* returning the result.
* @param className the full name of the class to lookup
* @param argument the argument to pass to the function, if found.
* @return null if a function is not found for classname, otherwise the result of the function.
*/
@Nullable
public R getAndCallByName(String className, I argument) {
// todo: if we ever make java9+ our lowest supported jdk version, we can
// handle generating these for newly loaded classes from our DynamicClassLoader using
// MethodHandles.privateLookupIn(). For now this is not possible, because we can't easily
// create a privileged lookup in a non-default ClassLoader. So while this cache loads
// over time, it will never load a custom filter from "hbase.dynamic.jars.dir".
Function<I, ? extends R> lambda =
ConcurrentMapUtils.computeIfAbsent(lambdasByClass, className, () -> loadFunction(className));
return lambda.apply(argument);
} | 3.68 |
framework_DownloadStream_setParameter | /**
* Sets a parameter for download stream. Parameters are optional information
* about the downloadable stream and their meaning depends on the used
* adapter. For example in WebAdapter they are interpreted as HTTP response
* headers.
*
* If a parameter with this name already exists, the old value is replaced.
*
* @param name
* the Name of the parameter to set.
* @param value
* the Value of the parameter to set.
*/
public void setParameter(String name, String value) {
if (params == null) {
params = new HashMap<>();
}
params.put(name, value);
} | 3.68 |
flink_FileStateHandle_getFileSystem | /**
* Gets the file system that stores the file state.
*
* @return The file system that stores the file state.
* @throws IOException Thrown if the file system cannot be accessed.
*/
private FileSystem getFileSystem() throws IOException {
return FileSystem.get(filePath.toUri());
} | 3.68 |
hadoop_S3A_finalize | /**
* Close the file system; the FileContext API doesn't have an explicit close.
*/
@Override
protected void finalize() throws Throwable {
fsImpl.close();
super.finalize();
} | 3.68 |
flink_AbstractHeapVector_getDictionaryIds | /** Returns the underlying integer column for ids of dictionary. */
@Override
public HeapIntVector getDictionaryIds() {
return dictionaryIds;
} | 3.68 |
framework_Navigator_performNavigateTo | /**
* Internal method for activating a view, setting its parameters and calling
* listeners.
* <p>
* Invoked after the current view has confirmed that leaving is ok.
* <p>
* This method also verifies that the user is allowed to perform the
* navigation operation.
*
* @param view
* view to activate
* @param viewName
* (optional) name of the view or null not to change the
* navigation state
* @param parameters
* parameters passed in the navigation state to the view
* @since 8.1
*/
protected void performNavigateTo(View view, String viewName,
String parameters) {
ViewChangeEvent event = new ViewChangeEvent(this, currentView, view,
viewName, parameters);
boolean navigationAllowed = beforeViewChange(event);
if (!navigationAllowed) {
// #10901. Revert URL to previous state if back-button navigation
// was canceled
revertNavigation();
return;
}
updateNavigationState(event);
if (getDisplay() != null) {
getDisplay().showView(view);
}
switchView(event);
view.enter(event);
fireAfterViewChange(event);
} | 3.68 |
morf_DataSetAdapter_close | /**
* @see org.alfasoftware.morf.dataset.DataSetConsumer#close(org.alfasoftware.morf.dataset.DataSetConsumer.CloseState)
*/
@Override
public void close(CloseState closeState) {
consumer.close(closeState);
} | 3.68 |
hadoop_PendingSet_serializer | /**
* Get a shared JSON serializer for this class.
* @return a serializer.
*/
public static JsonSerialization<PendingSet> serializer() {
return new JsonSerialization<>(PendingSet.class, false, false);
} | 3.68 |
hadoop_BalanceProcedureScheduler_isRunning | /**
* The running state of the scheduler.
*/
public boolean isRunning() {
return running.get();
} | 3.68 |
hbase_SortedList_get | /**
* Returns a reference to the unmodifiable list currently backing the SortedList. Changes to the
* SortedList will not be reflected in this list. Use this method to get a reference for iterating
* over using the RandomAccess pattern.
*/
public List<E> get() { // FindBugs: UG_SYNC_SET_UNSYNC_GET complaint. Fix!!
return list;
} | 3.68 |
morf_TruncateStatement_truncate | /**
* Constructs a Truncate Statement.
*
* @param table The table to truncate.
* @return Builder.
*/
public static Builder<TruncateStatement> truncate(TableReference table) {
return () -> new TruncateStatement(table);
} | 3.68 |
morf_InsertStatementBuilder_getTable | /**
* Gets the table being inserted into
*
* @return the table being inserted into
*/
TableReference getTable() {
return table;
} | 3.68 |
pulsar_TopicName_getLookupName | /**
* Get a string suitable for completeTopicName lookup.
*
* <p>Example:
*
* <p>persistent://tenant/cluster/namespace/completeTopicName ->
* persistent/tenant/cluster/namespace/completeTopicName
*
* @return the lookup name of this topic
*/
public String getLookupName() {
if (isV2()) {
return String.format("%s/%s/%s/%s", domain, tenant, namespacePortion, getEncodedLocalName());
} else {
return String.format("%s/%s/%s/%s/%s", domain, tenant, cluster, namespacePortion, getEncodedLocalName());
}
} | 3.68 |
AreaShop_FileManager_loadVersions | /**
* Load the file with the versions, used to check if the other files need conversion.
*/
@SuppressWarnings("unchecked")
public void loadVersions() {
File file = new File(versionPath);
if(file.exists()) {
// Load versions from the file
try (ObjectInputStream input = new ObjectInputStream(new FileInputStream(versionPath))) {
versions = (HashMap<String, Integer>) input.readObject();
} catch(IOException | ClassNotFoundException | ClassCastException e) {
AreaShop.warn("Something went wrong reading file: " + versionPath);
versions = null;
}
}
if(versions == null || versions.isEmpty()) {
versions = new HashMap<>();
versions.put(AreaShop.versionFiles, 0);
this.saveVersions();
}
} | 3.68 |
hadoop_ParserValidator_validate | /**
* Validates the input parameters for the {@link LogParser}.
*
* @param logs input log streams to the {@link LogParser}.
* @return whether the input parameters are valid or not.
*/
public final boolean validate(final InputStream logs) {
// TODO
return true;
} | 3.68 |
hadoop_PlacementConstraints_timedOpportunitiesConstraint | /**
* Creates a placement constraint that has to be satisfied within a number of
* placement opportunities (invocations of the scheduler).
*
* @param constraint the placement constraint
* @param delay the number of scheduling opportunities within which the
* constraint has to be satisfied
* @return the resulting timed placement constraint
*/
public static TimedPlacementConstraint timedOpportunitiesConstraint(
AbstractConstraint constraint, long delay) {
return new TimedPlacementConstraint(constraint, delay,
TimedPlacementConstraint.DelayUnit.OPPORTUNITIES);
} | 3.68 |
hbase_ZKNodeTracker_start | /**
* Starts the tracking of the node in ZooKeeper.
* <p/>
* Use {@link #blockUntilAvailable()} to block until the node is available or
* {@link #getData(boolean)} to get the data of the node if it is available.
*/
public synchronized void start() {
this.watcher.registerListener(this);
try {
if (ZKUtil.watchAndCheckExists(watcher, node)) {
byte[] data = ZKUtil.getDataAndWatch(watcher, node);
if (data != null) {
this.data = data;
} else {
// It existed but now does not, try again to ensure a watch is set
LOG.debug("Try starting again because there is no data from {}", node);
start();
}
}
} catch (KeeperException e) {
abortable.abort("Unexpected exception during initialization, aborting", e);
}
postStart();
} | 3.68 |
hadoop_TimelineFilterList_getFilterList | /**
* Get the filter list.
*
* @return filterList
*/
public List<TimelineFilter> getFilterList() {
return filterList;
} | 3.68 |
querydsl_GeometryExpression_union | /**
* Returns a geometric object that represents the Point set
* union of this geometric object with anotherGeometry.
*
* @param geometry other geometry
* @return union of this and the other geometry
*/
public GeometryExpression<Geometry> union(Expression<? extends Geometry> geometry) {
return GeometryExpressions.geometryOperation(SpatialOps.UNION, mixin, geometry);
} | 3.68 |
flink_ConfigurationUtils_getRandomTempDirectory | /**
* Picks a temporary directory randomly from the given configuration.
*
* @param configuration to extract the temp directory from
* @return a randomly picked temporary directory
*/
@Nonnull
public static File getRandomTempDirectory(Configuration configuration) {
final String[] tmpDirectories = parseTempDirectories(configuration);
Preconditions.checkState(
tmpDirectories.length > 0,
String.format(
"No temporary directory has been specified for %s",
CoreOptions.TMP_DIRS.key()));
final int randomIndex = ThreadLocalRandom.current().nextInt(tmpDirectories.length);
return new File(tmpDirectories[randomIndex]);
} | 3.68 |
streampipes_MqttClient_publish | /**
* Publish received event to MQTT broker.
*
* @param event event to be published
*/
public void publish(Event event) {
JsonDataFormatDefinition dataFormatDefinition = new JsonDataFormatDefinition();
byte[] payload = new String(dataFormatDefinition.fromMap(event.getRaw())).getBytes();
try {
this.conn.publish(options.getTopic(), payload, options.getQos(), options.isRetain());
} catch (Exception e) {
throw new SpRuntimeException("Could not publish to MQTT broker: "
+ uri.toString() + ", " + e.getMessage(), e);
}
} | 3.68 |
framework_UIDL_getPaintableAttribute | /**
* Gets the Paintable with the id found in the named attributes's value.
*
* @param name
* the name of the attribute
* @param connection
* the application connection used to look up the connector
* @return the Paintable referenced by the attribute, if it exists
*/
public ServerConnector getPaintableAttribute(String name,
ApplicationConnection connection) {
return ConnectorMap.get(connection)
.getConnector(getStringAttribute(name));
} | 3.68 |
querydsl_AbstractJPAQuery_getSingleResult | /**
* Transforms results using FactoryExpression if ResultTransformer can't be used
*
* @param query query
* @return single result
*/
@Nullable
private Object getSingleResult(Query query) {
if (projection != null) {
Object result = query.getSingleResult();
if (result != null) {
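// Wrap a scalar result in an array so the FactoryExpression projection can consume it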
if (!result.getClass().isArray()) {
result = new Object[]{result};
}
return projection.newInstance((Object[]) result);
} else {
return null;
}
} else {
return query.getSingleResult();
}
} | 3.68 |
flink_Tuple4_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple4<T0, T1, T2, T3> copy() {
return new Tuple4<>(this.f0, this.f1, this.f2, this.f3);
} | 3.68 |
framework_ComputedStyle_getMarginHeight | /**
* Returns the sum of the top and bottom margin.
*
* @since 7.5.6
* @return the sum of the top and bottom margin
*/
public double getMarginHeight() {
double marginHeight = getDoubleProperty("marginTop");
marginHeight += getDoubleProperty("marginBottom");
return marginHeight;
} | 3.68 |
flink_Tuple7_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
public static <T0, T1, T2, T3, T4, T5, T6> Tuple7<T0, T1, T2, T3, T4, T5, T6> of(
T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6) {
return new Tuple7<>(f0, f1, f2, f3, f4, f5, f6);
} | 3.68 |
AreaShop_BuyRegion_isBuyer | /**
* Check if a player is the buyer of this region.
* @param player Player to check
* @return true if this player owns this region, otherwise false
*/
public boolean isBuyer(OfflinePlayer player) {
return player != null && isBuyer(player.getUniqueId());
} | 3.68 |
hudi_StreamerUtil_getMaxCompactionMemoryInBytes | /**
* Returns the max compaction memory in bytes with given conf.
*/
public static long getMaxCompactionMemoryInBytes(Configuration conf) {
return (long) conf.getInteger(FlinkOptions.COMPACTION_MAX_MEMORY) * 1024 * 1024;
} | 3.68 |
framework_VAbsoluteLayout_destroy | /**
* Removes the wrapper caption and itself from the layout.
*/
public void destroy() {
if (caption != null) {
caption.removeFromParent();
}
removeFromParent();
} | 3.68 |
hudi_SchemaRegistryProvider_fetchSchemaFromRegistry | /**
* The method takes the provided url {@code registryUrl} and gets the schema from the schema registry using that url.
* If the caller provides userInfo credentials in the url (e.g "https://foo:[email protected]") then the credentials
* are extracted from the url using the Matcher and the extracted credentials are set on the request as an Authorization
* header.
*
* @param registryUrl the schema registry url, optionally containing userInfo credentials
* @return the Schema in String form.
* @throws IOException if the schema could not be fetched from the registry
*/
public String fetchSchemaFromRegistry(String registryUrl) throws IOException {
URL registry;
HttpURLConnection connection;
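// Check whether userInfo credentials (user:password@) are embedded in the registry url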
Matcher matcher = Pattern.compile("://(.*?)@").matcher(registryUrl);
if (matcher.find()) {
String creds = matcher.group(1);
String urlWithoutCreds = registryUrl.replace(creds + "@", "");
registry = new URL(urlWithoutCreds);
connection = (HttpURLConnection) registry.openConnection();
setAuthorizationHeader(matcher.group(1), connection);
} else {
registry = new URL(registryUrl);
connection = (HttpURLConnection) registry.openConnection();
}
ObjectMapper mapper = new ObjectMapper();
JsonNode node = mapper.readTree(getStream(connection));
return node.get("schema").asText();
} | 3.68 |
hbase_Call_setTimeout | /**
* Called from the timeout task; prevents self cancellation.
*/
public void setTimeout(IOException error) {
synchronized (this) {
if (done) {
return;
}
this.done = true;
this.error = error;
}
callback.run(this);
} | 3.68 |
hadoop_ConnectionContext_getClient | /**
* Get the connection client.
*
* @return Connection client.
*/
public synchronized ProxyAndInfo<?> getClient() {
this.numThreads++;
this.lastActiveTs = Time.monotonicNow();
return this.client;
} | 3.68 |
hbase_Client_getExtraHeaders | /**
* Get all extra headers (read-only).
*/
public Map<String, String> getExtraHeaders() {
return Collections.unmodifiableMap(extraHeaders);
} | 3.68 |
flink_ExternalResourceUtils_getExternalResourcesCollection | /** Get the collection of all enabled external resources. */
public static Collection<ExternalResource> getExternalResourcesCollection(
Configuration config) {
return getExternalResourceAmountMap(config).entrySet().stream()
.map(entry -> new ExternalResource(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
} | 3.68 |
hbase_TableSchemaModel___setIsRoot | /**
* @param value desired value of IS_ROOT attribute
*/
public void __setIsRoot(boolean value) {
attrs.put(IS_ROOT, Boolean.toString(value));
} | 3.68 |
framework_VCalendar_updateEventToMonthGrid | /**
* Updates an event in the month grid.
*
* @param changedEvent
* The event that has changed
*/
public void updateEventToMonthGrid(CalendarEvent changedEvent) {
removeMonthEvent(changedEvent, true);
changedEvent.setSlotIndex(-1);
addEventToMonthGrid(changedEvent, true);
} | 3.68 |
hbase_MobUtils_getMobHome | /**
* Gets the root dir of the mob files under the qualified HBase root dir. It's {rootDir}/mobdir.
* @param rootDir The qualified path of HBase root directory.
* @return The root dir of the mob file.
*/
public static Path getMobHome(Path rootDir) {
return new Path(rootDir, MobConstants.MOB_DIR_NAME);
} | 3.68 |
framework_Page_add | /**
* Injects a CSS resource into the page.
*
* @param resource
* The resource to inject.
*/
public void add(Resource resource) {
if (resource == null) {
throw new IllegalArgumentException(
"Cannot inject null resource");
}
InjectedStyleResource injection = new InjectedStyleResource(
resource);
if (!injectedStyles.contains(injection)
&& pendingInjections.add(injection)) {
ui.markAsDirty();
}
} | 3.68 |
flink_ExecutionEnvironment_readTextFile | /**
* Creates a {@link DataSet} that represents the Strings produced by reading the given file line
* wise. The {@link java.nio.charset.Charset} with the given name will be used to read the
* files.
*
* @param filePath The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param charsetName The name of the character set used to read the file.
* @return A {@link DataSet} that represents the data read from the given file as text lines.
*/
public DataSource<String> readTextFile(String filePath, String charsetName) {
Preconditions.checkNotNull(filePath, "The file path may not be null.");
TextInputFormat format = new TextInputFormat(new Path(filePath));
format.setCharsetName(charsetName);
return new DataSource<>(
this, format, BasicTypeInfo.STRING_TYPE_INFO, Utils.getCallLocationName());
} | 3.68 |
flink_HiveParserASTNodeOrigin_getUsageNode | /**
* @return the expression node triggering usage of an object from which an HiveParserASTNode
* originated, e.g. <code>v as v1</code> (this can help with debugging context-dependent
* expansions)
*/
public HiveParserASTNode getUsageNode() {
return usageNode;
} | 3.68 |
flink_RocksDBResourceContainer_createBaseCommonColumnOptions | /** Create a {@link ColumnFamilyOptions} for RocksDB, including some common settings. */
ColumnFamilyOptions createBaseCommonColumnOptions() {
return new ColumnFamilyOptions();
} | 3.68 |
framework_Panel_removeAllActionHandlers | /**
* Removes all action handlers.
*/
public void removeAllActionHandlers() {
if (actionManager != null) {
actionManager.removeAllActionHandlers();
}
} | 3.68 |
hbase_ZKProcedureUtil_getReachedBarrierNode | /**
* Get the full znode path for the node used by the coordinator to trigger a global barrier
* execution and release on each subprocedure.
* @param controller controller running the procedure
* @param opInstanceName name of the running procedure instance (not the procedure description).
* @return full znode path to the commit barrier
*/
public static String getReachedBarrierNode(ZKProcedureUtil controller, String opInstanceName) {
return ZNodePaths.joinZNode(controller.reachedZnode, opInstanceName);
} | 3.68 |
framework_AbstractInMemoryContainer_setItemSorter | /**
* Sets the ItemSorter used for comparing items in a sort. The
* {@link ItemSorter#compare(Object, Object)} method is called with item ids
* to perform the sorting. A default ItemSorter is used if this is not
* explicitly set.
*
* @param itemSorter
* The ItemSorter used for comparing two items in a sort (not
* null).
*/
protected void setItemSorter(ItemSorter itemSorter) {
this.itemSorter = itemSorter;
} | 3.68 |
flink_ConnectionLimitingFactory_decorateIfLimited | /**
* Decorates the given factory for a {@code ConnectionLimitingFactory}, if the given
* configuration configured connection limiting for the given file system scheme. Otherwise, it
* returns the given factory as is.
*
* @param factory The factory to potentially decorate.
* @param scheme The file scheme for which to check the configuration.
* @param config The configuration
* @return The decorated factory, if connection limiting is configured; the original factory
* otherwise.
*/
public static FileSystemFactory decorateIfLimited(
FileSystemFactory factory, String scheme, Configuration config) {
checkNotNull(factory, "factory");
final ConnectionLimitingSettings settings =
ConnectionLimitingSettings.fromConfig(config, scheme);
// decorate only if any limit is configured
if (settings == null) {
// no limit configured
return factory;
} else {
return new ConnectionLimitingFactory(factory, settings);
}
} | 3.68 |
druid_DruidAbstractDataSource_setConnectTimeout | /**
* @since 1.2.12
*/
public void setConnectTimeout(int milliSeconds) {
this.connectTimeout = milliSeconds;
this.connectTimeoutStr = null;
} | 3.68 |
framework_LegacyLocatorStrategy_findSubPartAwareParentWidget | /**
* Finds the first widget in the hierarchy (moving upwards) that implements
* SubPartAware. Returns the SubPartAware implementor or null if none is
* found.
*
* @param w
* The widget to start from. This is returned if it implements
* SubPartAware.
* @return The first widget (upwards in hierarchy) that implements
* SubPartAware or null
*/
Widget findSubPartAwareParentWidget(Widget w) {
while (w != null) {
if (w instanceof SubPartAware) {
return w;
}
w = w.getParent();
}
return null;
} | 3.68 |
hadoop_CopyOutputFormat_setWorkingDirectory | /**
* Setter for the working directory for DistCp (where files will be copied
* before they are moved to the final commit-directory.)
* @param job The Job on whose configuration the working-directory is to be set.
* @param workingDirectory The path to use as the working directory.
*/
public static void setWorkingDirectory(Job job, Path workingDirectory) {
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
workingDirectory.toString());
} | 3.68 |
hbase_ConstantSizeRegionSplitPolicy_isExceedSize | /** Returns true if the region size exceeds the sizeToCheck */
protected final boolean isExceedSize(long sizeToCheck) {
if (overallHRegionFiles) {
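// Compare the combined size of all stores in the region against the threshold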
long sumSize = 0;
for (HStore store : region.getStores()) {
sumSize += store.getSize();
}
if (sumSize > sizeToCheck) {
LOG.debug("Should split because region size is big enough " + "sumSize={}, sizeToCheck={}",
StringUtils.humanSize(sumSize), StringUtils.humanSize(sizeToCheck));
return true;
}
} else {
for (HStore store : region.getStores()) {
long size = store.getSize();
if (size > sizeToCheck) {
LOG.debug("Should split because {} size={}, sizeToCheck={}", store.getColumnFamilyName(),
StringUtils.humanSize(size), StringUtils.humanSize(sizeToCheck));
return true;
}
}
}
return false;
} | 3.68 |
hadoop_ProbeStatus_succeed | /**
* The probe has succeeded: capture the current timestamp, set
* success to true, and record any other data needed.
* @param probe probe
*/
public void succeed(Probe probe) {
finish(probe, true, probe.getName(), null);
} | 3.68 |
framework_Form_addItemProperty | /**
* Adds a new property to form and create corresponding field.
*
* @see Item#addItemProperty(Object, Property)
*/
@Override
public boolean addItemProperty(Object id, Property property) {
// Checks inputs
if (id == null || property == null) {
throw new NullPointerException("Id and property must be non-null");
}
// Checks that the property id is not reserved
if (propertyIds.contains(id)) {
return false;
}
propertyIds.add(id);
ownProperties.put(id, property);
// Gets suitable field
final Field<?> field = fieldFactory.createField(this, id, this);
if (field == null) {
return false;
}
// Configures the field
bindPropertyToField(id, property, field);
// Register and attach the created field
addField(id, field);
return true;
} | 3.68 |
hadoop_ContainerStatus_getContainerSubState | /**
* Get Extra state information of the container (SCHEDULED, LOCALIZING etc.).
* @return Extra State information.
*/
@Private
@Unstable
public ContainerSubState getContainerSubState() {
throw new UnsupportedOperationException(
"subclass must implement this method");
} | 3.68 |
framework_MonthEventLabel_setTimeSpecificEvent | /**
* Sets whether the event is bound to a specific time.
*
* @param timeSpecificEvent
* True if the event is bound to a time, false if it is only
* bound to the day
*/
public void setTimeSpecificEvent(boolean timeSpecificEvent) {
this.timeSpecificEvent = timeSpecificEvent;
} | 3.68 |
hbase_RegionServerObserver_postRollWALWriterRequest | /**
* This will be called after executing user request to roll a region server WAL.
* @param ctx the environment to interact with the framework and region server.
*/
default void postRollWALWriterRequest(
final ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
} | 3.68 |
hbase_HBaseTestingUtility_setupDataTestDirOnTestFS | /**
* Sets up a path in test filesystem to be used by tests. Creates a new directory if not already
* setup.
*/
private void setupDataTestDirOnTestFS() throws IOException {
if (dataTestDirOnTestFS != null) {
LOG.warn("Data test on test fs dir already setup in " + dataTestDirOnTestFS.toString());
return;
}
dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
} | 3.68 |
framework_Alignment_isCenter | /**
* Checks if component is aligned center (horizontally) of the available
* space.
*
* @return true if aligned center
*/
public boolean isCenter() {
return (bitMask
& Bits.ALIGNMENT_HORIZONTAL_CENTER) == Bits.ALIGNMENT_HORIZONTAL_CENTER;
} | 3.68 |
framework_VCalendar_getFirstDayNumber | /**
* Gets the number of the day on which a week starts.
*/
public int getFirstDayNumber() {
return firstDay;
} | 3.68 |
pulsar_RateLimiter_tryAcquire | /**
* Acquires permits from this {@link RateLimiter} if it can be acquired immediately without delay.
*
* @param acquirePermit
* the number of permits to acquire
* @return {@code true} if the permits were acquired, {@code false} otherwise
*/
public synchronized boolean tryAcquire(long acquirePermit) {
checkArgument(!isClosed(), "Rate limiter is already shutdown");
// lazy init and start task only once the application starts using it
if (renewTask == null) {
renewTask = createTask();
}
boolean canAcquire = acquirePermit < 0 || acquiredPermits < this.permits;
if (isDispatchOrPrecisePublishRateLimiter) {
// for dispatch rate limiter just add acquirePermit
acquiredPermits += acquirePermit;
// we want to back-pressure from the current state of the rateLimiter, therefore we should check if there
// are any available permits again
canAcquire = acquirePermit < 0 || acquiredPermits < this.permits;
} else {
// acquired-permits can't be larger than the rate
if (acquirePermit + acquiredPermits > this.permits) {
return false;
}
if (canAcquire) {
acquiredPermits += acquirePermit;
}
}
return canAcquire;
} | 3.68 |
hmily_ThreadLocalHmilyContext_set | /**
* set value.
*
* @param context context
*/
public void set(final HmilyTransactionContext context) {
CURRENT_LOCAL.set(context);
} | 3.68 |
pulsar_ProxyConnection_getValidClientAuthData | /**
* Thread-safe method to retrieve unexpired client auth data. Due to inherent race conditions,
* the auth data may expire before it is used.
*/
CompletableFuture<AuthData> getValidClientAuthData() {
final CompletableFuture<AuthData> clientAuthDataFuture = new CompletableFuture<>();
ctx().executor().execute(Runnables.catchingAndLoggingThrowables(() -> {
// authState is not thread safe, so this must run on the ProxyConnection's event loop.
if (!authState.isExpired()) {
clientAuthDataFuture.complete(clientAuthData);
} else if (state == State.ProxyLookupRequests) {
maybeSendAuthChallenge();
if (pendingBrokerAuthChallenges == null) {
pendingBrokerAuthChallenges = new HashSet<>();
}
pendingBrokerAuthChallenges.add(clientAuthDataFuture);
} else {
clientAuthDataFuture.completeExceptionally(new PulsarClientException.AlreadyClosedException(
"ProxyConnection is not in a valid state to get client auth data for " + remoteAddress));
}
}));
return clientAuthDataFuture;
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_getKeySegment | /**
* Get the {@link MemorySegment} wrapping up the serialized key bytes.
*
* @param key the key.
* @param namespace the namespace.
* @return the {@link MemorySegment} wrapping up the serialized key bytes.
*/
private MemorySegment getKeySegment(K key, N namespace) {
return skipListKeySerializer.serializeToSegment(key, namespace);
} | 3.68 |
zxing_BarcodeValue_getValue | /**
* Determines the maximum occurrence of a set value and returns all values which were set with this occurrence.
* @return an array of int, containing the values with the highest occurrence, or null, if no value was set
*/
int[] getValue() {
int maxConfidence = -1;
Collection<Integer> result = new ArrayList<>();
for (Entry<Integer,Integer> entry : values.entrySet()) {
if (entry.getValue() > maxConfidence) {
maxConfidence = entry.getValue();
result.clear();
result.add(entry.getKey());
} else if (entry.getValue() == maxConfidence) {
result.add(entry.getKey());
}
}
return PDF417Common.toIntArray(result);
} | 3.68 |