name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68)
---|---|---|
graphhopper_ShortcutUnpacker_visitOriginalEdgesFwd | /**
* Finds an edge/shortcut with the given id and adjNode and calls the visitor for each original edge that is
* packed inside this shortcut (or if an original edge is given simply calls the visitor on it).
*
* @param reverseOrder if true the original edges will be traversed in reverse order
*/
public void visitOriginalEdgesFwd(int edgeId, int adjNode, boolean reverseOrder, int prevOrNextEdgeId) {
doVisitOriginalEdges(edgeId, adjNode, reverseOrder, false, prevOrNextEdgeId);
} | 3.68 |
flink_BinaryInMemorySortBuffer_getIterator | /**
* Gets an iterator over all records in this buffer in their logical order.
*
* @return An iterator returning the records in their logical order.
*/
public MutableObjectIterator<BinaryRowData> getIterator() {
return new MutableObjectIterator<BinaryRowData>() {
private final int size = size();
private int current = 0;
private int currentSegment = 0;
private int currentOffset = 0;
private MemorySegment currentIndexSegment = sortIndex.get(0);
@Override
public BinaryRowData next(BinaryRowData target) {
if (this.current < this.size) {
this.current++;
if (this.currentOffset > lastIndexEntryOffset) {
this.currentOffset = 0;
this.currentIndexSegment = sortIndex.get(++this.currentSegment);
}
long pointer = this.currentIndexSegment.getLong(this.currentOffset);
this.currentOffset += indexEntrySize;
try {
return getRecordFromBuffer(target, pointer);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
} else {
return null;
}
}
@Override
public BinaryRowData next() {
throw new RuntimeException("Not support!");
}
};
} | 3.68 |
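The iterator above follows Flink's object-reuse contract: the caller passes in a row to fill, and the returned reference may be overwritten on the next call. A minimal usage sketch, assuming a populated sort buffer named `buffer` and two-field rows (both the name and the arity are illustrative assumptions):

```java
MutableObjectIterator<BinaryRowData> it = buffer.getIterator();
BinaryRowData reuse = new BinaryRowData(2);   // arity chosen purely for illustration
BinaryRowData row;
while ((row = it.next(reuse)) != null) {
    // consume the row here; the same instance may be reused by the next call,
    // so copy it if it must outlive this iteration
}
```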
hadoop_HsJobPage_preHead | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML)
*/
@Override protected void preHead(Page.HTML<__> html) {
String jobID = $(JOB_ID);
set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
: join("MapReduce Job ", $(JOB_ID)));
commonPreHead(html);
//Override the nav config from the commonPreHead
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}");
} | 3.68 |
framework_WebBrowser_isOpera | /**
* Tests whether the user is using Opera.
*
* @return true if the user is using Opera, false if the user is not using
* Opera or if no information on the browser is present
*/
public boolean isOpera() {
if (browserDetails == null) {
return false;
}
return browserDetails.isOpera();
} | 3.68 |
hadoop_NoopAuditManagerS3A_getUnbondedSpan | /**
* Unbonded span to use after deactivation.
*/
private AuditSpanS3A getUnbondedSpan() {
return auditor.getUnbondedSpan();
} | 3.68 |
hudi_HoodieBloomIndex_getFileInfoForLatestBaseFiles | /**
* Get BloomIndexFileInfo for all the latest base files for the requested partitions.
*
* @param partitions - List of partitions to get the base files for
* @param context - Engine context
* @param hoodieTable - Hoodie Table
* @return List of partition and file column range info pairs
*/
private List<Pair<String, BloomIndexFileInfo>> getFileInfoForLatestBaseFiles(
List<String> partitions, final HoodieEngineContext context, final HoodieTable hoodieTable) {
List<Pair<String, String>> partitionPathFileIDList = getLatestBaseFilesForAllPartitions(partitions, context,
hoodieTable).stream()
.map(pair -> Pair.of(pair.getKey(), pair.getValue().getFileId()))
.collect(toList());
return partitionPathFileIDList.stream()
.map(pf -> Pair.of(pf.getKey(), new BloomIndexFileInfo(pf.getValue()))).collect(toList());
} | 3.68 |
hbase_TableDescriptorBuilder_setCoprocessor | /**
* Add a table coprocessor to this table. The coprocessor type must be
* org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
* can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
* region is opened.
* @throws IOException any illegal parameter key/value
* @return the modifyable TD
*/
public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws IOException {
checkHasCoprocessor(cp.getClassName());
if (cp.getPriority() < 0) {
throw new IOException(
"Priority must be bigger than or equal with zero, current:" + cp.getPriority());
}
// Validate parameter kvs and then add key/values to kvString.
StringBuilder kvString = new StringBuilder();
for (Map.Entry<String, String> e : cp.getProperties().entrySet()) {
if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) {
throw new IOException("Illegal parameter key = " + e.getKey());
}
if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) {
throw new IOException("Illegal parameter (" + e.getKey() + ") value = " + e.getValue());
}
if (kvString.length() != 0) {
kvString.append(',');
}
kvString.append(e.getKey());
kvString.append('=');
kvString.append(e.getValue());
}
String value = cp.getJarPath().orElse("") + "|" + cp.getClassName() + "|"
+ Integer.toString(cp.getPriority()) + "|" + kvString.toString();
return setCoprocessorToMap(value);
} | 3.68 |
hadoop_PathOutputCommitter_hasOutputPath | /**
* Predicate: is there an output path?
* @return true if we have an output path set, else false.
*/
public boolean hasOutputPath() {
return getOutputPath() != null;
} | 3.68 |
zxing_BitArray_toBytes | /**
*
* @param bitOffset first bit to start writing
* @param array array to write into. Bytes are written most-significant byte first. This is the opposite
* of the internal representation, which is exposed by {@link #getBitArray()}
* @param offset position in array to start writing
* @param numBytes how many bytes to write
*/
public void toBytes(int bitOffset, byte[] array, int offset, int numBytes) {
for (int i = 0; i < numBytes; i++) {
int theByte = 0;
for (int j = 0; j < 8; j++) {
if (get(bitOffset)) {
theByte |= 1 << (7 - j);
}
bitOffset++;
}
array[offset + i] = (byte) theByte;
}
} | 3.68 |
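A small usage sketch of the MSB-first packing above, assuming zxing's BitArray no-arg constructor and its appendBits helper (the helper is not shown in the snippet, so treat it as an assumption):

```java
BitArray bits = new BitArray();
bits.appendBits(0xA5, 8);        // appends 1010 0101 with the most significant bit first (assumed helper)
byte[] out = new byte[1];
bits.toBytes(0, out, 0, 1);      // out[0] == (byte) 0xA5
```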
hudi_OptionsResolver_needsAsyncClustering | /**
* Returns whether there is need to schedule the async clustering.
*
* @param conf The flink configuration.
*/
public static boolean needsAsyncClustering(Configuration conf) {
return isInsertOperation(conf) && conf.getBoolean(FlinkOptions.CLUSTERING_ASYNC_ENABLED);
} | 3.68 |
framework_ConverterUtil_canConverterPossiblyHandle | /**
* Checks if it is possible that the given converter can handle conversion
* between the given presentation and model type somehow.
*
* @param converter
* The converter to check. If this is null the result is always
* false.
* @param presentationType
* The presentation type
* @param modelType
* The model type
* @return true if the converter possibly support conversion between the
* given presentation and model type, false otherwise
*/
public static boolean canConverterPossiblyHandle(Converter<?, ?> converter,
Class<?> presentationType, Class<?> modelType) {
if (converter == null) {
return false;
}
Class<?> converterModelType = converter.getModelType();
if (!modelType.isAssignableFrom(converterModelType)
&& !converterModelType.isAssignableFrom(modelType)) {
// model types are not compatible in any way
return false;
}
Class<?> converterPresentationType = converter.getPresentationType();
if (!presentationType.isAssignableFrom(converterPresentationType)
&& !converterPresentationType
.isAssignableFrom(presentationType)) {
// presentation types are not compatible in any way
return false;
}
return true;
} | 3.68 |
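The check above boils down to a bidirectional assignability test applied to both type pairs. A standalone sketch of that test using plain Class objects (no Vaadin types involved):

```java
// Two types are "possibly compatible" when either one is assignable from the other.
static boolean possiblyCompatible(Class<?> declared, Class<?> actual) {
    return declared.isAssignableFrom(actual) || actual.isAssignableFrom(declared);
}

// possiblyCompatible(Number.class, Integer.class)  -> true  (every Integer is a Number)
// possiblyCompatible(Integer.class, Number.class)  -> true  (a Number might be an Integer)
// possiblyCompatible(String.class, Integer.class)  -> false (no relationship in either direction)
```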
framework_DDEventHandleStrategy_handleKeyDownEvent | /**
* Handles key down {@code event}.
*
* Default implementation doesn't do anything.
*
* @param event
* key down GWT event
* @param mediator
* VDragAndDropManager data accessor
*/
public void handleKeyDownEvent(NativePreviewEvent event,
DDManagerMediator mediator) {
// no use for handling for any key down event
} | 3.68 |
hbase_ScannerContext_setSizeScope | /**
* Change the scope in which the size limit is enforced
*/
void setSizeScope(LimitScope scope) {
this.sizeScope = scope;
} | 3.68 |
flink_RateLimiterStrategy_perCheckpoint | /**
* Creates a {@code RateLimiterStrategy} that is limiting the number of records per checkpoint.
*
* @param recordsPerCheckpoint The number of records produced per checkpoint. This value has to
*     be greater than or equal to the parallelism. The actual number of produced records is subject to
* rounding due to dividing the number of produced records among the parallel instances.
*/
static RateLimiterStrategy perCheckpoint(int recordsPerCheckpoint) {
return parallelism -> {
int recordsPerSubtask = recordsPerCheckpoint / parallelism;
checkArgument(
recordsPerSubtask > 0,
"recordsPerCheckpoint has to be greater or equal to parallelism. "
+ "Either decrease the parallelism or increase the number of "
+ "recordsPerCheckpoint.");
return new GatedRateLimiter(recordsPerSubtask);
};
} | 3.68 |
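The integer division inside the lambda is what the Javadoc's rounding caveat refers to: the effective rate can be slightly lower than requested. A plain-arithmetic illustration:

```java
int recordsPerCheckpoint = 10;
int parallelism = 4;
int recordsPerSubtask = recordsPerCheckpoint / parallelism;   // 2 (integer division truncates)
int effectiveTotal = recordsPerSubtask * parallelism;         // 8 records per checkpoint, not 10
```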
flink_RocksDBResourceContainer_relocateDefaultDbLogDir | /**
* Relocates the default log directory of RocksDB to the Flink log directory. Finds the Flink
* log directory using the log.file Java property that is set during startup.
*
* @param dbOptions The RocksDB {@link DBOptions}.
*/
private void relocateDefaultDbLogDir(DBOptions dbOptions) {
String logFilePath = System.getProperty("log.file");
if (logFilePath != null) {
File logFile = resolveFileLocation(logFilePath);
if (logFile != null && resolveFileLocation(logFile.getParent()) != null) {
dbOptions.setDbLogDir(logFile.getParent());
}
}
} | 3.68 |
flink_SourceTestSuiteBase_testScaleUp | /**
* Test connector source restart from a savepoint with a higher parallelism.
*
* <p>This test will create 4 splits in the external system first, write test data to all splits
* and consume back via a Flink job with parallelism 2. Then stop the job with savepoint,
* restart the job from the savepoint with a higher parallelism 4. After the job has been
* running, add some extra data to the source and compare the result.
*
* <p>The number and order of records in each split consumed by Flink need to be identical to
* the test data written into the external system to pass this test. There's no requirement for
* record order across splits.
*/
@TestTemplate
@DisplayName("Test source restarting with a higher parallelism")
public void testScaleUp(
TestEnvironment testEnv,
DataStreamSourceExternalContext<T> externalContext,
CheckpointingMode semantic)
throws Exception {
restartFromSavepoint(testEnv, externalContext, semantic, 4, 2, 4);
} | 3.68 |
hadoop_AuxiliaryService_setAuxiliaryLocalPathHandler | /**
* Method that sets the local dirs path handler for this Auxiliary Service.
*
* @param auxiliaryLocalPathHandler the pathHandler for this auxiliary service
*/
public void setAuxiliaryLocalPathHandler(
AuxiliaryLocalPathHandler auxiliaryLocalPathHandler) {
this.auxiliaryLocalPathHandler = auxiliaryLocalPathHandler;
} | 3.68 |
flink_TableConfig_set | /**
* Sets an application-specific string-based value for the given string-based key.
*
* <p>The value will be parsed by the framework on access.
*
* <p>This method exists for convenience when configuring a session with string-based
* properties. Use {@link #set(ConfigOption, Object)} for more type-safety and inline
* documentation.
*
* @see TableConfigOptions
* @see ExecutionConfigOptions
* @see OptimizerConfigOptions
*/
public TableConfig set(String key, String value) {
configuration.setString(key, value);
return this;
} | 3.68 |
hadoop_ActiveAuditManagerS3A_afterTransmission | /**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public void afterTransmission(Context.AfterTransmission context,
ExecutionAttributes executionAttributes) {
span.afterTransmission(context, executionAttributes);
} | 3.68 |
hbase_RegionStateStore_getMergeRegions | /**
* Returns all RegionInfos listed in the 'info:merge*' columns of the given {@code region}.
*/
public List<RegionInfo> getMergeRegions(RegionInfo region) throws IOException {
return CatalogFamilyFormat.getMergeRegions(getRegionCatalogResult(region).rawCells());
} | 3.68 |
hadoop_Verifier_writeFlavorAndVerifier | /**
* Write AuthFlavor and the verifier to the XDR.
* @param verifier written to XDR
* @param xdr XDR message
*/
public static void writeFlavorAndVerifier(Verifier verifier, XDR xdr) {
if (verifier instanceof VerifierNone) {
xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
} else if (verifier instanceof VerifierGSS) {
xdr.writeInt(AuthFlavor.RPCSEC_GSS.getValue());
} else {
throw new UnsupportedOperationException("Cannot recognize the verifier");
}
verifier.write(xdr);
} | 3.68 |
hadoop_CoderUtil_cloneAsDirectByteBuffer | /**
* Clone an input bytes array as direct ByteBuffer.
*/
static ByteBuffer cloneAsDirectByteBuffer(byte[] input, int offset, int len) {
if (input == null) { // an input can be null, if erased or not to read
return null;
}
ByteBuffer directBuffer = ByteBuffer.allocateDirect(len);
directBuffer.put(input, offset, len);
directBuffer.flip();
return directBuffer;
} | 3.68 |
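The put/flip sequence is the standard way to make a freshly filled buffer readable from position zero. A self-contained sketch of the same pattern using only java.nio:

```java
import java.nio.ByteBuffer;

class DirectCloneDemo {
    public static void main(String[] args) {
        byte[] input = {1, 2, 3, 4, 5};
        ByteBuffer direct = ByteBuffer.allocateDirect(3);
        direct.put(input, 1, 3);            // copy bytes 2, 3, 4 into the direct buffer
        direct.flip();                      // limit = 3, position = 0: ready for reading
        System.out.println(direct.get(0));  // prints 2
    }
}
```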
hbase_HbckTableInfo_handleOverlapGroup | /**
* This takes a set of overlapping regions and merges them into a single region. This covers cases
* like degenerate regions, shared start keys, general overlaps, duplicate ranges, and partially
* overlapping regions. Cases:
* - Clean regions that overlap
* - Only .oldlogs regions (can't find start/stop range, or figure out)
* This is basically threadsafe, except for the fixer increment in mergeOverlaps.
*/
@Override
public void handleOverlapGroup(Collection<HbckRegionInfo> overlap) throws IOException {
Preconditions.checkNotNull(overlap);
Preconditions.checkArgument(overlap.size() > 0);
if (!this.fixOverlaps) {
LOG.warn("Not attempting to repair overlaps.");
return;
}
if (overlap.size() > hbck.getMaxMerge()) {
LOG.warn(
"Overlap group has " + overlap.size() + " overlapping " + "regions which is greater than "
+ hbck.getMaxMerge() + ", the max number of regions to merge");
if (hbck.shouldSidelineBigOverlaps()) {
// we only sideline big overlapped groups that exceeds the max number of regions to merge
sidelineBigOverlaps(overlap);
}
return;
}
if (hbck.shouldRemoveParents()) {
removeParentsAndFixSplits(overlap);
}
mergeOverlaps(overlap);
} | 3.68 |
framework_VCalendar_setEventResizeAllowed | /**
* Sets whether resizing an event is allowed.
*
* @param eventResizeAllowed
* True if allowed false if not
*/
public void setEventResizeAllowed(boolean eventResizeAllowed) {
this.eventResizeAllowed = eventResizeAllowed;
} | 3.68 |
hadoop_AuthenticationFilterInitializer_initFilter | /**
* Initializes hadoop-auth AuthenticationFilter.
* <p>
* Propagates to hadoop-auth AuthenticationFilter configuration all Hadoop
* configuration properties prefixed with "hadoop.http.authentication."
*
* @param container The filter container
* @param conf Configuration for run-time parameters
*/
@Override
public void initFilter(FilterContainer container, Configuration conf) {
Map<String, String> filterConfig = getFilterConfigMap(conf, PREFIX);
container.addFilter("authentication",
AuthenticationFilter.class.getName(),
filterConfig);
} | 3.68 |
framework_Overlay_positionOrSizeUpdated | /**
* Recalculates proper position and dimensions for the shadow and shim
* elements. Can be used to animate the related elements, using the
* 'progress' parameter (used to animate the shadow in sync with GWT
* PopupPanel's default animation 'PopupPanel.AnimationType.CENTER').
*
* @param progress
* A value between 0.0 and 1.0, indicating the progress of the
* animation (0=start, 1=end).
*/
private void positionOrSizeUpdated(final double progress) {
// Don't do anything if overlay element is not attached
if (!isAttached()) {
return;
}
// Calculate proper z-index
int zIndex = -1;
try {
// Odd behavior with Windows Hosted Mode forces us to use
// this redundant try/catch block (See dev.vaadin.com #2011)
zIndex = Integer.parseInt(getElement().getStyle().getZIndex());
} catch (Exception ignore) {
// Ignored, will cause no harm
zIndex = 1000;
}
if (zIndex == -1) {
zIndex = Z_INDEX;
}
// Calculate position and size
if (BrowserInfo.get().isIE()) {
// Shake IE
getOffsetHeight();
getOffsetWidth();
}
if (needsShimElement()) {
PositionAndSize positionAndSize = new PositionAndSize(
getActualLeft(), getActualTop(), getOffsetWidth(),
getOffsetHeight());
// Animate the size
positionAndSize.setAnimationFromCenterProgress(progress);
Element container = getElement().getParentElement();
if (needsShimElement()) {
updateShimPosition(positionAndSize);
if (shimElement.getParentElement() == null) {
container.insertBefore(shimElement, getElement());
}
}
}
} | 3.68 |
flink_TaskManagerServices_shutDown | /** Shuts the {@link TaskExecutor} services down. */
public void shutDown() throws FlinkException {
Exception exception = null;
try {
taskManagerStateStore.shutdown();
} catch (Exception e) {
exception = e;
}
try {
ioManager.close();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
shuffleEnvironment.close();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
kvStateService.shutdown();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
taskSlotTable.close();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
jobLeaderService.stop();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
ioExecutor.shutdown();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
jobTable.close();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
libraryCacheManager.shutdown();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
taskEventDispatcher.clearAll();
if (exception != null) {
throw new FlinkException(
"Could not properly shut down the TaskManager services.", exception);
}
} | 3.68 |
pulsar_ResourceGroupService_resourceGroupCreate | /**
* Create RG, with non-default functions for resource-usage transport-manager.
*
* @throws PulsarAdminException if an RG with that name already exists (even if the resource usage handlers are different).
*/
public void resourceGroupCreate(String rgName,
org.apache.pulsar.common.policies.data.ResourceGroup rgConfig,
ResourceUsagePublisher rgPublisher,
ResourceUsageConsumer rgConsumer) throws PulsarAdminException {
this.checkRGCreateParams(rgName, rgConfig);
ResourceGroup rg = new ResourceGroup(this, rgName, rgConfig, rgPublisher, rgConsumer);
resourceGroupsMap.put(rgName, rg);
} | 3.68 |
hadoop_StagingCommitter_getFinalPath | /**
* Returns the final S3 location for a relative path as a Hadoop {@link Path}.
* This is a final method that calls {@link #getFinalKey(String, JobContext)}
* to determine the final location.
*
* @param relative the path of a file relative to the task attempt path
* @param context the JobContext or TaskAttemptContext for this job
* @return the S3 Path where the file will be uploaded
* @throws IOException IO problem
*/
protected final Path getFinalPath(String relative, JobContext context)
throws IOException {
return getDestS3AFS().keyToQualifiedPath(getFinalKey(relative, context));
} | 3.68 |
hudi_TableSchemaResolver_getTableParquetSchema | /**
* Gets the user's data schema for a Hoodie table in Parquet format.
*
* @return Parquet schema for the table
*/
public MessageType getTableParquetSchema(boolean includeMetadataField) throws Exception {
return convertAvroSchemaToParquet(getTableAvroSchema(includeMetadataField));
} | 3.68 |
framework_AbstractListing_doWriteDesign | /**
* Writes listing specific state into the given design.
* <p>
* This method is separated from
* {@link #writeDesign(Element, DesignContext)} to be overridable in
* subclasses that need to replace this, but still must be able to call
* {@code super.writeDesign(...)}.
*
* @see #doReadDesign(Element, DesignContext)
*
* @param design
* The element to write the component state to. Any previous
* attributes or child nodes are <i>not</i> cleared.
* @param designContext
* The DesignContext instance used for writing the design
*
*/
protected void doWriteDesign(Element design, DesignContext designContext) {
// Write options if warranted
if (designContext.shouldWriteData(this)) {
writeItems(design, designContext);
}
AbstractListing<T> select = designContext.getDefaultInstance(this);
Attributes attr = design.attributes();
DesignAttributeHandler.writeAttribute("readonly", attr, isReadOnly(),
select.isReadOnly(), Boolean.class, designContext);
} | 3.68 |
hbase_QuotaTableUtil_makeGetForSnapshotSize | /**
* Creates a {@link Get} for the HBase snapshot's size against the given table.
*/
static Get makeGetForSnapshotSize(TableName tn, String snapshot) {
Get g = new Get(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(tn.toString())));
g.addColumn(QUOTA_FAMILY_USAGE,
Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot)));
return g;
} | 3.68 |
framework_GridMultiSelect_getSelectAllCheckBoxVisibility | /**
* Gets the current mode for the select all checkbox visibility.
*
* @return the select all checkbox visibility mode
* @see SelectAllCheckBoxVisibility
* @see #isSelectAllCheckBoxVisible()
*/
public SelectAllCheckBoxVisibility getSelectAllCheckBoxVisibility() {
return model.getSelectAllCheckBoxVisibility();
} | 3.68 |
zxing_ResultPoint_crossProductZ | /**
* Returns the z component of the cross product between vectors BC and BA.
*/
private static float crossProductZ(ResultPoint pointA,
ResultPoint pointB,
ResultPoint pointC) {
float bX = pointB.x;
float bY = pointB.y;
return ((pointC.x - bX) * (pointA.y - bY)) - ((pointC.y - bY) * (pointA.x - bX));
} | 3.68 |
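A worked example with concrete points; the geometric reading of the sign is my interpretation, not stated in the snippet:

```java
// A = (0,0), B = (1,0), C = (1,1)
// z = (Cx - Bx) * (Ay - By) - (Cy - By) * (Ax - Bx)
//   = (1 - 1) * (0 - 0)     - (1 - 0) * (0 - 1)
//   = 0 - (-1) = 1
// Positive: A lies to the left of the ray from B to C; a negative value would mean the right side,
// and zero would mean the three points are collinear.
```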
hbase_Bytes_contains | /**
* Return true if target is present as a contiguous subsequence anywhere in the given array.
* @param array an array of {@code byte} values, possibly empty
* @param target an array of {@code byte}
* @return {@code true} if {@code target} is present anywhere in {@code array}
*/
public static boolean contains(byte[] array, byte[] target) {
return indexOf(array, target) > -1;
} | 3.68 |
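A quick usage sketch; the delegation to indexOf means the match is for a contiguous sub-array:

```java
byte[] haystack = {1, 2, 3, 4};
Bytes.contains(haystack, new byte[] {2, 3});   // true: contiguous match starting at index 1
Bytes.contains(haystack, new byte[] {3, 2});   // false: order matters
```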
flink_KeyedStream_timeWindow | /**
* Windows this {@code KeyedStream} into sliding time windows.
*
* <p>This is a shortcut for either {@code .window(SlidingEventTimeWindows.of(size, slide))} or
* {@code .window(SlidingProcessingTimeWindows.of(size, slide))} depending on the time
* characteristic set using {@link
* org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)}
*
* @param size The size of the window.
* @deprecated Please use {@link #window(WindowAssigner)} with either {@link
* SlidingEventTimeWindows} or {@link SlidingProcessingTimeWindows}. For more information,
* see the deprecation notice on {@link TimeCharacteristic}
*/
@Deprecated
public WindowedStream<T, KEY, TimeWindow> timeWindow(Time size, Time slide) {
if (environment.getStreamTimeCharacteristic() == TimeCharacteristic.ProcessingTime) {
return window(SlidingProcessingTimeWindows.of(size, slide));
} else {
return window(SlidingEventTimeWindows.of(size, slide));
}
} | 3.68 |
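A hedged sketch of the replacement named in the deprecation notice, assuming a KeyedStream called `keyed` with event-time watermarks already assigned (the stream name and type parameters are illustrative):

```java
WindowedStream<MyEvent, String, TimeWindow> windowed =
        keyed.window(SlidingEventTimeWindows.of(Time.seconds(10), Time.seconds(5)));
```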
pulsar_DnsResolverUtil_applyJdkDnsCacheSettings | /**
* Configure Netty's {@link DnsNameResolverBuilder}'s ttl and negativeTtl to match the JDK's DNS caching settings.
* If the JDK setting for TTL is forever (-1), the TTL will be set to 60 seconds.
*
* @param dnsNameResolverBuilder The Netty {@link DnsNameResolverBuilder} instance to apply the settings
*/
public static void applyJdkDnsCacheSettings(DnsNameResolverBuilder dnsNameResolverBuilder) {
dnsNameResolverBuilder.ttl(MIN_TTL, TTL);
dnsNameResolverBuilder.negativeTtl(NEGATIVE_TTL);
} | 3.68 |
hadoop_SCMStore_cleanResourceReferences | /**
* Clean all resource references to a cache resource that contain application
* ids pointing to finished applications. If the resource key does not exist,
* do nothing.
*
* @param key a unique identifier for a resource
* @throws YarnException
*/
@Private
public void cleanResourceReferences(String key) throws YarnException {
Collection<SharedCacheResourceReference> refs = getResourceReferences(key);
if (!refs.isEmpty()) {
Set<SharedCacheResourceReference> refsToRemove =
new HashSet<SharedCacheResourceReference>();
for (SharedCacheResourceReference r : refs) {
if (!appChecker.isApplicationActive(r.getAppId())) {
// application in resource reference is dead, it is safe to remove the
// reference
refsToRemove.add(r);
}
}
if (refsToRemove.size() > 0) {
removeResourceReferences(key, refsToRemove, false);
}
}
} | 3.68 |
hadoop_S3ClientFactory_getRegion | /**
* Get the region.
* @return the region
*/
public String getRegion() {
return region;
} | 3.68 |
hbase_ScannerModel_setColumns | /**
* @param columns list of columns of interest in column:qualifier format, or empty for all
*/
public void setColumns(List<byte[]> columns) {
this.columns = columns;
} | 3.68 |
hadoop_HttpExceptionUtils_createServletExceptionResponse | /**
* Creates a HTTP servlet response serializing the exception in it as JSON.
*
* @param response the servlet response
* @param status the error code to set in the response
* @param ex the exception to serialize in the response
* @throws IOException thrown if there was an error while creating the
* response
*/
public static void createServletExceptionResponse(
HttpServletResponse response, int status, Throwable ex)
throws IOException {
response.setStatus(status);
response.setContentType(APPLICATION_JSON_MIME);
Map<String, Object> json = new LinkedHashMap<String, Object>();
json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex));
json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName());
json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName());
Map<String, Object> jsonResponse =
Collections.singletonMap(ERROR_JSON, json);
Writer writer = response.getWriter();
JsonSerialization.writer().writeValue(writer, jsonResponse);
writer.flush();
} | 3.68 |
hbase_TableDescriptorBuilder_setDurability | /**
* Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
* @param durability enum value
* @return the modifyable TD
*/
public ModifyableTableDescriptor setDurability(Durability durability) {
return setValue(DURABILITY_KEY, durability.name());
} | 3.68 |
hbase_BackupRestoreFactory_getBackupMergeJob | /**
* Gets backup merge job
* @param conf configuration
* @return backup merge job instance
*/
public static BackupMergeJob getBackupMergeJob(Configuration conf) {
Class<? extends BackupMergeJob> cls = conf.getClass(HBASE_BACKUP_MERGE_IMPL_CLASS,
MapReduceBackupMergeJob.class, BackupMergeJob.class);
BackupMergeJob service = ReflectionUtils.newInstance(cls, conf);
service.setConf(conf);
return service;
} | 3.68 |
MagicPlugin_Wand_checkHotbarCount | // This catches the hotbar_count having changed since the last time the inventory was built
// in which case we want to add a new hotbar inventory without re-arranging the main inventories
// newly added hotbars will be empty, spells in removed hotbars will be added to the end of the inventories.
protected void checkHotbarCount() {
if (!hasInventory || getHotbarCount() == 0) return;
int hotbarCount = Math.max(1, getInt("hotbar_count", 1));
if (hotbarCount != hotbars.size()) {
while (hotbars.size() < hotbarCount) {
hotbars.add(new WandInventory(HOTBAR_INVENTORY_SIZE));
}
while (hotbars.size() > hotbarCount) {
hotbars.remove(0);
}
List<WandInventory> pages = new ArrayList<>(inventories);
int slotOffset = getInt("hotbar_count") * HOTBAR_INVENTORY_SIZE;
int index = 0;
for (WandInventory inventory : pages) {
for (ItemStack itemStack : inventory.items) {
updateSlot(index + slotOffset, itemStack);
index++;
}
}
updateSpellInventory();
updateBrushInventory();
}
setProperty("hotbar_inventory_count", hotbarCount);
} | 3.68 |
hbase_JVMClusterUtil_startup | /**
* Start the cluster. Waits until there is a primary master initialized and returns its address.
* @return Address to use contacting primary master.
*/
public static String startup(final List<JVMClusterUtil.MasterThread> masters,
final List<JVMClusterUtil.RegionServerThread> regionservers) throws IOException {
// Implementation note: This method relies on timed sleeps in a loop. It's not great, and
// should probably be re-written to use actual synchronization objects, but it's ok for now
Configuration configuration = null;
if (masters == null || masters.isEmpty()) {
return null;
}
for (JVMClusterUtil.MasterThread t : masters) {
configuration = t.getMaster().getConfiguration();
t.start();
}
// Wait for an active master
// having an active master before starting the region threads allows
// then to succeed on their connection to master
final int startTimeout = configuration != null
? Integer.parseInt(configuration.get("hbase.master.start.timeout.localHBaseCluster", "30000"))
: 30000;
waitForEvent(startTimeout, "active", () -> findActiveMaster(masters) != null);
if (regionservers != null) {
for (JVMClusterUtil.RegionServerThread t : regionservers) {
t.start();
}
}
// Wait for an active master to be initialized (implies being master)
// with this, when we return the cluster is complete
final int initTimeout = configuration != null
? Integer.parseInt(configuration.get("hbase.master.init.timeout.localHBaseCluster", "200000"))
: 200000;
waitForEvent(initTimeout, "initialized", () -> {
JVMClusterUtil.MasterThread t = findActiveMaster(masters);
// master thread should never be null at this point, but let's keep the check anyway
return t != null && t.master.isInitialized();
});
return findActiveMaster(masters).master.getServerName().toString();
} | 3.68 |
hadoop_AbstractManifestData_unmarshallPath | /**
* Convert a string path to Path type, by way of a URI.
* @param path path as a string
* @return path value
* @throws RuntimeException marshalling failure.
*/
public static Path unmarshallPath(String path) {
try {
return new Path(new URI(requireNonNull(path, "No path")));
} catch (URISyntaxException e) {
throw new RuntimeException(
"Failed to parse \"" + path + "\" : " + e,
e);
}
} | 3.68 |
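A quick usage sketch, assuming a static import of the helper (the path string is illustrative):

```java
Path p = unmarshallPath("s3a://bucket/datasets/part-00000.parquet");
// p.toUri().getScheme() -> "s3a"
// A string that is not a valid URI surfaces as a RuntimeException wrapping the URISyntaxException;
// a null argument fails the requireNonNull check with the message "No path".
```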
morf_HumanReadableStatementHelper_generateFromAndWhereClause | /**
* Generates the from and where clause for a select statement. If there is no selection
* expression then the 'where' fragment will be omitted.
*
* @param statement the statement to describe.
* @param prefix whether to include the " from " prefix in the clause
* @return a string containing the human-readable description of the source data.
*/
private static String generateFromAndWhereClause(final AbstractSelectStatement<?> statement, final boolean prefix) {
final StringBuilder sb = new StringBuilder();
if (statement.getTable() == null) {
if (statement.getFromSelects() == null) {
throw new UnsupportedOperationException("No table or sub-selects for: [" + statement.getClass().getName() + "]");
} else {
boolean comma = false;
for (AbstractSelectStatement<?> subSelect : statement.getFromSelects()) {
if (comma) {
sb.append(", ");
} else {
if (prefix) {
sb.append(" from ");
}
comma = true;
}
sb.append(generateSelectStatementString(subSelect, true));
}
return sb.toString();
}
} else {
if (prefix) {
sb.append(" from ");
}
sb.append(statement.getTable().getName());
}
if (statement.getJoins() != null) {
for (Join join : statement.getJoins()) {
sb.append(" and ");
if (join.getTable() == null) {
if (join.getSubSelect() == null) {
throw new UnsupportedOperationException("No table or sub-selects for: [" + join.getClass().getName() + "]");
} else {
sb.append('(').append(generateSelectStatementString(join.getSubSelect(), false)).append(')');
}
} else {
sb.append(join.getTable().getName());
}
if (join.getCriterion() != null) {
sb.append(", joined on ").append(generateCriterionString(join.getCriterion(), false)).append(',');
}
}
if (sb.charAt(sb.length() - 1) == ',' && statement.getWhereCriterion() == null) {
sb.delete(sb.length() - 1, sb.length());
}
}
sb.append(generateWhereClause(statement.getWhereCriterion()));
return sb.toString();
} | 3.68 |
flink_Either_isRight | /** @return true if this is a Right value, false if this is a Left value */
public final boolean isRight() {
return getClass() == Right.class;
} | 3.68 |
pulsar_Authentication_newRequestHeader | /**
* Builds the set of HTTP request headers used to authenticate against the given host, based on the authentication data.
*/
default Set<Entry<String, String>> newRequestHeader(String hostName,
AuthenticationDataProvider authData,
Map<String, String> previousResHeaders) throws Exception {
return authData.getHttpHeaders();
} | 3.68 |
hbase_HMaster_move | // Public so can be accessed by tests. Blocks until move is done.
// Replace with an async implementation from which you can get
// a success/failure result.
@InterfaceAudience.Private
public void move(final byte[] encodedRegionName, byte[] destServerName) throws IOException {
RegionState regionState =
assignmentManager.getRegionStates().getRegionState(Bytes.toString(encodedRegionName));
RegionInfo hri;
if (regionState != null) {
hri = regionState.getRegion();
} else {
throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
}
ServerName dest;
List<ServerName> exclude = hri.getTable().isSystemTable()
? assignmentManager.getExcludedServersForSystemTable()
: new ArrayList<>(1);
if (
destServerName != null && exclude.contains(ServerName.valueOf(Bytes.toString(destServerName)))
) {
LOG.info(Bytes.toString(encodedRegionName) + " can not move to "
+ Bytes.toString(destServerName) + " because the server is in exclude list");
destServerName = null;
}
if (destServerName == null || destServerName.length == 0) {
LOG.info("Passed destination servername is null/empty so " + "choosing a server at random");
exclude.add(regionState.getServerName());
final List<ServerName> destServers = this.serverManager.createDestinationServersList(exclude);
dest = balancer.randomAssignment(hri, destServers);
if (dest == null) {
LOG.debug("Unable to determine a plan to assign " + hri);
return;
}
} else {
ServerName candidate = ServerName.valueOf(Bytes.toString(destServerName));
dest = balancer.randomAssignment(hri, Lists.newArrayList(candidate));
if (dest == null) {
LOG.debug("Unable to determine a plan to assign " + hri);
return;
}
// TODO: deal with table on master for rs group.
if (dest.equals(serverName)) {
// To avoid unnecessary region moving later by balancer. Don't put user
// regions on master.
LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
+ " to avoid unnecessary region moving later by load balancer,"
+ " because it should not be on master");
return;
}
}
if (dest.equals(regionState.getServerName())) {
LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
+ " because region already assigned to the same server " + dest + ".");
return;
}
// Now we can do the move
RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);
assert rp.getDestination() != null : rp.toString() + " " + dest;
try {
checkInitialized();
if (this.cpHost != null) {
this.cpHost.preMove(hri, rp.getSource(), rp.getDestination());
}
TransitRegionStateProcedure proc =
this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination());
if (conf.getBoolean(WARMUP_BEFORE_MOVE, DEFAULT_WARMUP_BEFORE_MOVE)) {
// Warmup the region on the destination before initiating the move.
// A region server could reject the close request because it either does not
// have the specified region or the region is being split.
LOG.info(getClientIdAuditPrefix() + " move " + rp + ", warming up region on "
+ rp.getDestination());
warmUpRegion(rp.getDestination(), hri);
}
LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
Future<byte[]> future = ProcedureSyncWait.submitProcedure(this.procedureExecutor, proc);
try {
// Is this going to work? Will we throw exception on error?
// TODO: CompletableFuture rather than this stunted Future.
future.get();
} catch (InterruptedException | ExecutionException e) {
throw new HBaseIOException(e);
}
if (this.cpHost != null) {
this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
}
} catch (IOException ioe) {
if (ioe instanceof HBaseIOException) {
throw (HBaseIOException) ioe;
}
throw new HBaseIOException(ioe);
}
} | 3.68 |
framework_VAccordion_setContent | /**
* Updates the content of the open tab of the accordion.
*
* This method is mostly for internal use and may change in future
* versions.
*
* @since 7.2
* @param newWidget
* new content
*/
public void setContent(Widget newWidget) {
if (widget == null) {
widget = newWidget;
widgets.add(newWidget);
} else if (widget != newWidget) {
replaceWidget(newWidget);
}
if (isOpen() && isDynamicHeight()) {
setHeightFromWidget();
}
} | 3.68 |
Activiti_ObjectValueExpression_isLiteralText | /**
* Answer <code>false</code>.
*/
@Override
public boolean isLiteralText() {
return false;
} | 3.68 |
hadoop_S3ListResult_v1 | /**
* Restricted constructors to ensure v1 or v2, not both.
* @param result v1 result
* @return new list result container
*/
public static S3ListResult v1(ListObjectsResponse result) {
return new S3ListResult(requireNonNull(result), null);
} | 3.68 |
hbase_MasterRpcServices_switchBalancer | /**
* Assigns balancer switch according to BalanceSwitchMode
* @param b new balancer switch
* @param mode BalanceSwitchMode
* @return old balancer switch
*/
boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException {
boolean oldValue = server.loadBalancerStateStore.get();
boolean newValue = b;
try {
if (server.cpHost != null) {
server.cpHost.preBalanceSwitch(newValue);
}
if (mode == BalanceSwitchMode.SYNC) {
synchronized (server.getLoadBalancer()) {
server.loadBalancerStateStore.set(newValue);
}
} else {
server.loadBalancerStateStore.set(newValue);
}
LOG.info(server.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue);
if (server.cpHost != null) {
server.cpHost.postBalanceSwitch(oldValue, newValue);
}
server.getLoadBalancer().updateBalancerStatus(newValue);
} catch (IOException ioe) {
LOG.warn("Error flipping balance switch", ioe);
}
return oldValue;
} | 3.68 |
hadoop_TypedBytesInput_readRawList | /**
* Reads the raw bytes following a <code>Type.LIST</code> code.
* @return the obtained bytes sequence
* @throws IOException
*/
public byte[] readRawList() throws IOException {
Buffer buffer = new Buffer(new byte[] { (byte) Type.LIST.code });
byte[] bytes = readRaw();
while (bytes != null) {
buffer.append(bytes);
bytes = readRaw();
}
buffer.append(new byte[] { (byte) Type.MARKER.code });
return buffer.get();
} | 3.68 |
hbase_BucketCache_retrieveFromFile | /**
* @see #persistToFile()
*/
private void retrieveFromFile(int[] bucketSizes) throws IOException {
LOG.info("Started retrieving bucket cache from file");
File persistenceFile = new File(persistencePath);
if (!persistenceFile.exists()) {
LOG.warn("Persistence file missing! "
+ "It's ok if it's first run after enabling persistent cache.");
bucketAllocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMap, realCacheSize);
blockNumber.add(backingMap.size());
backingMapValidated.set(true);
return;
}
assert !cacheEnabled;
try (FileInputStream in = new FileInputStream(persistenceFile)) {
int pblen = ProtobufMagic.lengthOfPBMagic();
byte[] pbuf = new byte[pblen];
int read = in.read(pbuf);
if (read != pblen) {
throw new IOException("Incorrect number of bytes read while checking for protobuf magic "
+ "number. Requested=" + pblen + ", Received= " + read + ", File=" + persistencePath);
}
if (!ProtobufMagic.isPBMagicPrefix(pbuf)) {
// In 3.0 we have enough flexibility to dump the old cache data.
// TODO: In 2.x line, this might need to be filled in to support reading the old format
throw new IOException(
"Persistence file does not start with protobuf magic number. " + persistencePath);
}
parsePB(BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in));
bucketAllocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMap, realCacheSize);
blockNumber.add(backingMap.size());
LOG.info("Bucket cache retrieved from file successfully");
}
} | 3.68 |
framework_VTooltip_setQuickOpenDelay | /**
* Sets the time (in ms) that should elapse before a tooltip will be shown,
* in the situation when a tooltip has very recently been shown (within
* {@link #getQuickOpenDelay()} ms).
*
* @param quickOpenDelay
* The quick open delay (in ms)
*/
public void setQuickOpenDelay(int quickOpenDelay) {
this.quickOpenDelay = quickOpenDelay;
} | 3.68 |
flink_ExecutionConfig_getDefaultKryoSerializerClasses | /** Returns the registered default Kryo Serializer classes. */
public LinkedHashMap<Class<?>, Class<? extends Serializer<?>>>
getDefaultKryoSerializerClasses() {
return defaultKryoSerializerClasses;
} | 3.68 |
zxing_DecoderResult_setNumBits | /**
* @param numBits overrides the number of bits that are valid in {@link #getRawBytes()}
* @since 3.3.0
*/
public void setNumBits(int numBits) {
this.numBits = numBits;
} | 3.68 |
streampipes_ElasticsearchSinkBase_disableFlushOnCheckpoint | /**
* Disable flushing on checkpoint. When disabled, the sink will not wait for all
* pending action requests to be acknowledged by Elasticsearch on checkpoints.
*
* <p>NOTE: If flushing on checkpoint is disabled, the Flink Elasticsearch Sink does NOT
* provide any strong guarantees for at-least-once delivery of action requests.
*/
public void disableFlushOnCheckpoint() {
this.flushOnCheckpoint = false;
} | 3.68 |
hbase_Region_checkAndRowMutate | /**
* Atomically checks if a row matches the filter and if it does, it performs the row mutations.
* Use to do many mutations on a single row. Use checkAndMutate to do one checkAndMutate at a
* time.
* @param row to check
* @param filter the filter
* @param mutations data to put if check succeeds
* @return true if mutations were applied, false otherwise
* @deprecated since 3.0.0 and will be removed in 4.0.0. Use
* {@link #checkAndMutate(CheckAndMutate)} instead.
*/
@Deprecated
default boolean checkAndRowMutate(byte[] row, Filter filter, RowMutations mutations)
throws IOException {
return checkAndRowMutate(row, filter, TimeRange.allTime(), mutations);
} | 3.68 |
hbase_HBaseTestingUtility_predicateTableDisabled | /**
* Returns a {@link Predicate} for checking that the table is disabled
*/
public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
return new ExplainingPredicate<IOException>() {
@Override
public String explainFailure() throws IOException {
return explainTableState(tableName, TableState.State.DISABLED);
}
@Override
public boolean evaluate() throws IOException {
return getAdmin().isTableDisabled(tableName);
}
};
} | 3.68 |
hbase_Bytes_toBigDecimal | /** Converts a byte array to a BigDecimal value */
public static BigDecimal toBigDecimal(byte[] bytes, int offset, final int length) {
if (bytes == null || length < SIZEOF_INT + 1 || (offset + length > bytes.length)) {
return null;
}
int scale = toInt(bytes, offset);
byte[] tcBytes = new byte[length - SIZEOF_INT];
System.arraycopy(bytes, offset + SIZEOF_INT, tcBytes, 0, length - SIZEOF_INT);
return new BigDecimal(new BigInteger(tcBytes), scale);
} | 3.68 |
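A round-trip sketch of the layout the method expects, a 4-byte scale followed by the two's-complement unscaled value; it assumes the companion Bytes.toBytes(BigDecimal) writer, which is not shown here:

```java
BigDecimal original = new BigDecimal("12.34");                    // unscaled value 1234, scale 2
byte[] serialized = Bytes.toBytes(original);                      // assumed companion writer
BigDecimal copy = Bytes.toBigDecimal(serialized, 0, serialized.length);
// copy.equals(original) -> true
```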
hudi_JmxReporterServer_forRegistry | /**
* Returns a new {@link JmxReporterServer.Builder} for {@link JmxReporterServer}.
*
* @param registry the registry to report
* @return a {@link JmxReporterServer.Builder} instance for a {@link JmxReporterServer}
*/
public static JmxReporterServer.Builder forRegistry(MetricRegistry registry) {
return new JmxReporterServer.Builder(registry);
} | 3.68 |
framework_TabSheet_readDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractComponent#readDesign(org.jsoup.nodes .Element,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void readDesign(Element design, DesignContext designContext) {
super.readDesign(design, designContext);
// create new tabs
for (Element tab : design.children()) {
if (!tab.tagName().equals("tab")) {
throw new DesignException(
"Invalid tag name for tabsheet tab " + tab.tagName());
}
readTabFromDesign(tab, designContext);
}
} | 3.68 |
hadoop_WordMean_getMean | /**
* Only valid after run() has been called.
*
* @return Returns the mean value.
*/
public double getMean() {
return mean;
} | 3.68 |
pulsar_JavaInstanceRunnable_run | /**
* The core logic that initialize the instance thread and executes the function.
*/
@Override
public void run() {
try {
setup();
Thread currentThread = Thread.currentThread();
Consumer<Throwable> asyncErrorHandler = throwable -> currentThread.interrupt();
AsyncResultConsumer asyncResultConsumer = this::handleResult;
while (true) {
currentRecord = readInput();
// increment number of records received from source
stats.incrTotalReceived();
if (instanceConfig.getFunctionDetails().getProcessingGuarantees() == org.apache.pulsar.functions
.proto.Function.ProcessingGuarantees.ATMOST_ONCE) {
if (instanceConfig.getFunctionDetails().getAutoAck()) {
currentRecord.ack();
}
}
JavaExecutionResult result;
// set last invocation time
stats.setLastInvocation(System.currentTimeMillis());
// start time for process latency stat
stats.processTimeStart();
// process the message
Thread.currentThread().setContextClassLoader(functionClassLoader);
result = javaInstance.handleMessage(
currentRecord,
currentRecord.getValue(),
asyncResultConsumer,
asyncErrorHandler);
Thread.currentThread().setContextClassLoader(instanceClassLoader);
// register end time
stats.processTimeEnd();
if (result != null) {
// process the synchronous results
handleResult(currentRecord, result);
}
if (deathException != null) {
// Ideally the current java instance thread will be interrupted when the deathException is set.
// But if the CompletableFuture returned by the Pulsar Function is completed exceptionally(the
// function has invoked the fatal method) before being put into the JavaInstance
// .pendingAsyncRequests, the interrupted exception may be thrown when putting this future to
// JavaInstance.pendingAsyncRequests. The interrupted exception would be caught by the JavaInstance
// and be skipped.
// Therefore, we need to handle this case by checking the deathException here and rethrow it.
throw deathException;
}
}
} catch (Throwable t) {
if (deathException != null) {
log.error("[{}] Fatal exception occurred in the instance", FunctionCommon.getFullyQualifiedInstanceId(
instanceConfig.getFunctionDetails().getTenant(),
instanceConfig.getFunctionDetails().getNamespace(),
instanceConfig.getFunctionDetails().getName(),
instanceConfig.getInstanceId()), deathException);
} else {
log.error("[{}] Uncaught exception in Java Instance", FunctionCommon.getFullyQualifiedInstanceId(
instanceConfig.getFunctionDetails().getTenant(),
instanceConfig.getFunctionDetails().getNamespace(),
instanceConfig.getFunctionDetails().getName(),
instanceConfig.getInstanceId()), t);
deathException = t;
}
if (stats != null) {
stats.incrSysExceptions(deathException);
}
} finally {
log.info("Closing instance");
close();
}
} | 3.68 |
hadoop_ServiceRecord_clone | /**
* Shallow clone: all endpoints will be shared across instances
* @return a clone of the instance
* @throws CloneNotSupportedException
*/
@Override
protected Object clone() throws CloneNotSupportedException {
return super.clone();
} | 3.68 |
hbase_HBaseTestingUtility_flush | /**
* Flushes all caches in the mini hbase cluster
*/
public void flush(TableName tableName) throws IOException {
getMiniHBaseCluster().flushcache(tableName);
} | 3.68 |
hadoop_OBSPosixBucketUtils_fsIsFolder | /**
* Determines whether an object is a file or a folder.
*
* @param attr posix object attribute
* @return is posix folder
*/
static boolean fsIsFolder(final ObsFSAttribute attr) {
final int ifDir = 0x004000;
int mode = attr.getMode();
// object mode is -1 when the object is migrated from
// object bucket to posix bucket.
// -1 is a file, not folder.
if (mode < 0) {
return false;
}
return (mode & ifDir) != 0;
} | 3.68 |
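A worked example of the mode check: 0x004000 is the POSIX S_IFDIR bit, so a typical directory mode has it set while a regular-file mode does not (pure arithmetic, no OBS types):

```java
int dirMode  = 0040755;                               // octal: directory bit + rwxr-xr-x
int fileMode = 0100644;                               // octal: regular file + rw-r--r--
boolean dirIsFolder  = (dirMode  & 0x004000) != 0;    // true
boolean fileIsFolder = (fileMode & 0x004000) != 0;    // false
```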
framework_SortOrderBuilder_thenAsc | /**
* Appends sorting with ascending sort direction.
*
* @param by
* the object to sort by
* @return this sort builder
*/
public SortOrderBuilder<T, V> thenAsc(V by) {
return append(createSortOrder(by, SortDirection.ASCENDING));
} | 3.68 |
hadoop_RequestFactoryImpl_getCannedACL | /**
* Get the canned ACL of this FS.
* @return an ACL, if any
*/
@Override
public String getCannedACL() {
return cannedACL;
} | 3.68 |
hudi_BaseConsistentHashingBucketClusteringPlanStrategy_constructExtraMetadata | /**
* Construct extra metadata for clustering group
*/
private Map<String, String> constructExtraMetadata(String partition, List<ConsistentHashingNode> nodes, int seqNo) {
Map<String, String> extraMetadata = new HashMap<>();
try {
extraMetadata.put(METADATA_PARTITION_KEY, partition);
extraMetadata.put(METADATA_CHILD_NODE_KEY, ConsistentHashingNode.toJsonString(nodes));
extraMetadata.put(METADATA_SEQUENCE_NUMBER_KEY, Integer.toString(seqNo));
} catch (IOException e) {
LOG.error("Failed to construct extra metadata, partition: {}, nodes:{}", partition, nodes);
throw new HoodieClusteringException("Failed to construct extra metadata, partition: " + partition + ", nodes:" + nodes);
}
return extraMetadata;
} | 3.68 |
dubbo_AbstractZookeeperTransporter_writeToClientMap | /**
* write address-ZookeeperClient relationship to Map
*
* @param addressList
* @param zookeeperClient
*/
void writeToClientMap(List<String> addressList, ZookeeperClient zookeeperClient) {
for (String address : addressList) {
zookeeperClientMap.put(address, zookeeperClient);
}
} | 3.68 |
hbase_Table_append | /**
* Appends values to one or more columns within a single row.
* <p>
* This operation guaranteed atomicity to readers. Appends are done under a single row lock, so
* write operations to a row are synchronized, and readers are guaranteed to see this operation
* fully completed.
* @param append object that specifies the columns and values to be appended
* @throws IOException e
* @return values of columns after the append operation (maybe null)
*/
default Result append(final Append append) throws IOException {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
querydsl_SQLExpressions_regrSxy | /**
* REGR_SXY makes the following computation after the elimination of null (arg1, arg2) pairs:
*
* <p>REGR_COUNT(arg1, arg2) * COVAR_POP(arg1, arg2)</p>
*
* @param arg1 first arg
* @param arg2 second arg
* @return regr_sxy(arg1, arg2)
*/
public static WindowOver<Double> regrSxy(Expression<? extends Number> arg1, Expression<? extends Number> arg2) {
return new WindowOver<Double>(Double.class, SQLOps.REGR_SXY, arg1, arg2);
} | 3.68 |
framework_CustomizedSystemMessages_setCommunicationErrorMessage | /**
* Sets the message of the notification. Set to null for no message. If both
* the caption and the message are null, the notification is disabled.
*
* @param communicationErrorMessage
* the message
*/
public void setCommunicationErrorMessage(String communicationErrorMessage) {
this.communicationErrorMessage = communicationErrorMessage;
} | 3.68 |
hmily_IndexMetaDataLoader_load | /**
* Loads the index metadata list.
* In a few JDBC implementations (e.g. Oracle), the return value of getIndexInfo contains a statistics record that is not an index itself, and its INDEX_NAME is null.
*
* @param connection connection
* @param table table name
* @return index meta data list
* @throws SQLException SQL exception
*/
public static Collection<IndexMetaData> load(final Connection connection, final String table) throws SQLException {
Collection<IndexMetaData> result = new HashSet<>();
try (ResultSet resultSet = connection.getMetaData().getIndexInfo(connection.getCatalog(), connection.getSchema(), table, false, false)) {
while (resultSet.next()) {
String indexName = resultSet.getString(INDEX_NAME);
if (null != indexName) {
result.add(new IndexMetaData(indexName));
}
}
}
return result;
} | 3.68 |
framework_VScrollTable_setExpandRatio | /**
* Sets the expand ratio of the cell.
*
* @param floatAttribute
* The expand ratio
*/
public void setExpandRatio(float floatAttribute) {
expandRatio = floatAttribute;
} | 3.68 |
hadoop_RLESparseResourceAllocation_addInterval | /**
* Add a resource for the specified interval.
*
* @param reservationInterval the interval for which the resource is to be
* added
* @param totCap the resource to be added
* @return true if addition is successful, false otherwise
*/
public boolean addInterval(ReservationInterval reservationInterval,
Resource totCap) {
if (totCap.equals(ZERO_RESOURCE)) {
return true;
}
writeLock.lock();
try {
NavigableMap<Long, Resource> addInt = new TreeMap<Long, Resource>();
addInt.put(reservationInterval.getStartTime(), totCap);
addInt.put(reservationInterval.getEndTime(), ZERO_RESOURCE);
try {
cumulativeCapacity =
merge(resourceCalculator, totCap, cumulativeCapacity, addInt,
Long.MIN_VALUE, Long.MAX_VALUE, RLEOperator.add);
} catch (PlanningException e) {
// never happens for add
}
return true;
} finally {
writeLock.unlock();
}
} | 3.68 |
hadoop_TypedBytesInput_readRawByte | /**
* Reads the raw byte following a <code>Type.BYTE</code> code.
* @return the obtained byte
* @throws IOException
*/
public byte[] readRawByte() throws IOException {
byte[] bytes = new byte[2];
bytes[0] = (byte) Type.BYTE.code;
in.readFully(bytes, 1, 1);
return bytes;
} | 3.68 |
framework_VaadinPortletSession_firePortletEventRequest | /**
* For internal use by the framework only - API subject to change.
*/
public void firePortletEventRequest(UI uI, EventRequest request,
EventResponse response) {
for (PortletListener l : new ArrayList<>(portletListeners)) {
l.handleEventRequest(request, response, uI);
}
} | 3.68 |
hadoop_SubClusterState_fromString | /**
* Convert a string into {@code SubClusterState}.
*
* @param state the string to convert in SubClusterState
* @return the respective {@code SubClusterState}
*/
public static SubClusterState fromString(String state) {
try {
return SubClusterState.valueOf(state);
} catch (Exception e) {
LOG.error("Invalid SubCluster State value({}) in the StateStore does not"
+ " match with the YARN Federation standard.", state);
return null;
}
} | 3.68 |
hudi_SpillableMapUtils_getPreCombineVal | /**
* Returns the preCombine value with given field name.
*
* @param rec The avro record
* @param preCombineField The preCombine field name
* @return the preCombine field value or 0 if the field does not exist in the avro schema
*/
private static Object getPreCombineVal(GenericRecord rec, String preCombineField) {
if (preCombineField == null) {
return 0;
}
Schema.Field field = rec.getSchema().getField(preCombineField);
return field == null ? 0 : rec.get(field.pos());
} | 3.68 |
hbase_RegionCoprocessorHost_postExists | /**
* @param get the Get request
* @param result the result returned by the region server
* @return the result to return to the client
* @exception IOException Exception
*/
public boolean postExists(final Get get, boolean result) throws IOException {
if (this.coprocEnvironments.isEmpty()) {
return result;
}
return execOperationWithResult(
new ObserverOperationWithResult<RegionObserver, Boolean>(regionObserverGetter, result) {
@Override
public Boolean call(RegionObserver observer) throws IOException {
return observer.postExists(this, get, getResult());
}
});
} | 3.68 |
flink_HiveFunctionWrapper_deserializeUDF | /**
* Deserializes the UDF from the serialized bytes held by this wrapper.
*
* @return the UDF deserialized
*/
@SuppressWarnings("unchecked")
private UDFType deserializeUDF() {
return (UDFType)
deserializeObjectFromKryo(udfSerializedBytes, (Class<Serializable>) getUDFClass());
} | 3.68 |
dubbo_ServiceBean_exported | /**
* @since 2.6.5
*/
@Override
protected void exported() {
super.exported();
// Publish ServiceBeanExportedEvent
publishExportEvent();
} | 3.68 |
dubbo_Environment_getConfiguration | /**
* There are two ways to get configuration during exposure / reference or at runtime:
* 1. URL: the value in the URL is relatively fixed, so we can read it directly.
* 2. The configuration exposed by this method is convenient for querying the latest values from multiple
* prioritized sources; it also guarantees that dynamically changed configs take effect on the fly.
*/
public CompositeConfiguration getConfiguration() {
if (globalConfiguration == null) {
CompositeConfiguration configuration = new CompositeConfiguration();
configuration.addConfiguration(systemConfiguration);
configuration.addConfiguration(environmentConfiguration);
configuration.addConfiguration(appExternalConfiguration);
configuration.addConfiguration(externalConfiguration);
configuration.addConfiguration(appConfiguration);
configuration.addConfiguration(propertiesConfiguration);
globalConfiguration = configuration;
}
return globalConfiguration;
} | 3.68 |
hudi_AvroOrcUtils_addUnionValue | /**
* Match value with its ORC type and add to the union vector at a given position.
*
* @param unionVector The vector to store value.
* @param unionChildTypes All possible types for the value Object.
* @param avroSchema Avro union schema for the value Object.
* @param value Object to be added to the unionVector
* @param vectorPos The position in the vector where value will be stored at.
* @return succeeded or failed
*/
public static boolean addUnionValue(
UnionColumnVector unionVector,
List<TypeDescription> unionChildTypes,
Schema avroSchema,
Object value,
int vectorPos
) {
int matchIndex = -1;
TypeDescription matchType = null;
Object matchValue = null;
for (int t = 0; t < unionChildTypes.size(); t++) {
TypeDescription childType = unionChildTypes.get(t);
boolean matches = false;
switch (childType.getCategory()) {
case BOOLEAN:
matches = value instanceof Boolean;
break;
case BYTE:
matches = value instanceof Byte;
break;
case SHORT:
matches = value instanceof Short;
break;
case INT:
matches = value instanceof Integer;
break;
case LONG:
matches = value instanceof Long;
break;
case FLOAT:
matches = value instanceof Float;
break;
case DOUBLE:
matches = value instanceof Double;
break;
case STRING:
case VARCHAR:
case CHAR:
if (value instanceof String) {
matches = true;
matchValue = getUTF8Bytes((String) value);
} else if (value instanceof Utf8) {
matches = true;
matchValue = ((Utf8) value).getBytes();
}
break;
case DATE:
matches = value instanceof Date;
break;
case TIMESTAMP:
matches = value instanceof Timestamp;
break;
case BINARY:
matches = value instanceof byte[] || value instanceof GenericData.Fixed;
break;
case DECIMAL:
matches = value instanceof BigDecimal;
break;
case LIST:
matches = value instanceof List;
break;
case MAP:
matches = value instanceof Map;
break;
case STRUCT:
throw new UnsupportedOperationException("Cannot handle STRUCT within UNION.");
case UNION:
List<TypeDescription> children = childType.getChildren();
if (value == null) {
matches = children == null || children.size() == 0;
} else {
matches = addUnionValue(unionVector, children, avroSchema, value, vectorPos);
}
break;
default:
throw new IllegalArgumentException("Invalid TypeDescription " + childType.getCategory().toString() + ".");
}
if (matches) {
matchIndex = t;
matchType = childType;
break;
}
}
if (value == null && matchValue != null) {
value = matchValue;
}
if (matchIndex >= 0) {
unionVector.tags[vectorPos] = matchIndex;
if (value == null) {
unionVector.isNull[vectorPos] = true;
unionVector.noNulls = false;
} else {
addToVector(matchType, unionVector.fields[matchIndex], avroSchema.getTypes().get(matchIndex), value, vectorPos);
}
return true;
} else {
return false;
}
} | 3.68 |
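A stripped-down sketch of the tag-selection idea in addUnionValue: walk the union's candidate types in order, pick the first whose Java representation matches the runtime value, and remember its index as the union tag. Plain Java classes stand in for ORC's TypeDescription here, so this is illustrative only.

import java.util.Arrays;
import java.util.List;

public class UnionTagSketch {
  // Returns the index of the first candidate class the value is an instance of, or -1 if none match.
  static int pickUnionTag(List<Class<?>> candidates, Object value) {
    for (int t = 0; t < candidates.size(); t++) {
      if (candidates.get(t).isInstance(value)) {
        return t;
      }
    }
    return -1;
  }

  public static void main(String[] args) {
    List<Class<?>> union = Arrays.asList(Long.class, String.class, byte[].class);
    System.out.println(pickUnionTag(union, "hello")); // 1 -> STRING branch
    System.out.println(pickUnionTag(union, 42L));     // 0 -> LONG branch
    System.out.println(pickUnionTag(union, 3.14));    // -1 -> no match, caller reports failure
  }
}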
nifi-maven_NarProvidedDependenciesMojo_isTest | /**
* Returns whether the specified dependency has test scope.
*
* @param node The dependency
 * @return Whether the dependency is test scoped
*/
private boolean isTest(final DependencyNode node) {
return "test".equals(node.getArtifact().getScope());
} | 3.68 |
hbase_StripeStoreFileManager_processResults | /**
* Process new files, and add them either to the structure of existing stripes, or to the list
* of new candidate stripes.
* @return New candidate stripes.
*/
private TreeMap<byte[], HStoreFile> processResults() {
TreeMap<byte[], HStoreFile> newStripes = null;
for (HStoreFile sf : this.results) {
byte[] startRow = startOf(sf), endRow = endOf(sf);
if (isInvalid(endRow) || isInvalid(startRow)) {
if (!isFlush) {
LOG.warn("The newly compacted file doesn't have stripes set: " + sf.getPath());
}
insertFileIntoStripe(getLevel0Copy(), sf);
this.l0Results.add(sf);
continue;
}
if (!this.stripeFiles.isEmpty()) {
int stripeIndex = findStripeIndexByEndRow(endRow);
if ((stripeIndex >= 0) && rowEquals(getStartRow(stripeIndex), startRow)) {
// Simple/common case - add file to an existing stripe.
insertFileIntoStripe(getStripeCopy(stripeIndex), sf);
continue;
}
}
// Make a new candidate stripe.
if (newStripes == null) {
newStripes = new TreeMap<>(MAP_COMPARATOR);
}
HStoreFile oldSf = newStripes.put(endRow, sf);
if (oldSf != null) {
throw new IllegalStateException(
"Compactor has produced multiple files for the stripe ending in ["
+ Bytes.toString(endRow) + "], found " + sf.getPath() + " and " + oldSf.getPath());
}
}
return newStripes;
} | 3.68 |
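The duplicate-stripe guard builds on TreeMap.put returning the previous mapping for a key; a minimal illustration with Strings standing in for the byte[] end rows and HStoreFile paths.

import java.util.TreeMap;

public class StripeCollisionSketch {
  public static void main(String[] args) {
    TreeMap<String, String> newStripes = new TreeMap<>();
    putStripe(newStripes, "rowZ", "file-1.hfile");
    try {
      putStripe(newStripes, "rowZ", "file-2.hfile"); // second file for the same end row
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());
    }
  }

  static void putStripe(TreeMap<String, String> stripes, String endRow, String file) {
    String old = stripes.put(endRow, file); // non-null means another file already claimed this end row
    if (old != null) {
      throw new IllegalStateException(
          "Multiple files for the stripe ending in [" + endRow + "]: " + file + " and " + old);
    }
  }
}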
flink_CreditBasedSequenceNumberingViewReader_getNextDataType | /**
* Returns the {@link org.apache.flink.runtime.io.network.buffer.Buffer.DataType} of the next
* buffer in line.
*
* <p>Returns the next data type only if the next buffer is an event or the reader has both
* available credits and buffers.
*
* @implSpec BEWARE: this must be in sync with {@link #getAvailabilityAndBacklog()}, such that
* {@code getNextDataType(bufferAndBacklog) != NONE <=>
* AvailabilityWithBacklog#isAvailable()}!
* @param bufferAndBacklog current buffer and backlog including information about the next
* buffer
* @return the next data type if the next buffer can be pulled immediately or {@link
* Buffer.DataType#NONE}
*/
private Buffer.DataType getNextDataType(BufferAndBacklog bufferAndBacklog) {
final Buffer.DataType nextDataType = bufferAndBacklog.getNextDataType();
if (numCreditsAvailable > 0 || nextDataType.isEvent()) {
return nextDataType;
}
return Buffer.DataType.NONE;
} | 3.68 |
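The gating rule can be shown with a tiny stand-alone version in which a simple enum replaces Flink's Buffer.DataType; NONE here just means "announce nothing".

public class CreditGateSketch {
  enum DataType { NONE, DATA_BUFFER, EVENT_BUFFER }

  // Events may always be announced; data buffers only while credits remain.
  static DataType announce(DataType next, int creditsAvailable) {
    boolean isEvent = next == DataType.EVENT_BUFFER;
    if (creditsAvailable > 0 || isEvent) {
      return next;
    }
    return DataType.NONE;
  }

  public static void main(String[] args) {
    System.out.println(announce(DataType.DATA_BUFFER, 0));  // NONE: no credit left
    System.out.println(announce(DataType.DATA_BUFFER, 3));  // DATA_BUFFER
    System.out.println(announce(DataType.EVENT_BUFFER, 0)); // EVENT_BUFFER: events bypass credits
  }
}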
flink_CopyOnWriteSkipListStateMap_getValueForSnapshot | /**
* Returns the value pointer used by the snapshot of the given version.
*
* @param snapshotVersion version of snapshot.
* @return the value pointer of the version used by the given snapshot. NIL_VALUE_POINTER will
* be returned if there is no value for this snapshot.
*/
long getValueForSnapshot(long node, int snapshotVersion) {
long snapshotValuePointer = NIL_VALUE_POINTER;
ValueVersionIterator versionIterator = new ValueVersionIterator(node);
long valuePointer;
while (versionIterator.hasNext()) {
valuePointer = versionIterator.getValuePointer();
int version = versionIterator.next();
// the first value whose version is less than snapshotVersion
if (version < snapshotVersion) {
snapshotValuePointer = valuePointer;
break;
}
}
return snapshotValuePointer;
} | 3.68 |
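A simplified model of the version scan: versions are iterated from newest to oldest and the first one strictly below the snapshot version is the value that snapshot must see. Parallel lists of ints and longs stand in for the skip-list node and its value pointers.

import java.util.Arrays;
import java.util.List;

public class SnapshotVersionSketch {
  private static final long NIL = -1L;

  // versions and valuePointers are parallel lists, ordered newest first.
  static long valueForSnapshot(List<Integer> versions, List<Long> valuePointers, int snapshotVersion) {
    for (int i = 0; i < versions.size(); i++) {
      if (versions.get(i) < snapshotVersion) {
        return valuePointers.get(i); // first version older than the snapshot
      }
    }
    return NIL; // no value visible to this snapshot
  }

  public static void main(String[] args) {
    List<Integer> versions = Arrays.asList(7, 5, 2);   // newest -> oldest
    List<Long> pointers = Arrays.asList(700L, 500L, 200L);
    System.out.println(valueForSnapshot(versions, pointers, 6)); // 500 (version 5)
    System.out.println(valueForSnapshot(versions, pointers, 2)); // -1  (nothing older than 2)
  }
}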
hbase_TableRecordReader_nextKeyValue | /**
* Positions the record reader to the next record.
* @return <code>true</code> if there was another record.
* @throws IOException When reading the record failed.
* @throws InterruptedException When the job was aborted.
* @see org.apache.hadoop.mapreduce.RecordReader#nextKeyValue()
*/
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
return this.recordReaderImpl.nextKeyValue();
} | 3.68 |
flink_TtlIncrementalCleanup_setTtlState | /**
 * As the TTL state wrapper depends on this class through an access callback, it has to be set here
* after its construction is done.
*/
public void setTtlState(@Nonnull AbstractTtlState<K, N, ?, S, ?> ttlState) {
this.ttlState = ttlState;
} | 3.68 |
hadoop_BlockBlobAppendStream_close | /**
* Force all data in the output stream to be written to Azure storage.
* Wait to return until this is complete. Close the access to the stream and
* shutdown the upload thread pool.
* If the blob was created, its lease will be released.
 * Any error caught and stored by the upload threads will be rethrown here
 * after cleanup.
*/
@Override
public synchronized void close() throws IOException {
LOG.debug("close {} ", key);
if (closed) {
return;
}
// Upload the last block regardless of compactionEnabled flag
flush();
// Initiates an orderly shutdown in which previously submitted tasks are
// executed.
ioThreadPool.shutdown();
try {
// wait up to CLOSE_UPLOAD_DELAY minutes to upload all the blocks
if (!ioThreadPool.awaitTermination(CLOSE_UPLOAD_DELAY, TimeUnit.MINUTES)) {
LOG.error("Time out occurred while close() is waiting for IO request to"
+ " finish in append"
+ " for blob : {}",
key);
NativeAzureFileSystemHelper.logAllLiveStackTraces();
throw new AzureException("Timed out waiting for IO requests to finish");
}
} catch(InterruptedException ex) {
Thread.currentThread().interrupt();
}
// release the lease
if (firstError.get() == null && blobExist) {
try {
lease.free();
} catch (StorageException ex) {
LOG.debug("Lease free update blob {} encountered Storage Exception:"
+ " {} Error Code : {}",
key,
ex,
ex.getErrorCode());
maybeSetFirstError(new AzureException(ex));
}
}
closed = true;
// finally, throw the first exception raised if it has not
// been thrown elsewhere.
if (firstError.get() != null && !firstErrorThrown) {
throw firstError.get();
}
} | 3.68 |
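The shutdown-and-wait portion follows a standard ExecutorService pattern; the sketch below reproduces it outside of any Azure specifics, with a placeholder pool size and timeout.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class OrderlyShutdownSketch {
  public static void main(String[] args) {
    ExecutorService ioThreadPool = Executors.newFixedThreadPool(4);
    ioThreadPool.submit(() -> System.out.println("uploading block..."));

    ioThreadPool.shutdown(); // stop accepting new tasks, let already submitted ones finish
    try {
      if (!ioThreadPool.awaitTermination(15, TimeUnit.MINUTES)) {
        // tasks did not finish in time; surface an error, as close() does
        throw new IllegalStateException("Timed out waiting for IO requests to finish");
      }
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt(); // restore the interrupt flag, as in close()
    }
  }
}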
flink_Tuple23_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple23<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17,
T18,
T19,
T20,
T21,
T22>
copy() {
return new Tuple23<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8,
this.f9, this.f10, this.f11, this.f12, this.f13, this.f14, this.f15, this.f16,
this.f17, this.f18, this.f19, this.f20, this.f21, this.f22);
} | 3.68 |
flink_EdgeManagerBuildUtil_computeMaxEdgesToTargetExecutionVertex | /**
* Given parallelisms of two job vertices, compute the max number of edges connected to a target
 * execution vertex from the source execution vertices. Note that an edge is considered undirected
 * here: it can connect an upstream job vertex to a downstream job vertex, or run in the reverse
 * direction.
*
* @param targetParallelism parallelism of the target job vertex.
* @param sourceParallelism parallelism of the source job vertex.
* @param distributionPattern the {@link DistributionPattern} of the connecting edge.
*/
public static int computeMaxEdgesToTargetExecutionVertex(
int targetParallelism, int sourceParallelism, DistributionPattern distributionPattern) {
switch (distributionPattern) {
case POINTWISE:
return (sourceParallelism + targetParallelism - 1) / targetParallelism;
case ALL_TO_ALL:
return sourceParallelism;
default:
throw new IllegalArgumentException("Unrecognized distribution pattern.");
}
} | 3.68 |
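A quick worked check of the POINTWISE branch, which is just a ceiling division of sourceParallelism by targetParallelism; the parallelism values below are arbitrary.

public class EdgeCountSketch {
  // Ceiling division: the max number of source subtasks wired to one target subtask.
  static int maxPointwiseEdges(int targetParallelism, int sourceParallelism) {
    return (sourceParallelism + targetParallelism - 1) / targetParallelism;
  }

  public static void main(String[] args) {
    System.out.println(maxPointwiseEdges(4, 10)); // ceil(10/4) = 3
    System.out.println(maxPointwiseEdges(10, 4)); // ceil(4/10) = 1
    System.out.println(maxPointwiseEdges(3, 3));  // 1 (one-to-one wiring)
  }
}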
flink_TaskLocalStateStoreImpl_deleteDirectory | /** Helper method to delete a directory. */
protected void deleteDirectory(File directory) throws IOException {
Path path = new Path(directory.toURI());
FileSystem fileSystem = path.getFileSystem();
if (fileSystem.exists(path)) {
fileSystem.delete(path, true);
}
} | 3.68 |
flink_TaskExecutor_onFatalError | /**
* Notifies the TaskExecutor that a fatal error has occurred and it cannot proceed.
*
* @param t The exception describing the fatal error
*/
void onFatalError(final Throwable t) {
try {
log.error("Fatal error occurred in TaskExecutor {}.", getAddress(), t);
} catch (Throwable ignored) {
}
// The fatal error handler implementation should make sure that this call is non-blocking
fatalErrorHandler.onFatalError(t);
} | 3.68 |