name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hbase_HFileLink_buildFromHFileLinkPattern | /**
* @param rootDir Path to the root directory where hbase files are stored
* @param archiveDir Path to the hbase archive directory
* @param hFileLinkPattern The path of the HFile Link.
*/
public final static HFileLink buildFromHFileLinkPattern(final Path rootDir, final Path archiveDir,
final Path hFileLinkPattern) {
Path hfilePath = getHFileLinkPatternRelativePath(hFileLinkPattern);
Path tempPath = new Path(new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY), hfilePath);
Path originPath = new Path(rootDir, hfilePath);
Path mobPath = new Path(new Path(rootDir, MobConstants.MOB_DIR_NAME), hfilePath);
Path archivePath = new Path(archiveDir, hfilePath);
return new HFileLink(originPath, tempPath, mobPath, archivePath);
} | 3.68 |
hadoop_CallableSupplier_waitForCompletionIgnoringExceptions | /**
 * Wait for a single future to complete, ignoring any exceptions raised.
* @param future future to wait for.
* @param <T> type
* @return the outcome if successfully retrieved.
*/
public static <T> Optional<T> waitForCompletionIgnoringExceptions(
@Nullable final CompletableFuture<T> future) {
try {
return maybeAwaitCompletion(future);
} catch (Exception e) {
LOG.debug("Ignoring exception raised in task completion: ", e);
return Optional.empty();
}
} | 3.68 |
hbase_RegionMover_maxthreads | /**
* Set the max number of threads that will be used to move regions
*/
public RegionMoverBuilder maxthreads(int threads) {
this.maxthreads = threads;
return this;
} | 3.68 |
pulsar_PositionImpl_toString | /**
* String representation of virtual cursor - LedgerId:EntryId.
*/
@Override
public String toString() {
return ledgerId + ":" + entryId;
} | 3.68 |
hudi_LSMTimeline_getFileLayer | /**
* Parse the layer number from the file name.
*/
public static int getFileLayer(String fileName) {
try {
Matcher fileMatcher = ARCHIVE_FILE_PATTERN.matcher(fileName);
if (fileMatcher.matches()) {
return Integer.parseInt(fileMatcher.group(3));
}
} catch (NumberFormatException e) {
// log and ignore any format warnings
LOG.warn("error getting file layout for archived file: " + fileName);
}
// return default value in case of any errors
return 0;
} | 3.68 |
framework_DropTargetExtension_setDropCriteria | /**
* Sets multiple drop criteria to allow drop on this drop target. When data
* is dragged on top of the drop target, the value of the given criteria is
* compared to the drag source's payload with the same key.
* <p>
 * The drag passes these criteria if, depending on {@code match}, any or all
 * of the criteria match the payload, that is, the comparison of the payload's
 * value to the criterion's value using the criterion's operator holds.
* <p>
* Note that calling this method will overwrite the previously set criteria.
* <p>
* To handle more complex criteria, define a custom script with
* {@link #setDropCriteriaScript(String)}. Drop will be allowed if both this
* criterion and the criteria script are passed.
*
* @param match
* defines whether any or all of the given criteria should match
* to allow drop on this drop target
* @param criteria
* criteria to be compared to the payload
*/
public void setDropCriteria(Criterion.Match match, Criterion... criteria) {
getState().criteriaMatch = match;
getState().criteria = Arrays.asList(criteria);
} | 3.68 |
pulsar_LoadManagerShared_isLoadSheddingEnabled | /**
* If load balancing is enabled, load shedding is enabled by default unless forced off by dynamic configuration.
*
* @return true by default
*/
public static boolean isLoadSheddingEnabled(final PulsarService pulsar) {
return pulsar.getConfiguration().isLoadBalancerEnabled()
&& pulsar.getConfiguration().isLoadBalancerSheddingEnabled();
} | 3.68 |
morf_InsertStatementBuilder_into | /**
* Inserts into a specific table.
*
* <blockquote><pre>
* new InsertStatement().into(new TableReference("agreement"));</pre></blockquote>
*
* @param intoTable the table to insert into.
* @return this, for method chaining.
*/
public InsertStatementBuilder into(TableReference intoTable) {
this.table = intoTable;
return this;
} | 3.68 |
hadoop_RegistryPathUtils_validateElementsAsDNS | /**
* Validate ZK path as valid for a DNS hostname.
* @param path path to validate
* @return the path parameter
* @throws InvalidPathnameException if the pathname is invalid.
*/
public static String validateElementsAsDNS(String path) throws
InvalidPathnameException {
List<String> splitpath = split(path);
for (String fragment : splitpath) {
if (!PATH_ENTRY_VALIDATION_PATTERN.matcher(fragment).matches()) {
throw new InvalidPathnameException(path,
"Invalid Path element \"" + fragment + "\"");
}
}
return path;
} | 3.68 |
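The snippet above validates each ZK path element against a DNS-name pattern that is not shown here. A minimal standalone sketch of the same per-element check, using an assumed lowercase DNS-label regex in place of the registry's actual PATH_ENTRY_VALIDATION_PATTERN:

```java
import java.util.regex.Pattern;

public class DnsPathCheck {
    // Assumed label pattern: lowercase alphanumerics and hyphens, not starting or ending
    // with a hyphen. The actual PATH_ENTRY_VALIDATION_PATTERN may differ.
    private static final Pattern LABEL =
            Pattern.compile("([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])");

    static String validate(String path) {
        for (String fragment : path.split("/")) {
            if (fragment.isEmpty()) {
                continue; // skip the empty fragment produced by the leading slash
            }
            if (!LABEL.matcher(fragment).matches()) {
                throw new IllegalArgumentException(
                        "Invalid path element \"" + fragment + "\" in " + path);
            }
        }
        return path;
    }

    public static void main(String[] args) {
        System.out.println(validate("/services/web-1")); // accepted
        try {
            validate("/services/Web_1");                 // uppercase and underscore rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```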
hadoop_EntityCacheItem_getStore | /**
* @return The timeline store, either loaded or unloaded, of this cache item.
 * This method will not prevent the store from being reclaimed.
*/
public synchronized TimelineStore getStore() {
return store;
} | 3.68 |
pulsar_AuthenticationState_getStateId | /**
* Get AuthenticationState ID.
*/
default long getStateId() {
return -1L;
} | 3.68 |
hadoop_DiskBalancerDataNode_isBalancingNeeded | /**
* Computes if this node needs balancing at all.
*
* @param threshold - Percentage
* @return true or false
*/
public boolean isBalancingNeeded(double threshold) {
for (DiskBalancerVolumeSet vSet : getVolumeSets().values()) {
if (vSet.isBalancingNeeded(threshold)) {
return true;
}
}
return false;
} | 3.68 |
hbase_Table_coprocessorService | /**
* Creates an instance of the given {@link Service} subclass for each table region spanning the
* range from the {@code startKey} row to {@code endKey} row (inclusive), and invokes the passed
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method with each
* {@link Service} instance.
* <p/>
* The given
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[],byte[],Object)}
* method will be called with the return value from each region's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
* @param service the protocol buffer {@code Service} implementation to call
* @param startKey start region selection with region containing this row. If {@code null}, the
* selection will start with the first table region.
* @param endKey select regions up to and including the region containing this row. If
* {@code null}, selection will continue through the last table region.
* @param callable this instance's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method will
* be invoked once per table region, using the {@link Service} instance connected
* to that region.
* @param <T> the {@link Service} subclass to connect to
* @param <R> Return type for the {@code callable} parameter's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
 * @deprecated since 3.0.0, will be removed in 4.0.0. The batch call here references the blocking
 *             interface of a protobuf stub, so it is not possible to do it in an asynchronous
 *             way, even though we now build the {@link Table} implementation on top of
 *             {@link AsyncTable}, which is not good. Use the coprocessorService methods in
 *             {@link AsyncTable} directly instead.
* @see Connection#toAsyncConnection()
*/
@Deprecated
default <T extends Service, R> void coprocessorService(final Class<T> service, byte[] startKey,
byte[] endKey, final Batch.Call<T, R> callable, final Batch.Callback<R> callback)
throws ServiceException, Throwable {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
hudi_AvroOrcUtils_getActualSchemaType | /**
* Returns the actual schema of a field.
*
 * All types in ORC are nullable, whereas Avro uses a union that contains the NULL type to imply
* the nullability of an Avro type. To achieve consistency between the Avro and ORC schema,
* non-NULL types are extracted from the union type.
* @param unionSchema A schema of union type.
 * @return The NULL schema if the union has no non-NULL members, the single non-NULL member if there is exactly one, or a UNION of the non-NULL members otherwise.
*/
private static Schema getActualSchemaType(Schema unionSchema) {
final List<Schema> nonNullMembers = unionSchema.getTypes().stream().filter(
schema -> !Schema.Type.NULL.equals(schema.getType())
).collect(Collectors.toList());
if (nonNullMembers.isEmpty()) {
return Schema.create(Schema.Type.NULL);
} else if (nonNullMembers.size() == 1) {
return nonNullMembers.get(0);
} else {
return Schema.createUnion(nonNullMembers);
}
} | 3.68 |
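A hedged usage sketch of the same unwrapping rule, written against the public Avro Schema API rather than the Hudi utility itself (assumes the org.apache.avro:avro artifact is on the classpath):

```java
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.avro.Schema;

public class UnionUnwrapDemo {
    public static void main(String[] args) {
        // ["null", "string"] -- the usual Avro encoding of a nullable string field
        Schema nullableString = Schema.createUnion(Arrays.asList(
                Schema.create(Schema.Type.NULL),
                Schema.create(Schema.Type.STRING)));

        List<Schema> nonNull = nullableString.getTypes().stream()
                .filter(s -> s.getType() != Schema.Type.NULL)
                .collect(Collectors.toList());

        // Same rule as above: empty -> NULL, one member -> that member, several -> a new union.
        Schema actual = nonNull.isEmpty() ? Schema.create(Schema.Type.NULL)
                : nonNull.size() == 1 ? nonNull.get(0)
                : Schema.createUnion(nonNull);

        System.out.println(actual); // "string"
    }
}
```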
flink_BytesMap_free | /** @param reservedRecordMemory whether the fixed record memory stays reserved; if false, the memory pool is closed. */
public void free(boolean reservedRecordMemory) {
returnSegments(this.bucketSegments);
this.bucketSegments.clear();
if (!reservedRecordMemory) {
memoryPool.close();
}
numElements = 0;
} | 3.68 |
flink_DefaultDelegationTokenManager_start | /**
* Creates a re-occurring task which obtains new tokens and automatically distributes them to
* task managers.
*/
@Override
public void start(Listener listener) throws Exception {
checkNotNull(scheduledExecutor, "Scheduled executor must not be null");
checkNotNull(ioExecutor, "IO executor must not be null");
this.listener = checkNotNull(listener, "Listener must not be null");
synchronized (tokensUpdateFutureLock) {
checkState(tokensUpdateFuture == null, "Manager is already started");
}
startTokensUpdate();
} | 3.68 |
framework_Design_write | /**
* Writes the given jsoup document to the output stream (in UTF-8)
*
* @param doc
* the document to write
* @param outputStream
* the stream to write to
* @throws IOException
* if writing fails
*/
private static void write(Document doc, OutputStream outputStream)
throws IOException {
doc.outputSettings().indentAmount(4);
doc.outputSettings().syntax(Syntax.html);
doc.outputSettings().prettyPrint(true);
outputStream.write(doc.html().getBytes(UTF_8));
} | 3.68 |
framework_AbstractTextField_addBlurListener | /**
* Adds a {@link BlurListener} to this component, which gets fired when this
* component loses keyboard focus.
*
* @param listener
* the blur listener
* @return a registration for the listener
*
* @see Registration
*/
@Override
public Registration addBlurListener(BlurListener listener) {
return addListener(BlurEvent.EVENT_ID, BlurEvent.class, listener,
BlurListener.blurMethod);
} | 3.68 |
hbase_ActiveMasterManager_setInfoPort | // will be set after jetty server is started
public void setInfoPort(int infoPort) {
this.infoPort = infoPort;
} | 3.68 |
framework_BaseAlignment_buildLayout | /**
* Build Layout for test
*/
private void buildLayout() {
for (int i = 0; i < components.length; i++) {
AbstractOrderedLayout layout = null;
try {
layout = (AbstractOrderedLayout) layoutClass.newInstance();
} catch (InstantiationException | IllegalAccessException e) {
e.printStackTrace();
}
layout.setMargin(false);
layout.setSpacing(false);
layout.setHeight("100px");
layout.setWidth("200px");
layout.addComponent(components[i]);
layout.setComponentAlignment(components[i], alignments[i]);
if (i < components.length / 2) {
l1.addComponent(layout);
} else {
l2.addComponent(layout);
}
}
} | 3.68 |
hadoop_AwsStatisticsCollector_counter | /**
* Process a counter.
* @param collection metric collection
* @param metric metric
* @param consumer consumer
*/
private void counter(
MetricCollection collection,
SdkMetric<Integer> metric,
LongConsumer consumer) {
collection
.metricValues(metric)
.forEach(v -> consumer.accept(v.longValue()));
} | 3.68 |
framework_CalendarDateRange_toString | /*
* (non-Javadoc)
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "CalendarDateRange [start=" + start + ", end=" + end + "]";
} | 3.68 |
flink_WindowedStateTransformation_trigger | /** Sets the {@code Trigger} that should be used to trigger window emission. */
@PublicEvolving
public WindowedStateTransformation<T, K, W> trigger(Trigger<? super T, ? super W> trigger) {
builder.trigger(trigger);
return this;
} | 3.68 |
flink_SuperstepKickoffLatchBroker_instance | /** Retrieve the singleton instance. */
public static Broker<SuperstepKickoffLatch> instance() {
return INSTANCE;
} | 3.68 |
hadoop_Find_isStop | /** Returns true if the {@link PathData} item is in the stop set. */
private boolean isStop(PathData item) {
return stopPaths.contains(item.path);
} | 3.68 |
hbase_Scan_setMaxResultSize | /**
 * Set the maximum result size. The default is -1; this means that no specific maximum result size
 * is set for this scan, and the globally configured value is used instead (unlimited by default).
* @param maxResultSize The maximum result size in bytes.
*/
public Scan setMaxResultSize(long maxResultSize) {
this.maxResultSize = maxResultSize;
return this;
} | 3.68 |
hbase_CompactionTool_getStoreDirHosts | /**
 * Returns the top hosts of the store files; used by the split.
*/
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
throws IOException {
FileStatus[] files = CommonFSUtils.listStatus(fs, path);
if (files == null) {
return new String[] {};
}
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
for (FileStatus hfileStatus : files) {
HDFSBlocksDistribution storeFileBlocksDistribution =
FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
List<String> hosts = hdfsBlocksDistribution.getTopHosts();
return hosts.toArray(new String[hosts.size()]);
} | 3.68 |
dubbo_FrameworkModel_getApplicationModels | /**
* Get all application models except for the internal application model.
*/
public List<ApplicationModel> getApplicationModels() {
synchronized (globalLock) {
return Collections.unmodifiableList(pubApplicationModels);
}
} | 3.68 |
flink_IntValue_getValue | /**
* Returns the value of the encapsulated int.
*
* @return the value of the encapsulated int.
*/
public int getValue() {
return this.value;
} | 3.68 |
flink_FlinkContainers_getJobManager | /** Gets JobManager container. */
public GenericContainer<?> getJobManager() {
return this.jobManager;
} | 3.68 |
hbase_HRegion_attachRegionReplicationToMVCCEntry | /**
* Create {@link WALKeyImpl} and get {@link WALEdit} from miniBatchOp and attach
* {@link RegionReplicationSink#add} to the mvccWriteEntry.
*/
private void attachRegionReplicationToMVCCEntry(
final MiniBatchOperationInProgress<Mutation> miniBatchOp, WriteEntry mvccWriteEntry, long now)
throws IOException {
if (!this.regionReplicateEnable) {
return;
}
assert !mvccWriteEntry.getCompletionAction().isPresent();
final WALKeyImpl walKey = this.createWALKey(now);
walKey.setWriteEntry(mvccWriteEntry);
region.doAttachReplicateRegionReplicaAction(walKey,
miniBatchOp.getWalEditForReplicateIfExistsSkipWAL(), mvccWriteEntry);
} | 3.68 |
hadoop_FederationPolicyInitializationContext_getSubClusterPolicyConfiguration | /**
* Getter for the {@link SubClusterPolicyConfiguration}.
*
* @return the {@link SubClusterPolicyConfiguration} to be used for
* initialization.
*/
public SubClusterPolicyConfiguration getSubClusterPolicyConfiguration() {
return federationPolicyConfiguration;
} | 3.68 |
flink_RemoteInputChannel_onBuffer | /**
 * Handles the input buffer. This method takes over ownership of the buffer and is fully
 * responsible for cleaning it up, both on the happy path and in case of an error.
*/
public void onBuffer(Buffer buffer, int sequenceNumber, int backlog) throws IOException {
boolean recycleBuffer = true;
try {
if (expectedSequenceNumber != sequenceNumber) {
onError(new BufferReorderingException(expectedSequenceNumber, sequenceNumber));
return;
}
if (buffer.getDataType().isBlockingUpstream()) {
onBlockingUpstream();
checkArgument(backlog == 0, "Illegal number of backlog: %s, should be 0.", backlog);
}
final boolean wasEmpty;
boolean firstPriorityEvent = false;
synchronized (receivedBuffers) {
NetworkActionsLogger.traceInput(
"RemoteInputChannel#onBuffer",
buffer,
inputGate.getOwningTaskName(),
channelInfo,
channelStatePersister,
sequenceNumber);
// Similar to notifyBufferAvailable(), make sure that we never add a buffer
// after releaseAllResources() released all buffers from receivedBuffers
// (see above for details).
if (isReleased.get()) {
return;
}
wasEmpty = receivedBuffers.isEmpty();
SequenceBuffer sequenceBuffer = new SequenceBuffer(buffer, sequenceNumber);
DataType dataType = buffer.getDataType();
if (dataType.hasPriority()) {
firstPriorityEvent = addPriorityBuffer(sequenceBuffer);
recycleBuffer = false;
} else {
receivedBuffers.add(sequenceBuffer);
recycleBuffer = false;
if (dataType.requiresAnnouncement()) {
firstPriorityEvent = addPriorityBuffer(announce(sequenceBuffer));
}
}
totalQueueSizeInBytes += buffer.getSize();
final OptionalLong barrierId =
channelStatePersister.checkForBarrier(sequenceBuffer.buffer);
if (barrierId.isPresent() && barrierId.getAsLong() > lastBarrierId) {
// checkpoint was not yet started by task thread,
// so remember the numbers of buffers to spill for the time when
// it will be started
lastBarrierId = barrierId.getAsLong();
lastBarrierSequenceNumber = sequenceBuffer.sequenceNumber;
}
channelStatePersister.maybePersist(buffer);
++expectedSequenceNumber;
}
if (firstPriorityEvent) {
notifyPriorityEvent(sequenceNumber);
}
if (wasEmpty) {
notifyChannelNonEmpty();
}
if (backlog >= 0) {
onSenderBacklog(backlog);
}
} finally {
if (recycleBuffer) {
buffer.recycleBuffer();
}
}
} | 3.68 |
framework_Window_setAssistivePrefix | /**
* Sets the accessibility prefix for the window caption.
*
* This prefix is read to assistive device users before the window caption,
* but not visible on the page.
*
* @param prefix
* String that is placed before the window caption
*/
public void setAssistivePrefix(String prefix) {
getState().assistivePrefix = prefix;
} | 3.68 |
hudi_InternalSchemaChangeApplier_applyReOrderColPositionChange | /**
* Reorder the position of col.
*
 * @param colName column to be reordered. If the column is a nested field, its full name must be specified.
 * @param referColName the column used as the reference position.
 * @param positionType column position change type; three change types are supported: first/after/before
*/
public InternalSchema applyReOrderColPositionChange(
String colName,
String referColName,
TableChange.ColumnPositionChange.ColumnPositionType positionType) {
TableChanges.ColumnUpdateChange updateChange = TableChanges.ColumnUpdateChange.get(latestSchema);
String parentName = TableChangesHelper.getParentName(colName);
String referParentName = TableChangesHelper.getParentName(referColName);
if (positionType.equals(TableChange.ColumnPositionChange.ColumnPositionType.FIRST)) {
updateChange.addPositionChange(colName, "", positionType);
} else if (parentName.equals(referParentName)) {
updateChange.addPositionChange(colName, referColName, positionType);
} else {
throw new IllegalArgumentException("cannot reorder two columns which has different parent");
}
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, updateChange);
} | 3.68 |
cron-utils_CronDefinitionBuilder_unixCrontab | /**
* Creates CronDefinition instance matching unix crontab specification.
*
* @return CronDefinition instance, never null;
*/
private static CronDefinition unixCrontab() {
return CronDefinitionBuilder.defineCron()
.withMinutes().withValidRange(0, 59).withStrictRange().and()
.withHours().withValidRange(0, 23).withStrictRange().and()
.withDayOfMonth().withValidRange(1, 31).withStrictRange().and()
.withMonth().withValidRange(1, 12).withStrictRange().and()
.withDayOfWeek().withValidRange(0, 7).withMondayDoWValue(1).withIntMapping(7, 0).withStrictRange().and()
.instance();
} | 3.68 |
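For context, a small usage sketch of how such a definition is typically consumed, assuming the standard cron-utils entry points (CronDefinitionBuilder.instanceDefinitionFor and CronParser), which ship a UNIX definition equivalent to the builder chain above:

```java
import com.cronutils.model.Cron;
import com.cronutils.model.CronType;
import com.cronutils.model.definition.CronDefinition;
import com.cronutils.model.definition.CronDefinitionBuilder;
import com.cronutils.parser.CronParser;

public class UnixCronDemo {
    public static void main(String[] args) {
        CronDefinition unix = CronDefinitionBuilder.instanceDefinitionFor(CronType.UNIX);
        CronParser parser = new CronParser(unix);

        // "at 00:30 every Monday" in unix crontab syntax
        Cron cron = parser.parse("30 0 * * 1");
        cron.validate();
        System.out.println(cron.asString()); // 30 0 * * 1
    }
}
```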
flink_TypeExtractionUtils_typeToClass | /** Convert ParameterizedType or Class to a Class. */
@SuppressWarnings("unchecked")
public static <T> Class<T> typeToClass(Type t) {
if (t instanceof Class) {
return (Class<T>) t;
} else if (t instanceof ParameterizedType) {
return ((Class<T>) ((ParameterizedType) t).getRawType());
}
throw new IllegalArgumentException("Cannot convert type to class");
} | 3.68 |
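A self-contained sketch mirroring the same erasure step (not the Flink class itself): a ParameterizedType such as List&lt;String&gt;, obtained via reflection, resolves to its raw Class:

```java
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.List;

public class TypeToClassDemo {
    @SuppressWarnings("unchecked")
    static <T> Class<T> typeToClass(Type t) {
        if (t instanceof Class) {
            return (Class<T>) t;
        } else if (t instanceof ParameterizedType) {
            return (Class<T>) ((ParameterizedType) t).getRawType();
        }
        throw new IllegalArgumentException("Cannot convert type to class");
    }

    // A field whose generic type is a ParameterizedType at runtime.
    static List<String> names;

    public static void main(String[] args) throws NoSuchFieldException {
        Type generic = TypeToClassDemo.class.getDeclaredField("names").getGenericType();
        System.out.println(typeToClass(generic)); // interface java.util.List
    }
}
```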
hadoop_AbfsConfiguration_getString | /**
* Returns the account-specific value if it exists, then looks for an
* account-agnostic value.
* @param key Account-agnostic configuration key
* @return value if one exists, else the default value
*/
public String getString(String key, String defaultValue) {
return rawConfig.get(accountConf(key), rawConfig.get(key, defaultValue));
} | 3.68 |
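A self-contained sketch of the same lookup order: prefer an account-qualified key, fall back to the plain key, then to the default. The key names and the qualification scheme are illustrative assumptions; the real accountConf() helper is not shown in the snippet:

```java
import java.util.HashMap;
import java.util.Map;

public class LayeredConfigDemo {
    static String getString(Map<String, String> conf, String key, String account, String defaultValue) {
        String accountSpecific = conf.get(key + "." + account); // assumed qualification scheme
        if (accountSpecific != null) {
            return accountSpecific;
        }
        return conf.getOrDefault(key, defaultValue);
    }

    public static void main(String[] args) {
        Map<String, String> conf = new HashMap<>();
        conf.put("fs.azure.readahead", "4");            // illustrative key names
        conf.put("fs.azure.readahead.myaccount", "8");
        System.out.println(getString(conf, "fs.azure.readahead", "myaccount", "1")); // 8
        System.out.println(getString(conf, "fs.azure.readahead", "other", "1"));     // 4
    }
}
```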
flink_FutureUtils_supplyAsync | /**
* Returns a future which is completed with the result of the {@link SupplierWithException}.
*
* @param supplier to provide the future's value
* @param executor to execute the supplier
* @param <T> type of the result
* @return Future which is completed with the value of the supplier
*/
public static <T> CompletableFuture<T> supplyAsync(
SupplierWithException<T, ?> supplier, Executor executor) {
return CompletableFuture.supplyAsync(
() -> {
try {
return supplier.get();
} catch (Throwable e) {
throw new CompletionException(e);
}
},
executor);
} | 3.68 |
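A self-contained sketch of the same pattern using only java.util.concurrent: a supplier that may throw a checked exception is wrapped so that a failure completes the future exceptionally instead of requiring a try/catch at every call site:

```java
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SupplyAsyncDemo {
    interface ThrowingSupplier<T> {
        T get() throws Exception;
    }

    static <T> CompletableFuture<T> supplyAsync(ThrowingSupplier<T> supplier, ExecutorService executor) {
        return CompletableFuture.supplyAsync(
                () -> {
                    try {
                        return supplier.get();
                    } catch (Exception e) {
                        throw new CompletionException(e); // surfaces as an exceptional completion
                    }
                },
                executor);
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        CompletableFuture<String> ok = supplyAsync(() -> "done", pool);
        CompletableFuture<String> failing = supplyAsync(() -> { throw new IOException("boom"); }, pool);

        System.out.println(ok.join());                                                // done
        System.out.println(failing.handle((v, t) -> t.getCause().getMessage()).join()); // boom
        pool.shutdown();
    }
}
```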
hbase_ZKWatcher_getNumberOfListeners | /** Returns The number of currently registered listeners */
public int getNumberOfListeners() {
return listeners.size();
} | 3.68 |
hbase_Response_getBody | /** Returns the HTTP response body */
public byte[] getBody() {
if (body == null) {
try {
body = Client.getResponseBody(resp);
} catch (IOException ioe) {
LOG.debug("encountered ioe when obtaining body", ioe);
}
}
return body;
} | 3.68 |
framework_ComboBoxScrollingWithArrows_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return DESCRIPTION;
} | 3.68 |
druid_NameResolveVisitor_isAliasColumn | /**
 * Whether the expression is an alias of a select item column.
 *
 * @param x the expression to check
 * @param source the query block to search in when making the decision
 * @return true if it is an alias column, false otherwise
*/
public boolean isAliasColumn(SQLExpr x, SQLSelectQueryBlock source) {
if (x instanceof SQLIdentifierExpr) {
SQLIdentifierExpr identifierExpr = (SQLIdentifierExpr) x;
long nameHashCode64 = identifierExpr.nameHashCode64();
SQLSelectQueryBlock queryBlock = source;
SQLSelectItem selectItem = queryBlock.findSelectItem(nameHashCode64);
if (selectItem != null) {
return true;
}
if (queryBlock.getFrom() instanceof SQLSubqueryTableSource
&& ((SQLSubqueryTableSource) queryBlock.getFrom()).getSelect().getQuery() instanceof SQLSelectQueryBlock) {
SQLSelectQueryBlock subQueryBlock = ((SQLSubqueryTableSource) queryBlock.getFrom()).getSelect().getQueryBlock();
if (isAliasColumn(x, subQueryBlock)) {
return true;
}
}
}
return false;
} | 3.68 |
hadoop_DiffList_emptyList | /**
* Returns an empty DiffList.
*/
static <T extends Comparable<Integer>> DiffList<T> emptyList() {
return EMPTY_LIST;
} | 3.68 |
framework_VLazyExecutor_trigger | /**
* Triggers execution of the command. Each call reschedules any existing
* execution to {@link #delayMs} milliseconds from that point in time.
*/
public void trigger() {
if (timer == null) {
timer = new Timer() {
@Override
public void run() {
timer = null;
cmd.execute();
}
};
}
// Schedule automatically cancels any old schedule
timer.schedule(delayMs);
} | 3.68 |
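An analogous debounce sketch using a ScheduledExecutorService instead of the GWT Timer used by the widget above; each trigger() cancels the pending run and reschedules it, so only the last trigger in a burst executes the command:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class LazyExecutorDemo {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final long delayMs;
    private final Runnable cmd;
    private ScheduledFuture<?> pending;

    LazyExecutorDemo(long delayMs, Runnable cmd) {
        this.delayMs = delayMs;
        this.cmd = cmd;
    }

    synchronized void trigger() {
        if (pending != null) {
            pending.cancel(false);          // reschedule: drop the previously pending run
        }
        pending = scheduler.schedule(cmd, delayMs, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        LazyExecutorDemo lazy = new LazyExecutorDemo(200, () -> System.out.println("executed once"));
        for (int i = 0; i < 5; i++) {
            lazy.trigger();                 // rapid triggers collapse into a single execution
            Thread.sleep(50);
        }
        Thread.sleep(500);
        lazy.scheduler.shutdown();
    }
}
```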
hudi_AbstractTableFileSystemView_init | /**
* Initialize the view.
*/
protected void init(HoodieTableMetaClient metaClient, HoodieTimeline visibleActiveTimeline) {
this.metaClient = metaClient;
this.completionTimeQueryView = new CompletionTimeQueryView(metaClient);
refreshTimeline(visibleActiveTimeline);
resetFileGroupsReplaced(visibleCommitsAndCompactionTimeline);
this.bootstrapIndex = BootstrapIndex.getBootstrapIndex(metaClient);
// Load Pending Compaction Operations
resetPendingCompactionOperations(CompactionUtils.getAllPendingCompactionOperations(metaClient).values().stream()
.map(e -> Pair.of(e.getKey(), CompactionOperation.convertFromAvroRecordInstance(e.getValue()))));
// Load Pending LogCompaction Operations.
resetPendingLogCompactionOperations(CompactionUtils.getAllPendingLogCompactionOperations(metaClient).values().stream()
.map(e -> Pair.of(e.getKey(), CompactionOperation.convertFromAvroRecordInstance(e.getValue()))));
resetBootstrapBaseFileMapping(Stream.empty());
resetFileGroupsInPendingClustering(ClusteringUtils.getAllFileGroupsInPendingClusteringPlans(metaClient));
} | 3.68 |
flink_AbstractBinaryExternalMerger_getMergingIterator | /**
* Returns an iterator that iterates over the merged result from all given channels.
*
* @param channelIDs The channels that are to be merged and returned.
* @return An iterator over the merged records of the input channels.
* @throws IOException Thrown, if the readers encounter an I/O problem.
*/
public BinaryMergeIterator<Entry> getMergingIterator(
List<ChannelWithMeta> channelIDs, List<FileIOChannel> openChannels) throws IOException {
// create one iterator per channel id
if (LOG.isDebugEnabled()) {
LOG.debug("Performing merge of " + channelIDs.size() + " sorted streams.");
}
final List<MutableObjectIterator<Entry>> iterators = new ArrayList<>(channelIDs.size() + 1);
for (ChannelWithMeta channel : channelIDs) {
AbstractChannelReaderInputView view =
FileChannelUtil.createInputView(
ioManager,
channel,
openChannels,
compressionEnabled,
compressionCodecFactory,
compressionBlockSize,
pageSize);
iterators.add(channelReaderInputViewIterator(view));
}
return new BinaryMergeIterator<>(
iterators, mergeReusedEntries(channelIDs.size()), mergeComparator());
} | 3.68 |
hbase_MutableRegionInfo_setSplit | /**
* Change the split status flag.
* @param split set split status
*/
public MutableRegionInfo setSplit(boolean split) {
this.split = split;
return this;
} | 3.68 |
hbase_MobFile_getFileName | /**
* Gets the file name.
* @return The file name.
*/
public String getFileName() {
return sf.getPath().getName();
} | 3.68 |
pulsar_ProducerResponse_getSchemaVersion | // Shadows the default getter generated by Lombok. In the broker, if the schema version is an empty byte array, it means
// the topic doesn't have a schema.
public byte[] getSchemaVersion() {
if (schemaVersion != null && schemaVersion.length != 0) {
return schemaVersion;
} else {
return null;
}
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_computeIntegerAssignment | /**
* Split the integer into bins according to the weights.
*
* @param totalNum total number of containers to split
* @param weightsList the weights for each subcluster
* @return the container allocation after split
* @throws YarnException if fails
*/
@VisibleForTesting
protected ArrayList<Integer> computeIntegerAssignment(int totalNum,
ArrayList<Float> weightsList) throws YarnException {
int i, residue;
ArrayList<Integer> ret = new ArrayList<>();
float totalWeight = 0, totalNumFloat = totalNum;
if (weightsList.size() == 0) {
return ret;
}
for (i = 0; i < weightsList.size(); i++) {
ret.add(0);
if (weightsList.get(i) > 0) {
totalWeight += weightsList.get(i);
}
}
if (totalWeight == 0) {
StringBuilder sb = new StringBuilder();
for (Float weight : weightsList) {
sb.append(weight + ", ");
}
throw new FederationPolicyException(
"No positive value found in weight array " + sb.toString());
}
// First pass, do flooring for all bins
residue = totalNum;
for (i = 0; i < weightsList.size(); i++) {
if (weightsList.get(i) > 0) {
int base = (int) (totalNumFloat * weightsList.get(i) / totalWeight);
ret.set(i, ret.get(i) + base);
residue -= base;
}
}
// By now residue < weights.length, assign one a time
for (i = 0; i < residue; i++) {
int index = FederationPolicyUtils.getWeightedRandom(weightsList);
ret.set(index, ret.get(index) + 1);
}
return ret;
} | 3.68 |
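A simplified, self-contained sketch of the same floor-then-distribute-residue idea. It hands the residue to the bins with the largest remainders instead of drawing weighted-random indices as the Hadoop policy above does:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class WeightedSplitDemo {
    static List<Integer> split(int totalNum, List<Float> weights) {
        float totalWeight = 0;
        for (float w : weights) {
            totalWeight += w;
        }
        List<Integer> result = new ArrayList<>();
        List<Float> remainders = new ArrayList<>();
        int residue = totalNum;
        for (float w : weights) {
            float exact = totalNum * w / totalWeight;
            int base = (int) exact;              // first pass: floor every bin
            result.add(base);
            remainders.add(exact - base);
            residue -= base;
        }
        // Second pass: hand the leftover containers to the bins with the largest remainders.
        List<Integer> order = IntStream.range(0, weights.size()).boxed()
                .sorted((a, b) -> Float.compare(remainders.get(b), remainders.get(a)))
                .collect(Collectors.toList());
        for (int i = 0; i < residue; i++) {
            int idx = order.get(i);
            result.set(idx, result.get(idx) + 1);
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(split(10, Arrays.asList(1f, 1f, 2f))); // [3, 2, 5]
    }
}
```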
hadoop_HdfsFileStatus_getFullPath | /**
* Get the full path.
* @param parent the parent path
* @return the full path
*/
default Path getFullPath(Path parent) {
if (isEmptyLocalName()) {
return parent;
}
return new Path(parent, getLocalName());
} | 3.68 |
morf_SqlScriptExecutor_withParameterData | /**
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.QueryBuilder#withParameterData(org.alfasoftware.morf.metadata.DataValueLookup)
*/
@Override
public QueryBuilder withParameterData(DataValueLookup parameterData) {
this.parameterData = parameterData;
return this;
} | 3.68 |
framework_ReverseConverter_getPresentationType | /*
* (non-Javadoc)
*
* @see com.vaadin.data.util.converter.Converter#getTargetType()
*/
@Override
public Class<PRESENTATION> getPresentationType() {
return realConverter.getModelType();
} | 3.68 |
flink_JobSubmissionResult_getJobExecutionResult | /**
* Returns the JobExecutionResult if available.
*
* @return The JobExecutionResult
* @throws ClassCastException if this is not a JobExecutionResult
*/
public JobExecutionResult getJobExecutionResult() {
throw new ClassCastException("This JobSubmissionResult is not a JobExecutionResult.");
} | 3.68 |
hadoop_AMRMProxyService_allocate | /**
* This is called by the AMs started on this node to send heart beat to RM.
* This method does the initial authorization and then forwards the request to
* the application instance specific pipeline, which is a chain of request
* interceptor objects. One application request processing pipeline is created
* per AM instance.
*/
@Override
public AllocateResponse allocate(AllocateRequest request)
throws YarnException, IOException {
this.metrics.incrAllocateCount();
long startTime = clock.getTime();
try {
AMRMTokenIdentifier amrmTokenIdentifier =
YarnServerSecurityUtils.authorizeRequest();
RequestInterceptorChainWrapper pipeline =
getInterceptorChain(amrmTokenIdentifier);
AllocateResponse allocateResponse =
pipeline.getRootInterceptor().allocate(request);
updateAMRMTokens(amrmTokenIdentifier, pipeline, allocateResponse);
long endTime = clock.getTime();
this.metrics.succeededAllocateRequests(endTime - startTime);
LOG.info("Allocate processing finished in {} ms for application {}.",
endTime - startTime, pipeline.getApplicationAttemptId());
return allocateResponse;
} catch (Throwable t) {
this.metrics.incrFailedAllocateRequests();
throw t;
}
} | 3.68 |
hadoop_BlockBlobInputStream_seekToNewSource | /**
 * Seeks a secondary copy of the data. This method is not supported.
* @param targetPos a zero-based byte offset in the stream.
* @return false
* @throws IOException IO failure
*/
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
} | 3.68 |
framework_TableElement_openCollapseMenu | /**
* Opens the collapse menu of this table and returns the element for it.
*
* @return collapse menu element
*/
public CollapseMenuElement openCollapseMenu() {
getCollapseMenuToggle().click();
WebElement cm = getDriver()
.findElement(By.xpath("//*[@id='PID_VAADIN_CM']"));
return wrapElement(cm, getCommandExecutor())
.wrap(CollapseMenuElement.class);
} | 3.68 |
pulsar_NamespacesBase_internalSetReplicatorDispatchRate | /**
* Base method for setReplicatorDispatchRate v1 and v2.
 * Note: don't re-use this logic.
*/
protected void internalSetReplicatorDispatchRate(AsyncResponse asyncResponse, DispatchRateImpl dispatchRate) {
validateSuperUserAccessAsync()
.thenAccept(__ -> {
log.info("[{}] Set namespace replicator dispatch-rate {}/{}",
clientAppId(), namespaceName, dispatchRate);
}).thenCompose(__ -> namespaceResources().setPoliciesAsync(namespaceName, policies -> {
String clusterName = pulsar().getConfiguration().getClusterName();
policies.replicatorDispatchRate.put(clusterName, dispatchRate);
return policies;
})).thenAccept(__ -> {
asyncResponse.resume(Response.noContent().build());
log.info("[{}] Successfully updated the replicatorDispatchRate for cluster on namespace {}",
clientAppId(), namespaceName);
}).exceptionally(ex -> {
resumeAsyncResponseExceptionally(asyncResponse, ex);
log.error("[{}] Failed to update the replicatorDispatchRate for cluster on namespace {}",
clientAppId(), namespaceName, ex);
return null;
});
} | 3.68 |
framework_VaadinServletService_getCancelingRelativePath | /**
* Gets a relative path that cancels the provided path. This essentially
* adds one .. for each part of the path to cancel.
*
* @param pathToCancel
* the path that should be canceled
* @return a relative path that cancels out the provided path segment
*/
public static String getCancelingRelativePath(String pathToCancel) {
StringBuilder sb = new StringBuilder(".");
// Start from i = 1 to ignore first slash
for (int i = 1; i < pathToCancel.length(); i++) {
if (pathToCancel.charAt(i) == '/') {
sb.append("/..");
}
}
return sb.toString();
} | 3.68 |
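A standalone mirror of the logic above (not calling the Vaadin class) that shows the shape of the output: each '/' after the leading one contributes one "/.." segment, which cancels the path under relative URL resolution:

```java
public class CancelPathDemo {
    static String cancelingRelativePath(String pathToCancel) {
        StringBuilder sb = new StringBuilder(".");
        for (int i = 1; i < pathToCancel.length(); i++) {  // skip the leading slash
            if (pathToCancel.charAt(i) == '/') {
                sb.append("/..");
            }
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(cancelingRelativePath("/foo"));         // .
        System.out.println(cancelingRelativePath("/foo/bar"));     // ./..
        System.out.println(cancelingRelativePath("/foo/bar/baz")); // ./../..
    }
}
```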
hbase_AsyncRpcRetryingCallerFactory_call | /**
* Shortcut for {@code build().call()}
*/
public CompletableFuture<T> call() {
return build().call();
} | 3.68 |
querydsl_GroupByBuilder_as | /**
* Get the results as a map
*
* @param expression projection
* @return new result transformer
*/
public <V> ResultTransformer<Map<K, V>> as(FactoryExpression<V> expression) {
final FactoryExpression<?> transformation = FactoryExpressionUtils.wrap(expression);
List<Expression<?>> args = transformation.getArgs();
return new GroupByMap<K, V>(key, args.toArray(new Expression<?>[0])) {
@Override
protected Map<K, V> transform(Map<K, Group> groups) {
Map<K, V> results = new LinkedHashMap<K, V>((int) Math.ceil(groups.size() / 0.75), 0.75f);
for (Map.Entry<K, Group> entry : groups.entrySet()) {
results.put(entry.getKey(), transform(entry.getValue()));
}
return results;
}
@SuppressWarnings("unchecked")
protected V transform(Group group) {
// XXX Isn't group.toArray() suitable here?
List<Object> args = new ArrayList<Object>(groupExpressions.size() - 1);
for (int i = 1; i < groupExpressions.size(); i++) {
args.add(group.getGroup(groupExpressions.get(i)));
}
return (V) transformation.newInstance(args.toArray());
}
};
} | 3.68 |
streampipes_Protocols_jms | /**
* Defines the transport protocol JMS used by a data stream at runtime.
*
* @param jmsHost The hostname of any JMS broker
* @param jmsPort The port of any JMS broker
* @param topic The topic identifier
* @return The {@link org.apache.streampipes.model.grounding.JmsTransportProtocol} containing URL and topic where data
* arrives.
*/
public static JmsTransportProtocol jms(String jmsHost, Integer jmsPort, String topic) {
return new JmsTransportProtocol(jmsHost, jmsPort, topic);
} | 3.68 |
framework_TableMoveFocusWithSelection_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final Table t = new Table();
t.setImmediate(true);
t.setId("test-table");
t.setSizeFull();
t.setSelectable(true);
t.addContainerProperty("layout", VerticalLayout.class, null);
t.addContainerProperty("string", String.class, null);
for (int i = 0; i < 100; i++) {
t.addItem(i);
final VerticalLayout l = new VerticalLayout();
l.setId("row-" + i);
l.setHeight(20, Unit.PIXELS);
l.setData(i);
l.addLayoutClickListener(event -> {
if (t.isMultiSelect()) {
Set<Object> values = new HashSet<>(
(Set<Object>) t.getValue());
values.add(l.getData());
t.setValue(values);
} else {
t.setValue(l.getData());
}
});
t.getContainerProperty(i, "layout").setValue(l);
t.getContainerProperty(i, "string").setValue("Item #" + i);
}
addComponent(t);
// Select mode
Button toggleSelectMode = new Button(
t.isMultiSelect() ? "Press to use single select"
: "Press to use multi select");
toggleSelectMode.setId("toggle-mode");
toggleSelectMode.addClickListener(event -> {
t.setMultiSelect(!t.isMultiSelect());
event.getButton()
.setCaption(t.isMultiSelect() ? "Press to use single select"
: "Press to use multi select");
});
addComponent(toggleSelectMode);
Button select5210 = new Button("Select row 5-10",
event -> t.setValue(Arrays.asList(5, 6, 7, 8, 9, 10)));
select5210.setId("select-510");
addComponent(select5210);
} | 3.68 |
flink_DataStreamSink_setMaxParallelism | /**
* Sets the max parallelism for this sink.
*
* <p>The maximum parallelism specifies the upper bound for dynamic scaling. The degree must be
* higher than zero and less than the upper bound.
*
* @param maxParallelism The max parallelism for this sink.
* @return The sink with set parallelism.
*/
public DataStreamSink<T> setMaxParallelism(int maxParallelism) {
OperatorValidationUtils.validateMaxParallelism(maxParallelism, true);
transformation.setMaxParallelism(maxParallelism);
return this;
} | 3.68 |
framework_ConnectorHierarchyChangeEvent_getParent | /**
* Returns the {@link HasComponentsConnector} for which this event occurred.
*
* @return The {@link HasComponentsConnector} whose child collection has
* changed. Never returns null.
*/
public HasComponentsConnector getParent() {
return (HasComponentsConnector) getConnector();
} | 3.68 |
hadoop_AbfsHttpOperation_processResponse | /**
* Gets and processes the HTTP response.
*
* @param buffer a buffer to hold the response entity body
 * @param offset an offset in the buffer where the data will begin.
* @param length the number of bytes to be written to the buffer.
*
* @throws IOException if an error occurs.
*/
public void processResponse(final byte[] buffer, final int offset, final int length) throws IOException {
// get the response
long startTime = 0;
startTime = System.nanoTime();
this.statusCode = getConnResponseCode();
this.recvResponseTimeMs = elapsedTimeMs(startTime);
this.statusDescription = getConnResponseMessage();
this.requestId = this.connection.getHeaderField(HttpHeaderConfigurations.X_MS_REQUEST_ID);
if (this.requestId == null) {
this.requestId = AbfsHttpConstants.EMPTY_STRING;
}
// dump the headers
AbfsIoUtils.dumpHeadersToDebugLog("Response Headers",
connection.getHeaderFields());
if (AbfsHttpConstants.HTTP_METHOD_HEAD.equals(this.method)) {
// If it is HEAD, and it is ERROR
return;
}
startTime = System.nanoTime();
if (statusCode >= HttpURLConnection.HTTP_BAD_REQUEST) {
processStorageErrorResponse();
this.recvResponseTimeMs += elapsedTimeMs(startTime);
this.bytesReceived = this.connection.getHeaderFieldLong(HttpHeaderConfigurations.CONTENT_LENGTH, 0);
} else {
// consume the input stream to release resources
int totalBytesRead = 0;
try (InputStream stream = this.connection.getInputStream()) {
if (isNullInputStream(stream)) {
return;
}
boolean endOfStream = false;
          // this is a list operation and needs to retrieve the data;
          // a better solution is needed
if (AbfsHttpConstants.HTTP_METHOD_GET.equals(this.method) && buffer == null) {
parseListFilesResponse(stream);
} else {
if (buffer != null) {
while (totalBytesRead < length) {
int bytesRead = stream.read(buffer, offset + totalBytesRead, length - totalBytesRead);
if (bytesRead == -1) {
endOfStream = true;
break;
}
totalBytesRead += bytesRead;
}
}
if (!endOfStream && stream.read() != -1) {
// read and discard
int bytesRead = 0;
byte[] b = new byte[CLEAN_UP_BUFFER_SIZE];
while ((bytesRead = stream.read(b)) >= 0) {
totalBytesRead += bytesRead;
}
}
}
} catch (IOException ex) {
LOG.warn("IO/Network error: {} {}: {}",
method, getMaskedUrl(), ex.getMessage());
LOG.debug("IO Error: ", ex);
throw ex;
} finally {
this.recvResponseTimeMs += elapsedTimeMs(startTime);
this.bytesReceived = totalBytesRead;
}
}
} | 3.68 |
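A self-contained sketch of the read loop at the heart of the method above: a single InputStream.read() may return fewer bytes than requested, so the loop keeps reading until the buffer is filled or the stream ends:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class ReadFullyDemo {
    static int readUpTo(InputStream stream, byte[] buffer, int offset, int length) throws IOException {
        int totalBytesRead = 0;
        while (totalBytesRead < length) {
            int bytesRead = stream.read(buffer, offset + totalBytesRead, length - totalBytesRead);
            if (bytesRead == -1) {
                break; // end of stream before the buffer was filled
            }
            totalBytesRead += bytesRead;
        }
        return totalBytesRead;
    }

    public static void main(String[] args) throws IOException {
        byte[] buffer = new byte[16];
        InputStream in = new ByteArrayInputStream("hello response".getBytes(StandardCharsets.UTF_8));
        int n = readUpTo(in, buffer, 0, buffer.length);
        System.out.println(n + " bytes: " + new String(buffer, 0, n, StandardCharsets.UTF_8));
    }
}
```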
flink_MetricFetcherImpl_update | /**
* This method can be used to signal this MetricFetcher that the metrics are still in use and
* should be updated.
*/
@Override
public void update() {
synchronized (this) {
long currentTime = System.currentTimeMillis();
// Before all querying metric tasks are completed, new metric updating tasks cannot
// be added. This is to avoid resource waste or other problems, such as OOM, caused by
// adding too many useless querying tasks. See FLINK-29134.
if (currentTime - lastUpdateTime > updateInterval && fetchMetricsFuture.isDone()) {
lastUpdateTime = currentTime;
fetchMetricsFuture = fetchMetrics();
}
}
} | 3.68 |
framework_AbstractBeanContainer_removeListener | /**
* @deprecated As of 7.0, replaced by
* {@link #removePropertySetChangeListener(Container.PropertySetChangeListener)}
*/
@Deprecated
@Override
public void removeListener(Container.PropertySetChangeListener listener) {
removePropertySetChangeListener(listener);
} | 3.68 |
morf_OracleDialect_alterTableAddColumnStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#alterTableAddColumnStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Column)
*/
@Override
public Collection<String> alterTableAddColumnStatements(Table table, Column column) {
List<String> result = new ArrayList<>();
String truncatedTableName = truncatedTableName(table.getName());
result.add(String.format("ALTER TABLE %s%s ADD (%s %s)",
schemaNamePrefix(),
truncatedTableName,
column.getName(),
sqlRepresentationOfColumnType(column, true)
));
result.add(columnComment(column, truncatedTableName));
return result;
} | 3.68 |
framework_VTabsheet_getCloseTabKey | /**
* Returns the key code of the keyboard shortcut that closes the currently
* focused tab (if closable) in a focused tabsheet.
*
* @return the key to close the current tab
*/
protected int getCloseTabKey() {
return KeyCodes.KEY_DELETE;
} | 3.68 |
hbase_BucketCache_freeEntireBuckets | /**
 * This method finds the buckets that are minimally occupied and not reference counted, and
 * frees them completely, without any constraint on the access times of their elements. In the
 * process it frees at most the number of buckets passed; sometimes it frees fewer because
 * refCounts change concurrently.
* @param completelyFreeBucketsNeeded number of buckets to free
**/
private void freeEntireBuckets(int completelyFreeBucketsNeeded) {
if (completelyFreeBucketsNeeded != 0) {
// First we will build a set where the offsets are reference counted, usually
// this set is small around O(Handler Count) unless something else is wrong
Set<Integer> inUseBuckets = new HashSet<>();
backingMap.forEach((k, be) -> {
if (be.isRpcRef()) {
inUseBuckets.add(bucketAllocator.getBucketIndex(be.offset()));
}
});
Set<Integer> candidateBuckets =
bucketAllocator.getLeastFilledBuckets(inUseBuckets, completelyFreeBucketsNeeded);
for (Map.Entry<BlockCacheKey, BucketEntry> entry : backingMap.entrySet()) {
if (candidateBuckets.contains(bucketAllocator.getBucketIndex(entry.getValue().offset()))) {
evictBucketEntryIfNoRpcReferenced(entry.getKey(), entry.getValue());
}
}
}
} | 3.68 |
flink_BlobServerConnection_get | /**
* Handles an incoming GET request from a BLOB client.
*
* <p>Transient BLOB files are deleted after a successful read operation by the client. Note
* that we do not enforce atomicity here, i.e. multiple clients reading from the same BLOB may
* still succeed.
*
* @param inputStream the input stream to read incoming data from
* @param outputStream the output stream to send data back to the client
* @param buf an auxiliary buffer for data serialization/deserialization
* @throws IOException thrown if an I/O error occurs while reading/writing data from/to the
* respective streams
*/
private void get(InputStream inputStream, OutputStream outputStream, byte[] buf)
throws IOException {
/*
* Retrieve the file from the (distributed?) BLOB store and store it
* locally, then send it to the service which requested it.
*
* Instead, we could send it from the distributed store directly but
* chances are high that if there is one request, there will be more
* so a local cache makes more sense.
*/
final File blobFile;
final JobID jobId;
final BlobKey blobKey;
try {
// read HEADER contents: job ID, key, HA mode/permanent or transient BLOB
final int mode = inputStream.read();
if (mode < 0) {
throw new EOFException("Premature end of GET request");
}
// Receive the jobId and key
if (mode == JOB_UNRELATED_CONTENT) {
jobId = null;
} else if (mode == JOB_RELATED_CONTENT) {
byte[] jidBytes = new byte[JobID.SIZE];
readFully(inputStream, jidBytes, 0, JobID.SIZE, "JobID");
jobId = JobID.fromByteArray(jidBytes);
} else {
throw new IOException("Unknown type of BLOB addressing: " + mode + '.');
}
blobKey = BlobKey.readFromInputStream(inputStream);
checkArgument(
blobKey instanceof TransientBlobKey || jobId != null,
"Invalid BLOB addressing for permanent BLOBs");
if (LOG.isDebugEnabled()) {
LOG.debug(
"Received GET request for BLOB {}/{} from {}.",
jobId,
blobKey,
clientSocket.getInetAddress());
}
// up to here, an error can give a good message
} catch (Throwable t) {
LOG.error("GET operation from {} failed.", clientSocket.getInetAddress(), t);
try {
writeErrorToStream(outputStream, t);
} catch (IOException e) {
// since we are in an exception case, it means that we could not send the error
// ignore this
}
clientSocket.close();
return;
}
try {
readLock.lock();
try {
// copy the file to local store if it does not exist yet
try {
blobFile = blobServer.getFileInternal(jobId, blobKey);
// enforce a 2GB max for now (otherwise the protocol's length field needs to be
// increased)
if (blobFile.length() > Integer.MAX_VALUE) {
throw new IOException("BLOB size exceeds the maximum size (2 GB).");
}
outputStream.write(RETURN_OKAY);
} catch (Throwable t) {
LOG.error(
"GET operation failed for BLOB {}/{} from {}.",
jobId,
blobKey,
clientSocket.getInetAddress(),
t);
try {
writeErrorToStream(outputStream, t);
} catch (IOException e) {
// since we are in an exception case, it means that we could not send the
// error
// ignore this
}
clientSocket.close();
return;
}
// from here on, we started sending data, so all we can do is close the connection
// when something happens
int blobLen = (int) blobFile.length();
writeLength(blobLen, outputStream);
try (FileInputStream fis = new FileInputStream(blobFile)) {
int bytesRemaining = blobLen;
while (bytesRemaining > 0) {
int read = fis.read(buf);
if (read < 0) {
throw new IOException(
"Premature end of BLOB file stream for "
+ blobFile.getAbsolutePath());
}
outputStream.write(buf, 0, read);
bytesRemaining -= read;
}
}
} finally {
readLock.unlock();
}
// on successful transfer, delete transient files
int result = inputStream.read();
if (result < 0) {
throw new EOFException("Premature end of GET request");
} else if (blobKey instanceof TransientBlobKey && result == RETURN_OKAY) {
// ignore the result from the operation
if (!blobServer.deleteInternal(jobId, (TransientBlobKey) blobKey)) {
LOG.warn(
"DELETE operation failed for BLOB {}/{} from {}.",
jobId,
blobKey,
clientSocket.getInetAddress());
}
}
} catch (SocketException e) {
// happens when the other side disconnects
LOG.debug("Socket connection closed", e);
} catch (Throwable t) {
LOG.error("GET operation failed", t);
clientSocket.close();
}
} | 3.68 |
hbase_HRegion_openReadOnlyFileSystemHRegion | /**
* Open a Region on a read-only file-system (like hdfs snapshots)
* @param conf The Configuration object to use.
* @param fs Filesystem to use
* @param info Info for region to be opened.
* @param htd the table descriptor
* @return new HRegion
* @throws NullPointerException if {@code info} is {@code null}
*/
public static HRegion openReadOnlyFileSystemHRegion(final Configuration conf, final FileSystem fs,
final Path tableDir, RegionInfo info, final TableDescriptor htd) throws IOException {
Objects.requireNonNull(info, "RegionInfo cannot be null");
if (LOG.isDebugEnabled()) {
LOG.debug("Opening region (readOnly filesystem): " + info);
}
if (info.getReplicaId() <= 0) {
info = RegionReplicaUtil.getRegionInfoForReplica(info, 1);
}
HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, info, htd, null);
r.writestate.setReadOnly(true);
return r.openHRegion(null);
} | 3.68 |
framework_AbstractDateField_isLenient | /**
* Returns whether date/time interpretation is lenient.
*
* @see #setLenient(boolean)
*
* @return {@code true} if the interpretation mode of this calendar is
* lenient; {@code false} otherwise.
*/
public boolean isLenient() {
return getState(false).lenient;
} | 3.68 |
morf_AbstractSelectStatement_getOrderBys | /**
* Gets the fields which the select is ordered by
*
* @return the order by fields
*/
public List<AliasedField> getOrderBys() {
return orderBys;
} | 3.68 |
framework_VScrollTable_isDefinedWidth | /**
* Detects if width is fixed by developer on server side or resized to
* current width by user.
*
* @return true if defined, false if "natural" width
*/
public boolean isDefinedWidth() {
return definedWidth && width >= 0;
} | 3.68 |
flink_FlinkProjectJoinTransposeRule_onMatch | // implement RelOptRule
public void onMatch(RelOptRuleCall call) {
Project origProj = call.rel(0);
final Join join = call.rel(1);
if (!join.getJoinType().projectsRight()) {
return; // TODO: support SEMI/ANTI join later
}
// locate all fields referenced in the projection and join condition;
// determine which inputs are referenced in the projection and
// join condition; if all fields are being referenced and there are no
// special expressions, no point in proceeding any further
PushProjector pushProject =
new PushProjector(
origProj, join.getCondition(), join, preserveExprCondition, call.builder());
if (pushProject.locateAllRefs()) {
return;
}
// create left and right projections, projecting only those
// fields referenced on each side
RelNode leftProjRel = pushProject.createProjectRefsAndExprs(join.getLeft(), true, false);
RelNode rightProjRel = pushProject.createProjectRefsAndExprs(join.getRight(), true, true);
// convert the join condition to reference the projected columns
RexNode newJoinFilter = null;
int[] adjustments = pushProject.getAdjustments();
if (join.getCondition() != null) {
List<RelDataTypeField> projJoinFieldList = new ArrayList<>();
projJoinFieldList.addAll(join.getSystemFieldList());
projJoinFieldList.addAll(leftProjRel.getRowType().getFieldList());
projJoinFieldList.addAll(rightProjRel.getRowType().getFieldList());
newJoinFilter =
pushProject.convertRefsAndExprs(
join.getCondition(), projJoinFieldList, adjustments);
}
// create a new join with the projected children
Join newJoinRel =
join.copy(
join.getTraitSet(),
newJoinFilter,
leftProjRel,
rightProjRel,
join.getJoinType(),
join.isSemiJoinDone());
// put the original project on top of the join, converting it to
// reference the modified projection list
RelNode topProject = pushProject.createNewProject(newJoinRel, adjustments);
call.transformTo(topProject);
} | 3.68 |
hadoop_GangliaMetricVisitor_getSlope | /**
* @return the slope of a visited metric. Slope is positive for counters and
* null for others
*/
GangliaSlope getSlope() {
return slope;
} | 3.68 |
flink_RocksDBNativeMetricOptions_getProperties | /** @return the enabled RocksDB property-based metrics */
public Collection<String> getProperties() {
return Collections.unmodifiableCollection(properties);
} | 3.68 |
hbase_CellBlockBuilder_buildCellBlockStream | /**
* Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
* <code>compressor</code>.
* @param codec to use for encoding
* @param compressor to use for encoding
* @param cellScanner to encode
* @param allocator to allocate the {@link ByteBuff}.
* @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using
* passed in <code>codec</code> and/or <code>compressor</code>; the returned buffer has
* been flipped and is ready for reading. Use limit to find total size. If
* <code>pool</code> was not null, then this returned ByteBuffer came from there and
* should be returned to the pool when done.
* @throws IOException if encoding the cells fail
*/
public ByteBufferListOutputStream buildCellBlockStream(Codec codec, CompressionCodec compressor,
CellScanner cellScanner, ByteBuffAllocator allocator) throws IOException {
if (cellScanner == null) {
return null;
}
if (codec == null) {
throw new CellScannerButNoCodecException();
}
ByteBufferListOutputStream bbos = new ByteBufferListOutputStream(allocator);
encodeCellsTo(bbos, cellScanner, codec, compressor);
if (bbos.size() == 0) {
bbos.releaseResources();
return null;
}
return bbos;
} | 3.68 |
hadoop_IOStatisticsStoreImpl_reset | /**
* Reset all statistics.
*/
@Override
public synchronized void reset() {
counterMap.values().forEach(a -> a.set(0));
gaugeMap.values().forEach(a -> a.set(0));
minimumMap.values().forEach(a -> a.set(0));
maximumMap.values().forEach(a -> a.set(0));
meanStatisticMap.values().forEach(a -> a.clear());
} | 3.68 |
rocketmq-connect_JsonConverter_convertToJson | /**
* Convert this object, in the org.apache.kafka.connect.data format, into a JSON object, returning both the schema
* and the converted object.
*/
private Object convertToJson(Schema schema, Object value) {
if (value == null) {
if (schema == null) {
return null;
}
if (schema.getDefaultValue() != null) {
return convertToJson(schema, schema.getDefaultValue());
}
if (schema.isOptional()) {
return null;
}
throw new ConnectException("Conversion error: null value for field that is required and has no default value");
}
if (schema != null && schema.getName() != null) {
LogicalTypeConverter logicalConverter = LOGICAL_CONVERTERS.get(schema.getName());
if (logicalConverter != null) {
if (value == null) {
return null;
} else {
return logicalConverter.toJson(schema, value, converterConfig);
}
}
}
try {
final FieldType schemaType;
if (schema == null) {
schemaType = Schema.schemaType(value.getClass());
if (schemaType == null) {
throw new ConnectException("Java class " + value.getClass() + " does not have corresponding schema type.");
}
} else {
schemaType = schema.getFieldType();
}
switch (schemaType) {
case INT8:
case INT16:
case INT32:
case INT64:
case FLOAT32:
case FLOAT64:
case BOOLEAN:
case STRING:
return value;
case BYTES:
if (value instanceof byte[]) {
return (byte[]) value;
} else if (value instanceof ByteBuffer) {
return ((ByteBuffer) value).array();
} else {
throw new ConnectException("Invalid type for bytes type: " + value.getClass());
}
case ARRAY: {
Collection collection = (Collection) value;
List list = new ArrayList();
for (Object elem : collection) {
Schema valueSchema = schema == null ? null : schema.getValueSchema();
Object fieldValue = convertToJson(valueSchema, elem);
list.add(fieldValue);
}
return list;
}
case MAP: {
Map<?, ?> map = (Map<?, ?>) value;
boolean objectMode;
if (schema == null) {
objectMode = true;
for (Map.Entry<?, ?> entry : map.entrySet()) {
if (!(entry.getKey() instanceof String)) {
objectMode = false;
break;
}
}
} else {
objectMode = schema.getKeySchema().getFieldType() == FieldType.STRING;
}
JSONArray resultArray = new JSONArray();
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<?, ?> entry : map.entrySet()) {
Schema keySchema = schema == null ? null : schema.getKeySchema();
Schema valueSchema = schema == null ? null : schema.getValueSchema();
Object mapKey = convertToJson(keySchema, entry.getKey());
Object mapValue = convertToJson(valueSchema, entry.getValue());
if (objectMode) {
resultMap.put((String) mapKey, mapValue);
} else {
JSONArray entryArray = new JSONArray();
entryArray.add(0, mapKey);
entryArray.add(1, mapValue);
resultArray.add(entryArray);
}
}
return objectMode ? resultMap : resultArray;
}
case STRUCT: {
Struct struct = (Struct) value;
if (!struct.schema().equals(schema)) {
throw new ConnectException("Mismatching schema.");
}
JSONObject obj = new JSONObject(new LinkedHashMap());
for (Field field : struct.schema().getFields()) {
obj.put(field.getName(), convertToJson(field.getSchema(), struct.get(field)));
}
return obj;
}
}
throw new ConnectException("Couldn't convert " + value + " to JSON.");
} catch (ClassCastException e) {
String schemaTypeStr = (schema != null) ? schema.getFieldType().toString() : "unknown schema";
throw new ConnectException("Invalid type for " + schemaTypeStr + ": " + value.getClass());
}
} | 3.68 |
framework_AbsoluteLayoutConnector_layoutVertically | /*
* (non-Javadoc)
*
* @see com.vaadin.client.DirectionalManagedLayout#layoutVertically()
*/
@Override
public void layoutVertically() {
getWidget().layoutVertically();
for (ComponentConnector connector : getChildComponents()) {
if (connector.isRelativeHeight()) {
getLayoutManager().reportHeightAssignedToRelative(connector,
getWidget().getWidgetSlotHeight(connector.getWidget()));
}
}
} | 3.68 |
framework_ListSelect_setColumns | /**
* Sets the width of the component so that it can display approximately the
* given number of letters.
* <p>
* Calling {@code setColumns(10);} is equivalent to calling
* {@code setWidth("10em");}
* </p>
*
* @deprecated As of 7.0. "Columns" does not reflect the exact number of
* characters that will be displayed. It is better to use
* setWidth together with "em" to control the width of the
* field.
* @param columns
* the number of columns to set.
*/
@Deprecated
public void setColumns(int columns) {
if (columns < 0) {
columns = 0;
}
if (this.columns != columns) {
this.columns = columns;
markAsDirty();
}
} | 3.68 |
flink_JobGraph_getCoLocationGroups | /**
* Returns all {@link CoLocationGroup} instances associated with this {@code JobGraph}.
*
* @return The associated {@code CoLocationGroup} instances.
*/
public Set<CoLocationGroup> getCoLocationGroups() {
final Set<CoLocationGroup> coLocationGroups =
IterableUtils.toStream(getVertices())
.map(JobVertex::getCoLocationGroup)
.filter(Objects::nonNull)
.collect(Collectors.toSet());
return Collections.unmodifiableSet(coLocationGroups);
} | 3.68 |
hadoop_Utils_toString | /**
* Return a string representation of the version.
*/
@Override
public String toString() {
return new StringBuilder("v").append(major).append(".").append(minor)
.toString();
} | 3.68 |
pulsar_ProducerConfiguration_getCryptoFailureAction | /**
* @return The ProducerCryptoFailureAction
*/
public ProducerCryptoFailureAction getCryptoFailureAction() {
return conf.getCryptoFailureAction();
} | 3.68 |
framework_FieldGroup_bindFields | /**
* Binds all fields to the properties in the item in use.
*
* @since 7.7.5
*/
protected void bindFields() {
for (Field<?> f : fieldToPropertyId.keySet()) {
bind(f, fieldToPropertyId.get(f));
}
} | 3.68 |
morf_AbstractSqlDialectTest_testAddDateColumn | /**
* Test adding a date column.
*/
@Test
public void testAddDateColumn() {
testAlterTableColumn(AlterationType.ADD, column("dateField_new", DataType.DATE).nullable(), expectedAlterTableAddDateColumnStatement());
} | 3.68 |
hbase_Scan_getCacheBlocks | /**
* Get whether blocks should be cached for this Scan.
* @return true if default caching should be used, false if blocks should not be cached
*/
public boolean getCacheBlocks() {
return cacheBlocks;
} | 3.68 |
dubbo_MetadataService_version | /**
* Gets the version of {@link MetadataService} that always equals {@link #VERSION}
*
* @return non-null
* @see #VERSION
*/
default String version() {
return VERSION;
} | 3.68 |
hudi_HoodieInputFormatUtils_listAffectedFilesForCommits | /**
* Iterate through a list of commit metadata in natural order, and extract the file status of
 * all affected files from the commit metadata, grouped by full file path. If a file has
 * been touched multiple times in the given commits, the return value keeps the entry
 * from the latest commit.
*
* @param basePath The table base path
* @param metadataList The metadata list to read the data from
* @return the affected file status array
*/
public static FileStatus[] listAffectedFilesForCommits(Configuration hadoopConf, Path basePath, List<HoodieCommitMetadata> metadataList) {
// TODO: Use HoodieMetaTable to extract affected file directly.
HashMap<String, FileStatus> fullPathToFileStatus = new HashMap<>();
// Iterate through the given commits.
for (HoodieCommitMetadata metadata : metadataList) {
fullPathToFileStatus.putAll(metadata.getFullPathToFileStatus(hadoopConf, basePath.toString()));
}
return fullPathToFileStatus.values().toArray(new FileStatus[0]);
} | 3.68 |
hadoop_AbfsInputStreamStatisticsImpl_getIOStatistics | /**
* Getter for IOStatistics instance used.
* @return IOStatisticsStore instance which extends IOStatistics.
*/
@Override
public IOStatistics getIOStatistics() {
return ioStatisticsStore;
} | 3.68 |
dubbo_ReferenceBean_getInterfaceClass | /**
 * The interface of this ReferenceBean, for injection purposes.
 * @return the interface class
*/
public Class<?> getInterfaceClass() {
// Compatible with seata-1.4.0: io.seata.rm.tcc.remoting.parser.DubboRemotingParser#getServiceDesc()
return interfaceClass;
} | 3.68 |
hbase_Scan_setRowOffsetPerColumnFamily | /**
* Set offset for the row per Column Family.
* @param offset is the number of kvs that will be skipped.
*/
public Scan setRowOffsetPerColumnFamily(int offset) {
this.storeOffset = offset;
return this;
} | 3.68 |
rocketmq-connect_AbstractKafkaSinkConnector_originalSinkConnector | /**
* try override start and stop
*
* @return
*/
protected org.apache.kafka.connect.sink.SinkConnector originalSinkConnector() {
return sinkConnector;
} | 3.68 |
hadoop_JsonSerDeser_fromJson | /**
* Convert from JSON
* @param json input
* @return the parsed JSON
* @throws IOException IO
* @throws JsonMappingException failure to map from the JSON to this class
*/
public T fromJson(String json)
throws IOException, JsonParseException, JsonMappingException {
try {
return mapper.readValue(json, classType);
} catch (IOException e) {
log.error("Exception while parsing json : " + e + "\n" + json, e);
throw e;
}
} | 3.68 |
morf_SchemaResourceImpl_close | /**
* @see org.alfasoftware.morf.metadata.SchemaResource#close()
*/
@Override
public void close() {
try {
// restore auto-commit state
connection.commit();
connection.setAutoCommit(wasAutoCommit);
connection.close();
} catch (SQLException e) {
throw new RuntimeSqlException("Closing", e);
}
} | 3.68 |
hadoop_BCFile_getRawSize | /**
* Get the uncompressed size of the block.
*
* @return uncompressed size of the block.
*/
public long getRawSize() {
return rBlkState.getBlockRegion().getRawSize();
} | 3.68 |