name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
AreaShop_TeleportFeature_canSpawnIn | /**
* Check if a player can spawn in here.
* @param material Material to check (assumed that this is at the feet or head level)
* @return true when it is safe to spawn inside, otherwise false
*/
private static boolean canSpawnIn(Material material) {
String name = material.name();
return name.contains("DOOR")
|| name.contains("SIGN")
|| name.contains("PLATE") // Redstone plates
|| name.equals("DRAGON_EGG");
} | 3.68 |
hbase_PermissionStorage_removeTablePermissions | /**
* Remove specified table column from the acl table.
*/
static void removeTablePermissions(Configuration conf, TableName tableName, byte[] column,
Table t) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Removing permissions of removed column " + Bytes.toString(column) + " from table "
+ tableName);
}
removeTablePermissions(tableName, column, t, true);
} | 3.68 |
rocketmq-connect_AvroDatumReaderFactory_get | /**
 * Get an Avro datum reader factory with the given configuration.
 *
 * @return a new {@link AvroDatumReaderFactory} configured with the given flags
*/
public static AvroDatumReaderFactory get(boolean useSchemaReflection, boolean avroUseLogicalTypeConverters, boolean useSpecificAvroReader, boolean avroReflectionAllowNull) {
return new AvroDatumReaderFactory(useSchemaReflection, avroUseLogicalTypeConverters, useSpecificAvroReader, avroReflectionAllowNull);
} | 3.68 |
hadoop_CacheDirectiveStats_setHasExpired | /**
* Sets whether this directive has expired.
*
* @param hasExpired if this directive has expired
* @return This builder, for call chaining.
*/
public Builder setHasExpired(boolean hasExpired) {
this.hasExpired = hasExpired;
return this;
} | 3.68 |
framework_VCalendar_getEventComparator | /**
* Returns the default comparator which can compare calendar events by
* duration.
*
 * @deprecated this returns just one default comparator, but there are a
 *             number of comparators that are used to sort events depending
 *             on order.
 *
 * @return the default comparator for calendar events
*/
@Deprecated
public static Comparator<CalendarEvent> getEventComparator() {
return DEFAULT_COMPARATOR;
} | 3.68 |
framework_VAbstractOrderedLayout_updateExpandCompensation | /**
* Updates the expand compensation based on the measured sizes of children
* without expand.
*/
public void updateExpandCompensation() {
boolean isExpanding = false;
for (Widget slot : getChildren()) {
// FIXME expandRatio might be <0
if (((Slot) slot).getExpandRatio() != 0) {
isExpanding = true;
break;
}
}
if (isExpanding) {
/*
* Expanded slots have relative sizes that together add up to 100%.
* To make room for slots without expand, we will add padding that
* is not considered for relative sizes and a corresponding negative
* margin for the unexpanded slots. We calculate the size by summing
* the size of all non-expanded non-relative slots.
*
* Relatively sized slots without expansion are considered to get
* 0px, but we still keep them visible (causing overflows) to help
* the developer see what's happening. Forcing them to only get 0px
* would make them disappear which would avoid overflows but would
* instead cause confusion as they would then just disappear without
* any obvious reason.
*/
int totalSize = 0;
for (Widget w : getChildren()) {
Slot slot = (Slot) w;
if (slot.getExpandRatio() == 0
&& !slot.isRelativeInDirection(vertical)) {
if (layoutManager != null) {
// TODO check caption position
if (vertical) {
int size = layoutManager.getOuterHeight(
slot.getWidget().getElement());
if (slot.hasCaption()) {
size += layoutManager.getOuterHeight(
slot.getCaptionElement());
}
if (size > 0) {
totalSize += size;
}
} else {
int max = layoutManager.getOuterWidth(
slot.getWidget().getElement());
if (slot.hasCaption()) {
int max2 = layoutManager.getOuterWidth(
slot.getCaptionElement());
max = Math.max(max, max2);
}
if (max > 0) {
totalSize += max;
}
}
} else {
// FIXME expandRatio might be <0
totalSize += vertical ? slot.getOffsetHeight()
: slot.getOffsetWidth();
}
}
// TODO fails in Opera, always returns 0
int spacingSize = vertical ? slot.getVerticalSpacing()
: slot.getHorizontalSpacing();
if (spacingSize > 0) {
totalSize += spacingSize;
}
}
// When we set the margin to the first child, we don't need
// overflow:hidden in the layout root element, since the wrapper
// would otherwise be placed outside of the layout root element
// and block events on elements below it.
if (vertical) {
expandWrapper.getStyle().setPaddingTop(totalSize, Unit.PX);
expandWrapper.getFirstChildElement().getStyle()
.setMarginTop(-totalSize, Unit.PX);
} else {
expandWrapper.getStyle().setPaddingLeft(totalSize, Unit.PX);
expandWrapper.getFirstChildElement().getStyle()
.setMarginLeft(-totalSize, Unit.PX);
}
// Measure expanded children again if their size might have changed
if (totalSize != lastExpandSize) {
lastExpandSize = totalSize;
for (Widget w : getChildren()) {
Slot slot = (Slot) w;
// FIXME expandRatio might be <0
if (slot.getExpandRatio() != 0) {
if (layoutManager != null) {
layoutManager.setNeedsMeasure(
Util.findConnectorFor(slot.getWidget()));
} else if (slot.getWidget() instanceof RequiresResize) {
((RequiresResize) slot.getWidget()).onResize();
}
}
}
}
}
} | 3.68 |
flink_Task_cancelExecution | /**
* Cancels the task execution. If the task is already in a terminal state (such as FINISHED,
 * CANCELED, FAILED), or if the task is already canceling, this does nothing. Otherwise it sets
* the state to CANCELING, and, if the invokable code is running, starts an asynchronous thread
* that aborts that code.
*
* <p>This method never blocks.
*/
public void cancelExecution() {
LOG.info("Attempting to cancel task {} ({}).", taskNameWithSubtask, executionId);
cancelOrFailAndCancelInvokable(ExecutionState.CANCELING, null);
} | 3.68 |
hbase_ServerCommandLine_doMain | /**
* Parse and run the given command line. This will exit the JVM with the exit code returned from
 * <code>run()</code>. If the return code is 0, wait for at most 30 seconds for all non-daemon
 * threads to quit, otherwise exit the JVM.
*/
public void doMain(String args[]) {
try {
int ret = ToolRunner.run(HBaseConfiguration.create(), this, args);
if (ret != 0) {
System.exit(ret);
}
// Return code is 0 here.
boolean forceStop = false;
long startTime = EnvironmentEdgeManager.currentTime();
while (isNonDaemonThreadRunning()) {
if (EnvironmentEdgeManager.currentTime() - startTime > 30 * 1000) {
forceStop = true;
break;
}
Thread.sleep(1000);
}
if (forceStop) {
LOG.error("Failed to stop all non-daemon threads, so terminating JVM");
System.exit(-1);
}
} catch (Exception e) {
LOG.error("Failed to run", e);
System.exit(-1);
}
} | 3.68 |
hadoop_DiskBalancerWorkItem_setBlocksCopied | /**
 * Sets the number of blocks copied so far.
*
* @param blocksCopied Blocks copied.
*/
public void setBlocksCopied(long blocksCopied) {
this.blocksCopied = blocksCopied;
} | 3.68 |
hadoop_TimelineEntity_setOtherInfo | /**
* Set the other info map to the given map of other information
*
* @param otherInfo
* a map of other information
*/
public void setOtherInfo(Map<String, Object> otherInfo) {
this.otherInfo = TimelineServiceHelper.mapCastToHashMap(otherInfo);
} | 3.68 |
flink_SqlConstraintEnforcement_symbol | /**
* Creates a parse-tree node representing an occurrence of this keyword at a particular position
* in the parsed text.
*/
public SqlLiteral symbol(SqlParserPos pos) {
return SqlLiteral.createSymbol(this, pos);
} | 3.68 |
pulsar_AbstractPushSource_getQueueLength | /**
 * Get the length of the queue that records are pushed onto.
 * Users can override this method to customize the queue length.
* @return queue length
*/
public int getQueueLength() {
return DEFAULT_QUEUE_LENGTH;
} | 3.68 |
flink_DefaultCheckpointPlanCalculator_collectTaskRunningStatus | /**
* Collects the task running status for each job vertex.
*
* @return The task running status for each job vertex.
*/
@VisibleForTesting
Map<JobVertexID, BitSet> collectTaskRunningStatus() {
Map<JobVertexID, BitSet> runningStatusByVertex = new HashMap<>();
for (ExecutionJobVertex vertex : jobVerticesInTopologyOrder) {
BitSet runningTasks = new BitSet(vertex.getTaskVertices().length);
for (int i = 0; i < vertex.getTaskVertices().length; ++i) {
if (!vertex.getTaskVertices()[i].getCurrentExecutionAttempt().isFinished()) {
runningTasks.set(i);
}
}
runningStatusByVertex.put(vertex.getJobVertexId(), runningTasks);
}
return runningStatusByVertex;
} | 3.68 |
zilla_HpackContext_valid | /**
 * @return true if the index is valid, false otherwise
 */
public boolean valid(int index)
{
return index != 0 && index < STATIC_TABLE.length + table.size();
} | 3.68 |
hbase_Union2_decodeA | /**
* Read an instance of the first type parameter from buffer {@code src}.
*/
public A decodeA(PositionedByteRange src) {
return (A) decode(src);
} | 3.68 |
hadoop_TypedBytesInput_readRawVector | /**
* Reads the raw bytes following a <code>Type.VECTOR</code> code.
* @return the obtained bytes sequence
 * @throws IOException if an I/O error occurs while reading
*/
public byte[] readRawVector() throws IOException {
Buffer buffer = new Buffer();
int length = readVectorHeader();
buffer.append(new byte[] {
(byte) Type.VECTOR.code,
(byte) (0xff & (length >> 24)), (byte) (0xff & (length >> 16)),
(byte) (0xff & (length >> 8)), (byte) (0xff & length)
});
for (int i = 0; i < length; i++) {
buffer.append(readRaw());
}
return buffer.get();
} | 3.68 |
hadoop_OBSFileSystem_getPartSize | /**
* Return the part size for multipart upload used by {@link
* OBSBlockOutputStream}.
*
* @return the part size
*/
long getPartSize() {
return partSize;
} | 3.68 |
shardingsphere-elasticjob_FailoverService_updateFailoverComplete | /**
 * Update sharding items status when failover execution is complete.
*
* @param items sharding items of failover execution completed
*/
public void updateFailoverComplete(final Collection<Integer> items) {
for (int each : items) {
jobNodeStorage.removeJobNodeIfExisted(FailoverNode.getExecutionFailoverNode(each));
jobNodeStorage.removeJobNodeIfExisted(FailoverNode.getExecutingFailoverNode(each));
}
} | 3.68 |
AreaShop_FileManager_getFallbackRegionSettings | /**
* Get the default regions settings as provided by AreaShop (default.yml).
* @return YamlConfiguration with the default settings
*/
public YamlConfiguration getFallbackRegionSettings() {
return defaultConfigFallback;
} | 3.68 |
framework_VCalendarPanel_onTabOut | /**
 * True should be returned if the panel will not be used after this
 * event.
 *
 * @param event the event that caused the panel to lose focus
 * @return true if the panel will not be used after this event
*/
protected boolean onTabOut(DomEvent<?> event) {
if (focusOutListener != null) {
return focusOutListener.onFocusOut(event);
}
return false;
} | 3.68 |
querydsl_ExpressionUtils_inAny | /**
* Create a {@code left in right or...} expression for each list
*
* @param <D> element type
 * @param left expression to compare
 * @param lists collections of candidate values
* @return a {@code left in right or...} expression
*/
public static <D> Predicate inAny(Expression<D> left, Iterable<? extends Collection<? extends D>> lists) {
BooleanBuilder rv = new BooleanBuilder();
for (Collection<? extends D> list : lists) {
rv.or(in(left, list));
}
return rv;
} | 3.68 |
hbase_Subprocedure_call | /**
* Execute the Subprocedure {@link #acquireBarrier()} and {@link #insideBarrier()} methods while
* keeping some state for other threads to access. This would normally be executed by the
 * ProcedureMember when an acquire message comes from the coordinator. Rpcs are used to send
 * messages back to the coordinator after different phases are executed. Any exceptions caught
* during the execution (except for InterruptedException) get converted and propagated to
* coordinator via {@link ProcedureMemberRpcs#sendMemberAborted( Subprocedure, ForeignException)}.
*/
@SuppressWarnings("finally")
@Override
final public Void call() {
LOG.debug("Starting subprocedure '" + barrierName + "' with timeout "
+ executionTimeoutTimer.getMaxTime() + "ms");
// start the execution timeout timer
executionTimeoutTimer.start();
try {
// start by checking for error first
rethrowException();
LOG.debug("Subprocedure '" + barrierName + "' starting 'acquire' stage");
acquireBarrier();
LOG.debug("Subprocedure '" + barrierName + "' locally acquired");
rethrowException();
// vote yes to coordinator about being prepared
rpcs.sendMemberAcquired(this);
LOG.debug("Subprocedure '" + barrierName + "' coordinator notified of 'acquire', waiting on"
+ " 'reached' or 'abort' from coordinator");
 // wait for the procedure to reach global barrier before proceeding
waitForReachedGlobalBarrier();
rethrowException(); // if Coordinator aborts, will bail from here with exception
// In traditional 2PC, if a member reaches this state the TX has been committed and the
// member is responsible for rolling forward and recovering and completing the subsequent
// operations in the case of failure. It cannot rollback.
//
// This implementation is not 2PC since it can still rollback here, and thus has different
// semantics.
LOG.debug("Subprocedure '" + barrierName + "' received 'reached' from coordinator.");
byte[] dataToCoordinator = insideBarrier();
LOG.debug("Subprocedure '" + barrierName + "' locally completed");
rethrowException();
// Ack that the member has executed and released local barrier
rpcs.sendMemberCompleted(this, dataToCoordinator);
LOG.debug("Subprocedure '" + barrierName + "' has notified controller of completion");
// make sure we didn't get an external exception
rethrowException();
} catch (Exception e) {
String msg = null;
if (e instanceof InterruptedException) {
msg = "Procedure '" + barrierName + "' aborting due to interrupt!"
+ " Likely due to pool shutdown.";
Thread.currentThread().interrupt();
} else if (e instanceof ForeignException) {
msg = "Subprocedure '" + barrierName + "' aborting due to a ForeignException!";
} else {
msg = "Subprocedure '" + barrierName + "' failed!";
}
cancel(msg, e);
LOG.debug("Subprocedure '" + barrierName + "' running cleanup.");
cleanup(e);
} finally {
releasedLocalBarrier.countDown();
// tell the timer we are done, if we get here successfully
executionTimeoutTimer.complete();
complete = true;
LOG.debug("Subprocedure '" + barrierName + "' completed.");
return null;
}
} | 3.68 |
hbase_VersionModel_setRESTVersion | /**
* @param version the REST gateway version string
*/
public void setRESTVersion(String version) {
this.restVersion = version;
} | 3.68 |
flink_DataStream_join | /**
* Creates a join operation. See {@link JoinedStreams} for an example of how the keys and window
* can be specified.
*/
public <T2> JoinedStreams<T, T2> join(DataStream<T2> otherStream) {
return new JoinedStreams<>(this, otherStream);
} | 3.68 |
framework_SelectionEvent_getSelected | /**
* A {@link Collection} of all the itemIds that are currently selected.
*
* @return a Collection of the itemIds that are currently selected
*/
public Set<Object> getSelected() {
return Collections.unmodifiableSet(newSelection);
} | 3.68 |
dubbo_SlidingWindow_values | /**
* Get aggregated value list for entire sliding window at the specified time.
 * The list will only contain values from "valid" panes.
 *
 * @param timeMillis the timestamp in milliseconds at which to read the window.
 * @return aggregated value list for entire sliding window.
*/
public List<T> values(long timeMillis) {
if (timeMillis < 0) {
return new ArrayList<>();
}
List<T> result = new ArrayList<>(paneCount);
for (int idx = 0; idx < paneCount; idx++) {
Pane<T> pane = referenceArray.get(idx);
if (pane == null || isPaneDeprecated(timeMillis, pane)) {
continue;
}
result.add(pane.getValue());
}
return result;
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_helpGetBytesForState | /**
* Returns the byte array of serialized state.
*
* @param valuePointer pointer to value.
* @return byte array of serialized value.
*/
byte[] helpGetBytesForState(long valuePointer) {
Node node = getNodeSegmentAndOffset(valuePointer);
MemorySegment segment = node.nodeSegment;
int offsetInSegment = node.nodeOffset;
int valueLen = SkipListUtils.getValueLen(segment, offsetInSegment);
MemorySegment valueSegment = MemorySegmentFactory.allocateUnpooledSegment(valueLen);
segment.copyTo(
offsetInSegment + SkipListUtils.getValueMetaLen(), valueSegment, 0, valueLen);
return valueSegment.getArray();
} | 3.68 |
framework_FocusableFlexTable_focus | /*
* (non-Javadoc)
*
* @see com.vaadin.client.Focusable#focus()
*/
@Override
public void focus() {
setFocus(true);
} | 3.68 |
flink_HsSubpartitionConsumerMemoryDataManager_consumeBuffer | /**
* Check whether the head of {@link #unConsumedBuffers} is the buffer to be consumed. If so,
* return the buffer and backlog.
*
* @param toConsumeIndex index of buffer to be consumed.
* @param buffersToRecycle buffers to recycle if needed.
* @return If the head of {@link #unConsumedBuffers} is target, return optional of the buffer
* and backlog. Otherwise, return {@link Optional#empty()}.
*/
@SuppressWarnings("FieldAccessNotGuarded")
 // Note that callWithLock ensures that the code block is guarded by resultPartitionReadLock and
 // subpartitionLock.
@Override
public Optional<ResultSubpartition.BufferAndBacklog> consumeBuffer(
int toConsumeIndex, Collection<Buffer> buffersToRecycle) {
Optional<Tuple2<HsBufferContext, Buffer.DataType>> bufferAndNextDataType =
callWithLock(
() -> {
if (!checkFirstUnConsumedBufferIndex(toConsumeIndex)) {
return Optional.empty();
}
HsBufferContext bufferContext =
checkNotNull(unConsumedBuffers.pollFirst());
tryDecreaseBacklog(bufferContext.getBuffer());
bufferContext.consumed(consumerId);
Buffer.DataType nextDataType =
peekNextToConsumeDataTypeInternal(toConsumeIndex + 1);
return Optional.of(Tuple2.of(bufferContext, nextDataType));
});
bufferAndNextDataType.ifPresent(
tuple ->
memoryDataManagerOperation.onBufferConsumed(
tuple.f0.getBufferIndexAndChannel()));
return bufferAndNextDataType.map(
tuple ->
new ResultSubpartition.BufferAndBacklog(
tuple.f0.getBuffer().readOnlySlice(),
getBacklog(),
tuple.f1,
toConsumeIndex));
} | 3.68 |
hbase_ProcedureExecutor_addChore | /**
* Add a chore procedure to the executor
* @param chore the chore to add
*/
public void addChore(@Nullable ProcedureInMemoryChore<TEnvironment> chore) {
if (chore == null) {
return;
}
chore.setState(ProcedureState.WAITING_TIMEOUT);
timeoutExecutor.add(chore);
} | 3.68 |
hbase_ScanQueryMatcher_preCheck | /**
* Check before the delete logic.
* @return null means continue.
*/
protected final MatchCode preCheck(Cell cell) {
if (currentRow == null) {
// Since the curCell is null it means we are already sure that we have moved over to the next
// row
return MatchCode.DONE;
}
// if row key is changed, then we know that we have moved over to the next row
if (rowComparator.compareRows(currentRow, cell) != 0) {
return MatchCode.DONE;
}
if (this.columns.done()) {
return MatchCode.SEEK_NEXT_ROW;
}
long timestamp = cell.getTimestamp();
// check if this is a fake cell. The fake cell is an optimization, we should make the scanner
// seek to next column or next row. See StoreFileScanner.requestSeek for more details.
// check for early out based on timestamp alone
if (timestamp == PrivateConstants.OLDEST_TIMESTAMP || columns.isDone(timestamp)) {
return columns.getNextRowOrNextColumn(cell);
}
// check if the cell is expired by cell TTL
if (isCellTTLExpired(cell, this.oldestUnexpiredTS, this.now)) {
return MatchCode.SKIP;
}
return null;
} | 3.68 |
hbase_ClusterMetrics_getRequestCount | /** Returns the number of requests since last report */
default long getRequestCount() {
return getLiveServerMetrics().entrySet().stream()
.flatMap(v -> v.getValue().getRegionMetrics().values().stream())
.mapToLong(RegionMetrics::getRequestCount).sum();
} | 3.68 |
flink_HiveParserTypeCheckCtx_getSubqueryToRelNode | /** @return the map from subquery AST nodes to their converted RelNodes */
public Map<HiveParserASTNode, RelNode> getSubqueryToRelNode() {
return subqueryToRelNode;
} | 3.68 |
dubbo_HttpHeaderUtil_createAttachments | /**
 * Convert an attachment map to a Map<String, List<String>>.
 *
 * @param attachmentMap the attachments to convert
 * @return the converted attachments, one list of values per key
*/
public static Map<String, List<String>> createAttachments(Map<String, Object> attachmentMap) {
Map<String, List<String>> attachments = new HashMap<>();
int size = 0;
for (Map.Entry<String, Object> entry : attachmentMap.entrySet()) {
String key = entry.getKey();
String value = String.valueOf(entry.getValue());
if (value != null) {
size += value.getBytes(StandardCharsets.UTF_8).length;
}
List<String> strings = attachments.get(key);
if (strings == null) {
strings = new ArrayList<>();
attachments.put(key, strings);
}
strings.add(value);
}
return attachments;
} | 3.68 |
framework_DateCell_removeEmphasisStyle | /**
* @since 7.2
*/
public void removeEmphasisStyle(Element elementOver) {
removeEmphasisStyle(DOM.asOld(elementOver));
} | 3.68 |
hbase_ProcedureExecutor_getCurrentRunTime | /** Returns how long the current procedure has been running */
public long getCurrentRunTime() {
return EnvironmentEdgeManager.currentTime() - executionStartTime.get();
} | 3.68 |
hbase_LruAdaptiveBlockCache_runEviction | /**
* Multi-threaded call to run the eviction process.
*/
private void runEviction() {
if (evictionThread == null) {
evict();
} else {
evictionThread.evict();
}
} | 3.68 |
flink_LocalBufferPool_requestMemorySegmentFromGlobalWhenAvailable | /**
 * Tries to obtain a buffer from the global pool as soon as one is available. Note that
 * multiple {@link LocalBufferPool}s might wait on the future of the global pool, hence this
 * method double-checks if a new buffer is really needed at the time it becomes available.
*/
@GuardedBy("availableMemorySegments")
private void requestMemorySegmentFromGlobalWhenAvailable() {
assert Thread.holdsLock(availableMemorySegments);
checkState(
!requestingNotificationOfGlobalPoolAvailable,
"local buffer pool is already in the state of requesting memory segment from global when it is available.");
requestingNotificationOfGlobalPoolAvailable = true;
assertNoException(
networkBufferPool.getAvailableFuture().thenRun(this::onGlobalPoolAvailable));
} | 3.68 |
hbase_NamedQueueRecorder_getEventCount | // must be power of 2 for disruptor ringbuffer
private int getEventCount(int eventCount) {
Preconditions.checkArgument(eventCount >= 0, "hbase.namedqueue.ringbuffer.size must be > 0");
int floor = Integer.highestOneBit(eventCount);
if (floor == eventCount) {
return floor;
}
// max capacity is 1 << 30
if (floor >= 1 << 29) {
return 1 << 30;
}
return floor << 1;
} | 3.68 |
hbase_TableMapReduceUtil_setScannerCaching | /**
* Sets the number of rows to return and cache with each scanner iteration. Higher caching values
* will enable faster mapreduce jobs at the expense of requiring more heap to contain the cached
* rows.
* @param job The current job to adjust.
* @param batchSize The number of rows to return in batch with each scanner iteration.
*/
public static void setScannerCaching(Job job, int batchSize) {
job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize);
} | 3.68 |
flink_StateMetadata_validateStateMetadata | /**
* Validate deserialized state metadata from json content of {@link
* org.apache.flink.table.api.CompiledPlan}.
*
* @param inputNumOfOperator the input number of the stateful operator that the exec node to
* translate to.
* @param stateMetadataList the deserialized state metadata list.
*/
private static void validateStateMetadata(
int inputNumOfOperator, List<StateMetadata> stateMetadataList) {
// the state metadata list size should be equal to the input number of the operator
Preconditions.checkArgument(
inputNumOfOperator == stateMetadataList.size(),
String.format(
"The compiled plan contains inconsistent state metadata configuration.\n"
+ "Received %s state meta for a %sInputStreamOperator.",
stateMetadataList.size(),
inputNumOfOperator > 2
? "Multiple"
: inputNumOfOperator == 2 ? "Two" : "One"));
// the state index should not contain duplicates, and should start from 0 to inputNum - 1
List<Integer> normalizedIndexList =
stateMetadataList.stream()
.map(StateMetadata::getStateIndex)
.sorted()
.distinct()
.collect(Collectors.toList());
Preconditions.checkArgument(
normalizedIndexList.size() == inputNumOfOperator
&& normalizedIndexList.get(0) == 0
&& normalizedIndexList.get(inputNumOfOperator - 1)
== inputNumOfOperator - 1,
"The compiled plan contains inconsistent state metadata configuration.\n"
+ "The state index should not contain duplicates and start from 0 (inclusive) "
+ "and monotonically increase to the input size (exclusive) of the operator.");
} | 3.68 |
flink_DateTimeUtils_parseDate | /** Returns the epoch days since 1970-01-01. */
public static int parseDate(String dateStr, String fromFormat) {
 // It is OK to use UTC, we just want to get the epoch days
// TODO use offset, better performance
long ts = internalParseTimestampMillis(dateStr, fromFormat, TimeZone.getTimeZone("UTC"));
ZoneId zoneId = ZoneId.of("UTC");
Instant instant = Instant.ofEpochMilli(ts);
ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, zoneId);
return ymdToUnixDate(zdt.getYear(), zdt.getMonthValue(), zdt.getDayOfMonth());
} | 3.68 |
hadoop_JobBase_setLongValue | /**
* Set the given counter to the given value
*
* @param name
* the counter name
* @param value
* the value for the counter
*/
protected void setLongValue(Object name, long value) {
this.longCounters.put(name, Long.valueOf(value));
} | 3.68 |
hibernate-validator_AnnotationApiHelper_keepLowestTypePerHierarchy | /**
* <p>
* Returns a set containing the "lowest" type per hierarchy contained in the
* input set. The following examples shall demonstrate the behavior.
* </p>
* <ul>
* <li>
* Input: {@code String}; Output: {@code String}</li>
* <li>
* Input: {@code Object}, {@code String}; Output:
* {@code String}</li>
* <li>
* Input: {@code Object}, {@code Collection}, {@code List};
* Output: {@code List}</li>
* <li>
* Input: {@code Collection}, {@code Set}, {@code List};
* Output: {@code List}, {@code Set}</li>
* </ul>
*
* @param types A set of type mirrors.
*
* @return A set with the lowest types per hierarchy or null, if the input
* set was null.
*/
public Set<TypeMirror> keepLowestTypePerHierarchy(Set<TypeMirror> types) {
if ( types == null ) {
return null;
}
Set<TypeMirror> theValue = CollectionHelper.newHashSet();
for ( TypeMirror typeMirror1 : types ) {
boolean foundSubType = false;
for ( TypeMirror typeMirror2 : types ) {
if ( !typeUtils.isSameType( typeMirror2, typeMirror1 ) && typeUtils.isAssignable(
typeMirror2, typeMirror1
) ) {
foundSubType = true;
continue;
}
}
if ( !foundSubType ) {
theValue.add( typeMirror1 );
}
}
return theValue;
} | 3.68 |
pulsar_TimeAverageMessageData_getUpdatedValue | // Update the average of a sample using the number of samples, the previous
// average, and a new sample.
private double getUpdatedValue(final double oldAverage, final double newSample) {
// Note that for numSamples == 1, this returns newSample.
// This ensures that default stats get overwritten after the first
// update.
return ((numSamples - 1) * oldAverage + newSample) / numSamples;
} | 3.68 |
dubbo_TTable_padding | /**
* set padding
*
 * @param padding padding
 * @return this instance, for call chaining
*/
public TTable padding(int padding) {
this.padding = padding;
return this;
} | 3.68 |
framework_ApplicationConfiguration_getHeartbeatInterval | /**
* @return The interval in seconds between heartbeat requests, or a
* non-positive number if heartbeat is disabled.
*/
public int getHeartbeatInterval() {
return heartbeatInterval;
} | 3.68 |
framework_InMemoryDataProvider_setSortOrder | /**
* Sets the property and direction to use as the default sorting for this
* data provider. This overrides the sorting set by any other method that
* manipulates the default sorting of this data provider.
* <p>
* The default sorting is used if the query defines no sorting. The default
* sorting is also used to determine the ordering of items that are
* considered equal by the sorting defined in the query.
*
* @see #setSortComparator(SerializableComparator)
* @see #addSortOrder(ValueProvider, SortDirection)
*
* @param valueProvider
 * the value provider that defines the property to sort by, not
* <code>null</code>
* @param sortDirection
* the sort direction to use, not <code>null</code>
*/
public default <V extends Comparable<? super V>> void setSortOrder(
ValueProvider<T, V> valueProvider, SortDirection sortDirection) {
setSortComparator(InMemoryDataProviderHelpers
.propertyComparator(valueProvider, sortDirection));
} | 3.68 |
hudi_HoodieAsyncService_shutdown | /**
* Request shutdown either forcefully or gracefully. Graceful shutdown allows the service to finish up the current
 * round of work and shut down. For graceful shutdown, it waits until the service is shut down.
*
* @param force Forcefully shutdown
*/
public void shutdown(boolean force) {
if (!shutdownRequested || force) {
shutdownRequested = true;
shutdown = true;
if (executor != null) {
if (force) {
executor.shutdownNow();
} else {
executor.shutdown();
try {
// Wait for some max time after requesting shutdown
executor.awaitTermination(24, TimeUnit.HOURS);
} catch (InterruptedException ie) {
LOG.error("Interrupted while waiting for shutdown", ie);
}
}
}
}
} | 3.68 |
framework_Page_getWindowName | /**
* Gets the window.name value of the browser window of this page.
*
* @since 7.2
*
* @return the window name, <code>null</code> if the name is not known
*/
public String getWindowName() {
return windowName;
} | 3.68 |
framework_Tree_paintContent | /**
* Paints any needed component-specific things to the given UIDL stream.
*
* @see AbstractComponent#paintContent(PaintTarget)
*/
@Override
public void paintContent(PaintTarget target) throws PaintException {
initialPaint = false;
if (partialUpdate) {
target.addAttribute("partialUpdate", true);
target.addAttribute("rootKey", itemIdMapper.key(expandedItemId));
} else {
getCaptionChangeListener().clear();
// The tab ordering number
if (getTabIndex() > 0) {
target.addAttribute("tabindex", getTabIndex());
}
// Paint tree attributes
if (isSelectable()) {
target.addAttribute("selectmode",
(isMultiSelect() ? "multi" : "single"));
if (isMultiSelect()) {
target.addAttribute("multiselectmode",
multiSelectMode.toString());
}
} else {
target.addAttribute("selectmode", "none");
}
if (isNewItemsAllowed()) {
target.addAttribute("allownewitem", true);
}
if (isNullSelectionAllowed()) {
target.addAttribute("nullselect", true);
}
if (dragMode != TreeDragMode.NONE) {
target.addAttribute("dragMode", dragMode.ordinal());
}
if (isHtmlContentAllowed()) {
target.addAttribute(TreeConstants.ATTRIBUTE_HTML_ALLOWED, true);
}
}
// Initialize variables
final Set<Action> actionSet = new LinkedHashSet<Action>();
// rendered selectedKeys
LinkedList<String> selectedKeys = new LinkedList<String>();
// Iterates through hierarchical tree using a stack of iterators
final Stack<Iterator<?>> iteratorStack = new Stack<Iterator<?>>();
Collection<?> ids;
if (partialUpdate) {
ids = getChildren(expandedItemId);
} else {
ids = rootItemIds();
}
if (ids != null) {
iteratorStack.push(ids.iterator());
}
/*
* Body actions - Actions which has the target null and can be invoked
* by right clicking on the Tree body
*/
if (actionHandlers != null) {
final List<String> keys = new ArrayList<String>();
for (Handler ah : actionHandlers) {
// Getting action for the null item, which in this case
// means the body item
final Action[] aa = ah.getActions(null, this);
if (aa != null) {
for (int ai = 0; ai < aa.length; ai++) {
final String akey = actionMapper.key(aa[ai]);
actionSet.add(aa[ai]);
keys.add(akey);
}
}
}
target.addAttribute("alb", keys.toArray());
}
while (!iteratorStack.isEmpty()) {
// Gets the iterator for current tree level
final Iterator<?> i = iteratorStack.peek();
// If the level is finished, back to previous tree level
if (!i.hasNext()) {
// Removes used iterator from the stack
iteratorStack.pop();
// Closes node
if (!iteratorStack.isEmpty()) {
target.endTag("node");
}
} else {
// Adds the item on current level
final Object itemId = i.next();
// Starts the item / node
final boolean isNode = areChildrenAllowed(itemId);
if (isNode) {
target.startTag("node");
} else {
target.startTag("leaf");
}
if (itemStyleGenerator != null) {
String stylename = itemStyleGenerator.getStyle(this,
itemId);
if (stylename != null) {
target.addAttribute(TreeConstants.ATTRIBUTE_NODE_STYLE,
stylename);
}
}
if (itemDescriptionGenerator != null) {
String description = itemDescriptionGenerator
.generateDescription(this, itemId, null);
if (description != null && !description.equals("")) {
target.addAttribute("descr", description);
}
}
// Adds the attributes
target.addAttribute(TreeConstants.ATTRIBUTE_NODE_CAPTION,
getItemCaption(itemId));
final Resource icon = getItemIcon(itemId);
if (icon != null) {
target.addAttribute(TreeConstants.ATTRIBUTE_NODE_ICON,
getItemIcon(itemId));
target.addAttribute(TreeConstants.ATTRIBUTE_NODE_ICON_ALT,
getItemIconAlternateText(itemId));
}
final String key = itemIdMapper.key(itemId);
target.addAttribute("key", key);
if (isSelected(itemId)) {
target.addAttribute("selected", true);
selectedKeys.add(key);
}
if (areChildrenAllowed(itemId) && isExpanded(itemId)) {
target.addAttribute("expanded", true);
}
// Add caption change listener
getCaptionChangeListener().addNotifierForItem(itemId);
// Actions
if (actionHandlers != null) {
final List<String> keys = new ArrayList<String>();
for (Action.Handler ah : actionHandlers) {
final Action[] aa = ah.getActions(itemId, this);
if (aa != null) {
for (int ai = 0; ai < aa.length; ai++) {
final String akey = actionMapper.key(aa[ai]);
actionSet.add(aa[ai]);
keys.add(akey);
}
}
}
target.addAttribute("al", keys.toArray());
}
// Adds the children if expanded, or close the tag
if (isExpanded(itemId) && hasChildren(itemId)
&& areChildrenAllowed(itemId)) {
iteratorStack.push(getChildren(itemId).iterator());
} else {
if (isNode) {
target.endTag("node");
} else {
target.endTag("leaf");
}
}
}
}
// Actions
if (!actionSet.isEmpty()) {
target.addVariable(this, "action", "");
target.startTag("actions");
final Iterator<Action> i = actionSet.iterator();
while (i.hasNext()) {
final Action a = i.next();
target.startTag("action");
if (a.getCaption() != null) {
target.addAttribute(TreeConstants.ATTRIBUTE_ACTION_CAPTION,
a.getCaption());
}
if (a.getIcon() != null) {
target.addAttribute(TreeConstants.ATTRIBUTE_ACTION_ICON,
a.getIcon());
}
target.addAttribute("key", actionMapper.key(a));
target.endTag("action");
}
target.endTag("actions");
}
if (partialUpdate) {
partialUpdate = false;
} else {
// Selected
target.addVariable(this, "selected",
selectedKeys.toArray(new String[selectedKeys.size()]));
// Expand and collapse
target.addVariable(this, "expand", new String[] {});
target.addVariable(this, "collapse", new String[] {});
// New items
target.addVariable(this, "newitem", new String[] {});
if (dropHandler != null) {
dropHandler.getAcceptCriterion().paint(target);
}
}
} | 3.68 |
hadoop_OBSDataBlocks_position | /**
* Get the current buffer position.
*
* @return the buffer position
*/
public synchronized int position() {
return byteBuffer.position();
} | 3.68 |
framework_Calendar_setHandler | /**
* Set the handler for the given type information. Mirrors
* {@link #addListener(String, Class, Object, Method) addListener} from
* AbstractComponent
*
* @param eventId
* A unique id for the event. Usually one of
* {@link CalendarEventId}
* @param eventType
* The class of the event, most likely a subclass of
* {@link CalendarComponentEvent}
* @param listener
* A listener that listens to the given event
* @param listenerMethod
 * The method on the listener to call when the event is triggered
*/
protected void setHandler(String eventId, Class<?> eventType,
SerializableEventListener listener, Method listenerMethod) {
if (handlers.get(eventId) != null) {
removeListener(eventId, eventType, handlers.get(eventId));
handlers.remove(eventId);
}
if (listener != null) {
addListener(eventId, eventType, listener, listenerMethod);
handlers.put(eventId, listener);
}
} | 3.68 |
zxing_Encoder_encode | /**
* Encodes the given binary content as an Aztec symbol
*
* @param data input data string
* @param minECCPercent minimal percentage of error check words (According to ISO/IEC 24778:2008,
* a minimum of 23% + 3 words is recommended)
* @param userSpecifiedLayers if non-zero, a user-specified value for the number of layers
* @param charset character set to mark using ECI; if null, no ECI code will be inserted, and the
 * default encoding of ISO/IEC 8859-1 will be assumed by readers.
* @return Aztec symbol matrix with metadata
*/
public static AztecCode encode(byte[] data, int minECCPercent, int userSpecifiedLayers, Charset charset) {
// High-level encode
BitArray bits = new HighLevelEncoder(data, charset).encode();
// stuff bits and choose symbol size
int eccBits = bits.getSize() * minECCPercent / 100 + 11;
int totalSizeBits = bits.getSize() + eccBits;
boolean compact;
int layers;
int totalBitsInLayer;
int wordSize;
BitArray stuffedBits;
if (userSpecifiedLayers != DEFAULT_AZTEC_LAYERS) {
compact = userSpecifiedLayers < 0;
layers = Math.abs(userSpecifiedLayers);
if (layers > (compact ? MAX_NB_BITS_COMPACT : MAX_NB_BITS)) {
throw new IllegalArgumentException(
String.format("Illegal value %s for layers", userSpecifiedLayers));
}
totalBitsInLayer = totalBitsInLayer(layers, compact);
wordSize = WORD_SIZE[layers];
int usableBitsInLayers = totalBitsInLayer - (totalBitsInLayer % wordSize);
stuffedBits = stuffBits(bits, wordSize);
if (stuffedBits.getSize() + eccBits > usableBitsInLayers) {
throw new IllegalArgumentException("Data to large for user specified layer");
}
if (compact && stuffedBits.getSize() > wordSize * 64) {
// Compact format only allows 64 data words, though C4 can hold more words than that
throw new IllegalArgumentException("Data to large for user specified layer");
}
} else {
wordSize = 0;
stuffedBits = null;
// We look at the possible table sizes in the order Compact1, Compact2, Compact3,
// Compact4, Normal4,... Normal(i) for i < 4 isn't typically used since Compact(i+1)
// is the same size, but has more data.
for (int i = 0; ; i++) {
if (i > MAX_NB_BITS) {
throw new IllegalArgumentException("Data too large for an Aztec code");
}
compact = i <= 3;
layers = compact ? i + 1 : i;
totalBitsInLayer = totalBitsInLayer(layers, compact);
if (totalSizeBits > totalBitsInLayer) {
continue;
}
// [Re]stuff the bits if this is the first opportunity, or if the
// wordSize has changed
if (stuffedBits == null || wordSize != WORD_SIZE[layers]) {
wordSize = WORD_SIZE[layers];
stuffedBits = stuffBits(bits, wordSize);
}
int usableBitsInLayers = totalBitsInLayer - (totalBitsInLayer % wordSize);
if (compact && stuffedBits.getSize() > wordSize * 64) {
// Compact format only allows 64 data words, though C4 can hold more words than that
continue;
}
if (stuffedBits.getSize() + eccBits <= usableBitsInLayers) {
break;
}
}
}
BitArray messageBits = generateCheckWords(stuffedBits, totalBitsInLayer, wordSize);
// generate mode message
int messageSizeInWords = stuffedBits.getSize() / wordSize;
BitArray modeMessage = generateModeMessage(compact, layers, messageSizeInWords);
// allocate symbol
int baseMatrixSize = (compact ? 11 : 14) + layers * 4; // not including alignment lines
int[] alignmentMap = new int[baseMatrixSize];
int matrixSize;
if (compact) {
// no alignment marks in compact mode, alignmentMap is a no-op
matrixSize = baseMatrixSize;
for (int i = 0; i < alignmentMap.length; i++) {
alignmentMap[i] = i;
}
} else {
matrixSize = baseMatrixSize + 1 + 2 * ((baseMatrixSize / 2 - 1) / 15);
int origCenter = baseMatrixSize / 2;
int center = matrixSize / 2;
for (int i = 0; i < origCenter; i++) {
int newOffset = i + i / 15;
alignmentMap[origCenter - i - 1] = center - newOffset - 1;
alignmentMap[origCenter + i] = center + newOffset + 1;
}
}
BitMatrix matrix = new BitMatrix(matrixSize);
// draw data bits
for (int i = 0, rowOffset = 0; i < layers; i++) {
int rowSize = (layers - i) * 4 + (compact ? 9 : 12);
for (int j = 0; j < rowSize; j++) {
int columnOffset = j * 2;
for (int k = 0; k < 2; k++) {
if (messageBits.get(rowOffset + columnOffset + k)) {
matrix.set(alignmentMap[i * 2 + k], alignmentMap[i * 2 + j]);
}
if (messageBits.get(rowOffset + rowSize * 2 + columnOffset + k)) {
matrix.set(alignmentMap[i * 2 + j], alignmentMap[baseMatrixSize - 1 - i * 2 - k]);
}
if (messageBits.get(rowOffset + rowSize * 4 + columnOffset + k)) {
matrix.set(alignmentMap[baseMatrixSize - 1 - i * 2 - k], alignmentMap[baseMatrixSize - 1 - i * 2 - j]);
}
if (messageBits.get(rowOffset + rowSize * 6 + columnOffset + k)) {
matrix.set(alignmentMap[baseMatrixSize - 1 - i * 2 - j], alignmentMap[i * 2 + k]);
}
}
}
rowOffset += rowSize * 8;
}
// draw mode message
drawModeMessage(matrix, compact, matrixSize, modeMessage);
// draw alignment marks
if (compact) {
drawBullsEye(matrix, matrixSize / 2, 5);
} else {
drawBullsEye(matrix, matrixSize / 2, 7);
for (int i = 0, j = 0; i < baseMatrixSize / 2 - 1; i += 15, j += 16) {
for (int k = (matrixSize / 2) & 1; k < matrixSize; k += 2) {
matrix.set(matrixSize / 2 - j, k);
matrix.set(matrixSize / 2 + j, k);
matrix.set(k, matrixSize / 2 - j);
matrix.set(k, matrixSize / 2 + j);
}
}
}
AztecCode aztec = new AztecCode();
aztec.setCompact(compact);
aztec.setSize(matrixSize);
aztec.setLayers(layers);
aztec.setCodeWords(messageSizeInWords);
aztec.setMatrix(matrix);
return aztec;
} | 3.68 |
flink_ExternalResourceUtils_createStaticExternalResourceInfoProviderFromConfig | /**
 * Instantiate {@link StaticExternalResourceInfoProvider} for all enabled external resources.
*/
public static ExternalResourceInfoProvider createStaticExternalResourceInfoProviderFromConfig(
Configuration configuration, PluginManager pluginManager) {
final Map<String, Long> externalResourceAmountMap =
getExternalResourceAmountMap(configuration);
LOG.info("Enabled external resources: {}", externalResourceAmountMap.keySet());
return createStaticExternalResourceInfoProvider(
externalResourceAmountMap,
externalResourceDriversFromConfig(configuration, pluginManager));
} | 3.68 |
flink_Tuple7_toString | /**
* Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6), where
* the individual fields are the value returned by calling {@link Object#toString} on that
* field.
*
* @return The string representation of the tuple.
*/
@Override
public String toString() {
return "("
+ StringUtils.arrayAwareToString(this.f0)
+ ","
+ StringUtils.arrayAwareToString(this.f1)
+ ","
+ StringUtils.arrayAwareToString(this.f2)
+ ","
+ StringUtils.arrayAwareToString(this.f3)
+ ","
+ StringUtils.arrayAwareToString(this.f4)
+ ","
+ StringUtils.arrayAwareToString(this.f5)
+ ","
+ StringUtils.arrayAwareToString(this.f6)
+ ")";
} | 3.68 |
hudi_HoodieInstantTimeGenerator_getInstantForDateString | /**
* Creates an instant string given a valid date-time string.
* @param dateString A date-time string in the format yyyy-MM-dd HH:mm:ss[.SSS]
* @return A timeline instant
* @throws ParseException If we cannot parse the date string
*/
public static String getInstantForDateString(String dateString) {
try {
return getInstantFromTemporalAccessor(LocalDateTime.parse(dateString, MILLIS_GRANULARITY_DATE_FORMATTER));
} catch (Exception e) {
// Attempt to add the milliseconds in order to complete parsing
return getInstantFromTemporalAccessor(LocalDateTime.parse(
String.format("%s.%s", dateString, DEFAULT_MILLIS_EXT), MILLIS_GRANULARITY_DATE_FORMATTER));
}
} | 3.68 |
hbase_SnapshotManager_snapshotTable | /**
* Take a snapshot using the specified handler. On failure the snapshot temporary working
 * directory is removed. NOTE: prepareToTakeSnapshot() called before this one takes care of
 * rejecting the snapshot request if the table is busy with another snapshot/restore operation.
* @param snapshot the snapshot description
* @param handler the snapshot handler
*/
private synchronized void snapshotTable(SnapshotDescription snapshot,
final TakeSnapshotHandler handler) throws IOException {
try {
handler.prepare();
this.executorService.submit(handler);
this.snapshotHandlers.put(TableName.valueOf(snapshot.getTable()), handler);
} catch (Exception e) {
// cleanup the working directory by trying to delete it from the fs.
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir,
master.getConfiguration());
FileSystem workingDirFs = workingDir.getFileSystem(master.getConfiguration());
try {
if (!workingDirFs.delete(workingDir, true)) {
LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:"
+ ClientSnapshotDescriptionUtils.toString(snapshot));
}
} catch (IOException e1) {
LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:"
+ ClientSnapshotDescriptionUtils.toString(snapshot));
}
// fail the snapshot
throw new SnapshotCreationException("Could not build snapshot handler", e,
ProtobufUtil.createSnapshotDesc(snapshot));
}
} | 3.68 |
hibernate-validator_ReflectionHelper_isIndexable | /**
 * Indicates if the type is considered indexable (i.e. is a {@code List}, an array or a {@code Map}).
* <p>
* Note that it does not include {@code Set}s as they are not indexable.
*
* @param type the type to inspect.
*
* @return Returns true if the type is indexable.
*/
public static boolean isIndexable(Type type) {
return isList( type ) ||
isMap( type ) ||
TypeHelper.isArray( type );
} | 3.68 |
morf_MergeStatementBuilder_getTable | /**
* Gets the table to merge the data into.
*
* @return the table.
*/
TableReference getTable() {
return table;
} | 3.68 |
framework_Table_rowHeadersAreEnabled | /**
* Checks whether row headers are visible.
*
* @return {@code false} if row headers are hidden, {@code true} otherwise
* @since 7.3.9
*/
protected boolean rowHeadersAreEnabled() {
return getRowHeaderMode() != RowHeaderMode.HIDDEN;
} | 3.68 |
hudi_HoodieTableMetaClient_initTable | /**
* Init Table with the properties build by this builder.
*
* @param configuration The hadoop config.
* @param basePath The base path for hoodie table.
*/
public HoodieTableMetaClient initTable(Configuration configuration, String basePath)
throws IOException {
return HoodieTableMetaClient.initTableAndGetMetaClient(configuration, basePath, build());
} | 3.68 |
framework_VTree_toString | /*
* (non-Javadoc)
*
* @see com.google.gwt.user.client.ui.UIObject#toString()
*/
@Override
public String toString() {
return nodeCaptionSpan.getInnerText();
} | 3.68 |
hbase_MetricsAssignmentManager_getAssignProcMetrics | /** Returns Set of common metrics for assign procedure */
public ProcedureMetrics getAssignProcMetrics() {
return assignProcMetrics;
} | 3.68 |
hadoop_OBSPosixBucketUtils_fsDelete | // Recursively delete a folder that might not be empty.
static boolean fsDelete(final OBSFileSystem owner, final FileStatus status,
final boolean recursive)
throws IOException, ObsException {
long startTime = System.currentTimeMillis();
long threadId = Thread.currentThread().getId();
Path f = status.getPath();
String key = OBSCommonUtils.pathToKey(owner, f);
if (!status.isDirectory()) {
LOG.debug("delete: Path is a file");
trashObjectIfNeed(owner, key);
} else {
LOG.debug("delete: Path is a directory: {} - recursive {}", f,
recursive);
key = OBSCommonUtils.maybeAddTrailingSlash(key);
boolean isEmptyDir = OBSCommonUtils.isFolderEmpty(owner, key);
if (key.equals("")) {
return OBSCommonUtils.rejectRootDirectoryDelete(
owner.getBucket(), isEmptyDir, recursive);
}
if (!recursive && !isEmptyDir) {
LOG.warn("delete: Path is not empty: {} - recursive {}", f,
recursive);
throw new PathIsNotEmptyDirectoryException(f.toString());
}
if (isEmptyDir) {
LOG.debug(
"delete: Deleting fake empty directory {} - recursive {}",
f, recursive);
OBSCommonUtils.deleteObject(owner, key);
} else {
LOG.debug(
"delete: Deleting objects for directory prefix {} to "
+ "delete - recursive {}", f, recursive);
trashFolderIfNeed(owner, key, f);
}
}
long endTime = System.currentTimeMillis();
LOG.debug("delete Path:{} thread:{}, timeUsedInMilliSec:{}", f,
threadId, endTime - startTime);
return true;
} | 3.68 |
hadoop_PeriodicRLESparseResourceAllocation_addInterval | /**
* Add resource for the specified interval. This function will be used by
* {@link InMemoryPlan} while placing reservations between 0 and timePeriod.
* The interval may include 0, but the end time must be strictly less than
* timePeriod.
*
* @param interval {@link ReservationInterval} to which the specified resource
* is to be added.
* @param resource {@link Resource} to be added to the interval specified.
* @return true if addition is successful, false otherwise
*/
public boolean addInterval(ReservationInterval interval, Resource resource) {
long startTime = interval.getStartTime();
long endTime = interval.getEndTime();
if (startTime >= 0 && endTime > startTime && endTime <= timePeriod) {
return super.addInterval(interval, resource);
} else {
LOG.info("Cannot set capacity beyond end time: " + timePeriod + " was ("
+ interval.toString() + ")");
return false;
}
} | 3.68 |
morf_XmlDataSetProducer_remove | /**
* @see java.util.Iterator#remove()
*/
@Override
public void remove() {
throw new UnsupportedOperationException("Cannot remove item from a record iterator");
} | 3.68 |
hadoop_PeriodicService_stopPeriodic | /**
* Stop the periodic task.
*/
protected synchronized void stopPeriodic() {
if (this.isRunning) {
LOG.info("{} is shutting down", this.serviceName);
this.isRunning = false;
this.scheduler.shutdownNow();
}
} | 3.68 |
hadoop_NodeLabelsUtils_getNodeAttributesByName | /**
* Returns a set of node attributes whose name exists in the provided
* <code>attributeNames</code> list.
*
* @param attributeNames For this given list of attribute names get the
* cluster NodeAttributes
* @param clusterNodeAttributes set of node Attributes
* @return set of Node Attributes which maps to the give attributes names
*/
public static Set <NodeAttribute> getNodeAttributesByName(
Set<String> attributeNames, Set<NodeAttribute> clusterNodeAttributes) {
return clusterNodeAttributes.stream()
.filter(attribute -> attributeNames
.contains(attribute.getAttributeKey().getAttributeName()))
.collect(Collectors.toSet());
} | 3.68 |
framework_Panel_addClickListener | /**
* Add a click listener to the Panel. The listener is called whenever the
* user clicks inside the Panel. Also when the click targets a component
* inside the Panel, provided the targeted component does not prevent the
* click event from propagating.
*
* @see Registration
*
* @param listener
* The listener to add, not null
* @return a registration object for removing the listener
* @since 8.0
*/
public Registration addClickListener(ClickListener listener) {
return addListener(EventId.CLICK_EVENT_IDENTIFIER, ClickEvent.class,
listener, ClickListener.clickMethod);
} | 3.68 |
hbase_ChoreService_scheduleChore | /**
* Schedule a chore.
* @param chore Chore to be scheduled. If the chore is already scheduled with another ChoreService
* instance, that schedule will be cancelled (i.e. a Chore can only ever be scheduled
* with a single ChoreService instance).
* @return true when the chore was successfully scheduled. false when the scheduling failed
* (typically occurs when a chore is scheduled during shutdown of service)
*/
public boolean scheduleChore(ScheduledChore chore) {
if (chore == null) {
return false;
}
// always lock chore first to prevent dead lock
synchronized (chore) {
synchronized (this) {
try {
// Chores should only ever be scheduled with a single ChoreService. If the choreService
// is changing, cancel any existing schedules of this chore.
if (chore.getChoreService() == this) {
LOG.warn("Chore {} has already been scheduled with us", chore);
return false;
}
if (chore.getPeriod() <= 0) {
LOG.info("Chore {} is disabled because its period is not positive.", chore);
return false;
}
LOG.info("Chore {} is enabled.", chore);
if (chore.getChoreService() != null) {
LOG.info("Cancel chore {} from its previous service", chore);
chore.getChoreService().cancelChore(chore);
}
chore.setChoreService(this);
ScheduledFuture<?> future =
scheduler.scheduleAtFixedRate(TraceUtil.tracedRunnable(chore, chore.getName()),
chore.getInitialDelay(), chore.getPeriod(), chore.getTimeUnit());
scheduledChores.put(chore, future);
return true;
} catch (Exception e) {
LOG.error("Could not successfully schedule chore: {}", chore.getName(), e);
return false;
}
}
}
} | 3.68 |
hbase_RawByte_encodeByte | /**
* Write instance {@code val} into buffer {@code buff}.
*/
public int encodeByte(byte[] buff, int offset, byte val) {
return Bytes.putByte(buff, offset, val);
} | 3.68 |
pulsar_ConfigurationStringUtil_castToString | /**
* The Bookkeeper configuration class converts comma delimited strings to ArrayLists, by default. Use
* this method to ensure a configuration value is a {@link String}.
* @param obj - object to convert to a string
* @return The object's conversion to a string where Lists map to a comma delimited list.
*/
static String castToString(Object obj) {
if (null == obj) {
return "";
}
if (obj instanceof List<?>) {
List<String> result = new ArrayList<>();
for (Object o : (List<?>) obj) {
result.add((String) o);
}
return String.join(",", result);
} else {
return obj.toString();
}
} | 3.68 |
shardingsphere-elasticjob_ExceptionUtils_transform | /**
* Transform throwable to string.
*
* @param cause cause
* @return string
*/
public static String transform(final Throwable cause) {
if (null == cause) {
return "";
}
StringWriter result = new StringWriter();
try (PrintWriter writer = new PrintWriter(result)) {
cause.printStackTrace(writer);
}
return result.toString();
} | 3.68 |
streampipes_ProcessingElementBuilder_create | /**
* Creates a new processing element using the builder pattern. If no label and description is
* given
* for an element,
* {@link org.apache.streampipes.sdk.builder.AbstractProcessingElementBuilder#withLocales(Locales...)}
* must be called.
*
 * @param id A unique identifier of the new element, e.g., com.mycompany.sink.mynewdatasink
 * @param version the version of the element
*/
public static ProcessingElementBuilder create(String id, int version) {
return new ProcessingElementBuilder(id, version);
} | 3.68 |
framework_Tree_handleSelectedItems | /**
* Handles the selection
*
* @param variables
* The variables sent to the server from the client
*/
private void handleSelectedItems(Map<String, Object> variables) {
// Converts the key-array to id-set
final LinkedList<Object> s = new LinkedList<Object>();
for (String key : (String[]) variables.get("selected")) {
final Object id = itemIdMapper.get(key);
if (!isNullSelectionAllowed()
&& (id == null || id == getNullSelectionItemId())) {
// skip empty selection if nullselection is not allowed
markAsDirty();
} else if (id != null && containsId(id)) {
s.add(id);
}
}
if (!isNullSelectionAllowed() && s.isEmpty()) {
// empty selection not allowed, keep old value
markAsDirty();
return;
}
setValue(s, true);
} | 3.68 |
hadoop_SubmitterUserResolver_needsTargetUsersList | /**
* {@inheritDoc}
* <p>
* Since {@link SubmitterUserResolver} returns the user name who is running
* gridmix, it doesn't need a target list of users.
*/
public boolean needsTargetUsersList() {
return false;
} | 3.68 |
hbase_TraceUtil_createClientSpan | /**
* Create a span with {@link SpanKind#CLIENT}.
*/
public static Span createClientSpan(String name) {
return createSpan(name, SpanKind.CLIENT);
} | 3.68 |
flink_Execution_sendCancelRpcCall | /**
* This method sends a CancelTask message to the instance of the assigned slot.
*
* <p>The sending is tried up to NUM_CANCEL_CALL_TRIES times.
*/
private void sendCancelRpcCall(int numberRetries) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final ComponentMainThreadExecutor jobMasterMainThreadExecutor =
getVertex().getExecutionGraphAccessor().getJobMasterMainThreadExecutor();
CompletableFuture<Acknowledge> cancelResultFuture =
FutureUtils.retry(
() -> taskManagerGateway.cancelTask(attemptId, rpcTimeout),
numberRetries,
jobMasterMainThreadExecutor);
cancelResultFuture.whenComplete(
(ack, failure) -> {
if (failure != null) {
fail(new Exception("Task could not be canceled.", failure));
}
});
}
} | 3.68 |
flink_ExecutionVertex_getTaskNameWithSubtaskIndex | /**
* Creates a simple name representation in the style 'taskname (x/y)', where 'taskname' is the
* name as returned by {@link #getTaskName()}, 'x' is the parallel subtask index as returned by
* {@link #getParallelSubtaskIndex()}{@code + 1}, and 'y' is the total number of tasks, as
* returned by {@link #getTotalNumberOfParallelSubtasks()}.
*
* @return A simple name representation in the form 'myTask (2/7)'
*/
@Override
public String getTaskNameWithSubtaskIndex() {
return this.taskNameWithSubtask;
} | 3.68 |
framework_VScrollTable_getHierarchyColumnIndex | /**
* This method exists for the needs of {@link VTreeTable} only. Not part of
* the official API, <b>extend at your own risk</b>. May be removed or
* replaced in the future.
*
* @return index of TreeTable's hierarchy column, or -1 if not applicable
*/
protected int getHierarchyColumnIndex() {
return -1;
} | 3.68 |
hadoop_AuditingFunctions_callableWithinAuditSpan | /**
* Given a callable, return a new callable which
* activates and deactivates the span around the inner invocation.
* @param auditSpan audit span
* @param operation operation
* @param <T> type of result
* @return a new invocation.
*/
public static <T> Callable<T> callableWithinAuditSpan(
@Nullable AuditSpan auditSpan,
Callable<T> operation) {
return auditSpan == null
? operation
: () -> {
auditSpan.activate();
return operation.call();
};
} | 3.68 |
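A sketch of a call site, assuming an AuditSpan obtained from the active auditor; the wrapped operation (lookupLength) is a hypothetical helper, not part of the source:

    static Callable<Long> auditedLength(AuditSpan span, String key) {
      // Wrap a hypothetical length lookup so it runs with the given span active.
      Callable<Long> task = () -> lookupLength(key); // lookupLength is illustrative
      return AuditingFunctions.callableWithinAuditSpan(span, task);
    }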
hadoop_StageConfig_getTaskAttemptId | /**
* ID of this specific attempt at a task.
*/
public String getTaskAttemptId() {
return taskAttemptId;
} | 3.68 |
morf_HumanReadableStatementHelper_generateAliasedFieldAssignmentString | /**
* Generates a string describing a field reference update.
*
* <p>Package visibility for testing.</p>
*
* @param field the field to describe.
* @return a string containing the human-readable description of the field update.
*/
static String generateAliasedFieldAssignmentString(final AliasedField field) {
if (field instanceof CaseStatement) {
final StringBuilder sb = new StringBuilder();
for (WhenCondition when : ((CaseStatement)field).getWhenConditions()) {
sb.append(String.format("%n - If %s then set %s to %s", generateCriterionString(when.getCriterion(), false), generateFieldSymbolString(field), generateFieldValueString(when.getValue())));
}
sb.append(String.format("%n - Otherwise set %s to %s", generateFieldSymbolString(field), generateFieldValueString(((CaseStatement)field).getDefaultValue())));
return sb.toString();
} else if (field instanceof Cast) {
if (((Cast)field).getExpression() instanceof FieldReference) {
return String.format("%n - Set %s to %s's value", generateFieldSymbolString(field), ((FieldReference)((Cast)field).getExpression()).getName());
} else {
return String.format("%n - Set %s to %s", generateFieldSymbolString(field), generateFieldValueString(field));
}
} else if (field instanceof FieldReference) {
return String.format("%n - Set %s to %s's value", generateFieldSymbolString(field), ((FieldReference)field).getName());
} else {
return String.format("%n - Set %s to %s", generateFieldSymbolString(field), generateFieldValueString(field));
}
} | 3.68 |
flink_TypeSerializerSchemaCompatibility_isCompatibleAfterMigration | /**
* Returns whether or not the type of the compatibility is {@link
* Type#COMPATIBLE_AFTER_MIGRATION}.
*
* @return whether or not the type of the compatibility is {@link
* Type#COMPATIBLE_AFTER_MIGRATION}.
*/
public boolean isCompatibleAfterMigration() {
return resultType == Type.COMPATIBLE_AFTER_MIGRATION;
} | 3.68 |
flink_MetricListener_getHistogram | /**
* Get registered {@link Histogram} with identifier relative to the root metric group.
*
* @param identifier identifier relative to the root metric group
* @return Optional registered histogram
*/
public Optional<Histogram> getHistogram(String... identifier) {
return getMetric(Histogram.class, identifier);
} | 3.68 |
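A hedged test-side sketch; it assumes MetricListener exposes its root group via getMetricGroup() for the component under test to register metrics against, so treat that wiring as an assumption:

    MetricListener listener = new MetricListener();
    // ... pass listener.getMetricGroup() to the component under test so it registers its metrics ...
    Optional<Histogram> histogram = listener.getHistogram("myOperator", "latencyHistogram");
    histogram.ifPresent(h -> System.out.println(h.getCount()));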
hbase_Scan_setCacheBlocks | /**
* Set whether blocks should be cached for this Scan.
* <p>
* This is true by default. When true, default settings of the table and family are used (this
* will never override caching blocks if the block cache is disabled for that family or entirely).
* @param cacheBlocks if false, default settings are overridden and blocks will not be cached
*/
public Scan setCacheBlocks(boolean cacheBlocks) {
this.cacheBlocks = cacheBlocks;
return this;
} | 3.68 |
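A typical call site, as a sketch; the open Table instance named table and the caching value are illustrative:

    Scan scan = new Scan();
    scan.setCacheBlocks(false); // avoid polluting the block cache during a one-off full scan
    scan.setCaching(500);       // rows fetched per RPC; value is illustrative
    try (ResultScanner results = table.getScanner(scan)) {
      for (Result r : results) {
        // process each row
      }
    }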
hadoop_JobContextImpl_getProgressible | /**
* Get the progress mechanism for reporting progress.
*
* @return progress mechanism
*/
public Progressable getProgressible() {
return progress;
} | 3.68 |
hbase_Get_getRow | /**
* Method for retrieving the get's row
*/
@Override
public byte[] getRow() {
return this.row;
} | 3.68 |
hbase_WALSplitUtil_moveWAL | /**
* Move WAL. Used to move processed WALs to archive or bad WALs to corrupt WAL dir. WAL may have
* already been moved; makes allowance.
*/
public static void moveWAL(FileSystem fs, Path p, Path targetDir) throws IOException {
if (fs.exists(p)) {
if (!CommonFSUtils.renameAndSetModifyTime(fs, p, targetDir)) {
LOG.warn("Failed move of {} to {}", p, targetDir);
} else {
LOG.info("Moved {} to {}", p, targetDir);
}
}
} | 3.68 |
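A hedged example of invoking the helper; conf is an existing Hadoop Configuration and the paths are illustrative:

    FileSystem fs = FileSystem.get(conf);
    Path processedWal = new Path("/hbase/WALs/server1/wal.0001"); // illustrative
    Path archiveDir = new Path("/hbase/oldWALs");                 // illustrative
    WALSplitUtil.moveWAL(fs, processedWal, archiveDir);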
hudi_SourceFormatAdapter_fetchNewDataInAvroFormat | /**
 * Fetch new data in Avro format. If the source provides data in a different format, it is translated to Avro format.
*/
public InputBatch<JavaRDD<GenericRecord>> fetchNewDataInAvroFormat(Option<String> lastCkptStr, long sourceLimit) {
switch (source.getSourceType()) {
case AVRO:
//don't need to sanitize because it's already avro
return ((Source<JavaRDD<GenericRecord>>) source).fetchNext(lastCkptStr, sourceLimit);
case JSON: {
//sanitizing is done inside the convertor in transformJsonToGenericRdd if enabled
InputBatch<JavaRDD<String>> r = ((Source<JavaRDD<String>>) source).fetchNext(lastCkptStr, sourceLimit);
JavaRDD<GenericRecord> eventsRdd = transformJsonToGenericRdd(r);
        return new InputBatch<>(Option.ofNullable(eventsRdd), r.getCheckpointForNextBatch(), r.getSchemaProvider());
}
case ROW: {
//we do the sanitizing here if enabled
InputBatch<Dataset<Row>> r = ((Source<Dataset<Row>>) source).fetchNext(lastCkptStr, sourceLimit);
return new InputBatch<>(Option.ofNullable(r.getBatch().map(
rdd -> {
SchemaProvider originalProvider = UtilHelpers.getOriginalSchemaProvider(r.getSchemaProvider());
return (originalProvider instanceof FilebasedSchemaProvider || (originalProvider instanceof SchemaRegistryProvider))
// If the source schema is specified through Avro schema,
// pass in the schema for the Row-to-Avro conversion
// to avoid nullability mismatch between Avro schema and Row schema
? HoodieSparkUtils.createRdd(rdd, HOODIE_RECORD_STRUCT_NAME, HOODIE_RECORD_NAMESPACE, true,
org.apache.hudi.common.util.Option.ofNullable(r.getSchemaProvider().getSourceSchema())
).toJavaRDD() : HoodieSparkUtils.createRdd(rdd,
HOODIE_RECORD_STRUCT_NAME, HOODIE_RECORD_NAMESPACE, false, Option.empty()).toJavaRDD();
})
.orElse(null)), r.getCheckpointForNextBatch(), r.getSchemaProvider());
}
case PROTO: {
//TODO([HUDI-5830]) implement field name sanitization
InputBatch<JavaRDD<Message>> r = ((Source<JavaRDD<Message>>) source).fetchNext(lastCkptStr, sourceLimit);
AvroConvertor convertor = new AvroConvertor(r.getSchemaProvider().getSourceSchema());
return new InputBatch<>(Option.ofNullable(r.getBatch().map(rdd -> rdd.map(convertor::fromProtoMessage)).orElse(null)),
r.getCheckpointForNextBatch(), r.getSchemaProvider());
}
default:
throw new IllegalArgumentException("Unknown source type (" + source.getSourceType() + ")");
}
} | 3.68 |
dubbo_RdsRouteRuleManager_getRuleListeners | // for test
static ConcurrentHashMap<String, Set<XdsRouteRuleListener>> getRuleListeners() {
return RULE_LISTENERS;
} | 3.68 |
framework_Calendar_isEventClickAllowed | /**
* Is the user allowed to trigger click events. Returns {@code true} by
 * default. Subclasses can override this method to disallow firing event
 * clicks received from the client side.
*
* @return true if the client is allowed to click events
* @see #isClientChangeAllowed()
* @deprecated As of 7.4, override {@link #fireEventClick(Integer)} instead.
*/
@Deprecated
protected boolean isEventClickAllowed() {
return true;
} | 3.68 |
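Because this is a protected hook (and deprecated in favour of overriding fireEventClick(Integer)), the intended extension point is a subclass override; a minimal sketch:

    public class ReadOnlyCalendar extends Calendar {
        @Override
        protected boolean isEventClickAllowed() {
            return false; // ignore event clicks coming from the client
        }
    }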
querydsl_PathMetadataFactory_forListAccess | /**
* Create a new PathMetadata instance for indexed list access
*
* @param parent parent path
* @param index index of element
* @return list access path
*/
public static PathMetadata forListAccess(Path<?> parent, @Range(from = 0, to = Integer.MAX_VALUE) int index) {
return new PathMetadata(parent, index, PathType.LISTVALUE_CONSTANT);
} | 3.68 |
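A hedged sketch of creating metadata for an indexed element; the ad-hoc parent path built with ExpressionUtils.path is illustrative (ordinarily the parent comes from a generated query type):

    // Metadata describing something like "orders.get(2)"
    Path<?> orders = ExpressionUtils.path(List.class, "orders");
    PathMetadata secondOrder = PathMetadataFactory.forListAccess(orders, 2);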
streampipes_SupportedFormats_smileFormat | /**
 * Defines that a pipeline element (data processor or data sink) supports processing messages
 * arriving in Smile format.
*
* @return The resulting {@link org.apache.streampipes.model.grounding.TransportFormat}.
*/
public static TransportFormat smileFormat() {
return new TransportFormat(MessageFormat.SMILE);
} | 3.68 |
pulsar_LoadManagerShared_filterDomainsNotHavingLeastNumberAntiAffinityNamespaces | /**
 * Computes the least number of namespaces owned by any domain and then filters out all the domains
 * that own more namespaces than this count.
*
* @param brokerToAntiAffinityNamespaceCount
* @param candidates
* @param brokerToDomainMap
*/
private static void filterDomainsNotHavingLeastNumberAntiAffinityNamespaces(
Map<String, Integer> brokerToAntiAffinityNamespaceCount, Set<String> candidates,
Map<String, String> brokerToDomainMap) {
if (brokerToDomainMap == null || brokerToDomainMap.isEmpty()) {
return;
}
final Map<String, Integer> domainNamespaceCount = new HashMap<>();
int leastNamespaceCount = Integer.MAX_VALUE;
candidates.forEach(broker -> {
final String domain = brokerToDomainMap.getOrDefault(broker, DEFAULT_DOMAIN);
final int count = brokerToAntiAffinityNamespaceCount.getOrDefault(broker, 0);
domainNamespaceCount.compute(domain, (domainName, nsCount) -> nsCount == null ? count : nsCount + count);
});
// find leastNameSpaceCount
for (Entry<String, Integer> domainNsCountEntry : domainNamespaceCount.entrySet()) {
if (domainNsCountEntry.getValue() < leastNamespaceCount) {
leastNamespaceCount = domainNsCountEntry.getValue();
}
}
final int finalLeastNamespaceCount = leastNamespaceCount;
// only keep domain brokers which has leastNamespaceCount
candidates.removeIf(broker -> {
Integer nsCount = domainNamespaceCount.get(brokerToDomainMap.getOrDefault(broker, DEFAULT_DOMAIN));
return nsCount != null && nsCount != finalLeastNamespaceCount;
});
} | 3.68 |
flink_DateTimeUtils_of | /**
* Returns a {@code TimeUnitRange} with a given start and end unit.
*
* @param startUnit Start unit
* @param endUnit End unit
* @return Time unit range, or null if not valid
*/
public static TimeUnitRange of(TimeUnit startUnit, TimeUnit endUnit) {
return MAP.get(new Pair<>(startUnit, endUnit));
} | 3.68 |
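A small sketch, assuming this of() lives on the Calcite-style TimeUnitRange enum nested in DateTimeUtils and that MAP only holds standard SQL interval qualifiers:

    TimeUnitRange yearToMonth = TimeUnitRange.of(TimeUnit.YEAR, TimeUnit.MONTH); // a valid range
    TimeUnitRange reversed = TimeUnitRange.of(TimeUnit.MONTH, TimeUnit.YEAR);    // presumably null: not a valid qualifier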
hbase_StoreScanner_reopenAfterFlush | /** Returns if top of heap has changed (and KeyValueHeap has to try the next KV) */
protected final boolean reopenAfterFlush() throws IOException {
// here we can make sure that we have a Store instance so no null check on store.
Cell lastTop = heap.peek();
// When we have the scan object, should we not pass it to getScanners() to get a limited set of
// scanners? We did so in the constructor and we could have done it now by storing the scan
// object from the constructor
List<KeyValueScanner> scanners;
flushLock.lock();
try {
List<KeyValueScanner> allScanners =
new ArrayList<>(flushedstoreFileScanners.size() + memStoreScannersAfterFlush.size());
allScanners.addAll(flushedstoreFileScanners);
allScanners.addAll(memStoreScannersAfterFlush);
scanners = selectScannersFrom(store, allScanners);
// Clear the current set of flushed store files scanners so that they don't get added again
flushedstoreFileScanners.clear();
memStoreScannersAfterFlush.clear();
} finally {
flushLock.unlock();
}
// Seek the new scanners to the last key
seekScanners(scanners, lastTop, false, parallelSeekEnabled);
// remove the older memstore scanner
for (int i = currentScanners.size() - 1; i >= 0; i--) {
if (!currentScanners.get(i).isFileScanner()) {
scannersForDelayedClose.add(currentScanners.remove(i));
} else {
// we add the memstore scanner to the end of currentScanners
break;
}
}
// add the newly created scanners on the flushed files and the current active memstore scanner
addCurrentScanners(scanners);
// Combine all seeked scanners with a heap
resetKVHeap(this.currentScanners, store.getComparator());
resetQueryMatcher(lastTop);
if (heap.peek() == null || store.getComparator().compareRows(lastTop, this.heap.peek()) != 0) {
LOG.info("Storescanner.peek() is changed where before = " + lastTop.toString()
+ ",and after = " + heap.peek());
topChanged = true;
} else {
topChanged = false;
}
return topChanged;
} | 3.68 |
framework_Tree_setItemIcon | /**
* Sets the icon for an item.
*
* @param itemId
* the id of the item to be assigned an icon.
* @param icon
* the icon to use or null.
*
* @param altText
* the alternative text for the icon
*/
public void setItemIcon(Object itemId, Resource icon, String altText) {
if (itemId != null) {
super.setItemIcon(itemId, icon);
if (icon == null) {
itemIconAlts.remove(itemId);
} else if (altText == null) {
throw new IllegalArgumentException(NULL_ALT_EXCEPTION_MESSAGE);
} else {
itemIconAlts.put(itemId, altText);
}
markAsDirty();
}
} | 3.68 |
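A short usage sketch; the item id, theme resource path, and alt text are illustrative:

    Tree tree = new Tree();
    tree.addItem("doc1");
    tree.setItemIcon("doc1", new ThemeResource("icons/document.png"), "Document");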
streampipes_NetioRestAdapter_applyConfiguration | /**
 * Extracts the user configuration from the SpecificAdapterStreamDescription and sets the local variables.
*
* @param extractor StaticPropertyExtractor
*/
private void applyConfiguration(IParameterExtractor extractor) {
this.ip = extractor.singleValueParameter(NETIO_IP, String.class);
this.username = extractor.singleValueParameter(NETIO_USERNAME, String.class);
this.password = extractor.secretValue(NETIO_PASSWORD);
this.pollingInterval = extractor.singleValueParameter(NETIO_POLLING_INTERVAL, Integer.class);
} | 3.68 |