name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
---|---|---|
hbase_Bytes_toFloat | /**
 * Converts the bytes at the specified byte array position to a float value. Presumes the float is
 * encoded as IEEE 754 floating-point "single format"
* @param bytes array to convert
* @param offset offset into array
* @return Float made from passed byte array.
*/
public static float toFloat(byte[] bytes, int offset) {
return Float.intBitsToFloat(toInt(bytes, offset, SIZEOF_INT));
} | 3.68 |
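As a rough illustration of the decoding above, here is a minimal, self-contained sketch using plain `java.nio.ByteBuffer` rather than HBase's own `Bytes`/`toInt` helpers: a float is written as four big-endian bytes, read back as an int, and the bits are reinterpreted with `Float.intBitsToFloat`.

```java
import java.nio.ByteBuffer;

public class FloatBytesDemo {
    public static void main(String[] args) {
        // Encode: four big-endian bytes carrying the IEEE 754 "single format" bits
        float original = 3.14f;
        byte[] bytes = ByteBuffer.allocate(4).putFloat(original).array();

        // Decode: read the same four bytes back as an int, then reinterpret the bits as a float
        int bits = ByteBuffer.wrap(bytes).getInt();
        float decoded = Float.intBitsToFloat(bits);

        System.out.println(decoded); // prints 3.14
    }
}
```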
hbase_AccessController_getActiveUser | /**
* Returns the active user to which authorization checks should be applied. If we are in the
* context of an RPC call, the remote user is used, otherwise the currently logged in user is
* used.
*/
private User getActiveUser(ObserverContext<?> ctx) throws IOException {
// for non-rpc handling, fallback to system user
Optional<User> optionalUser = ctx.getCaller();
if (optionalUser.isPresent()) {
return optionalUser.get();
}
return userProvider.getCurrent();
} | 3.68 |
hadoop_HdfsFileStatus_children | /**
* Set the number of children for this entity (default = 0).
* @param childrenNum Number of children
* @return This Builder instance
*/
public Builder children(int childrenNum) {
this.childrenNum = childrenNum;
return this;
} | 3.68 |
framework_Table_setHtmlContentAllowed | /**
* If set to true, all strings passed to {@link #setText(String...)}
* will be rendered as HTML.
*
 * @param htmlContentAllowed true to render strings as HTML, false to render them as plain text
*/
public void setHtmlContentAllowed(boolean htmlContentAllowed) {
this.htmlContentAllowed = htmlContentAllowed;
} | 3.68 |
incubator-hugegraph-toolchain_EdgeLabelController_delete | /**
 * Deleting an edge label doesn't need the checkUsing check
*/
@DeleteMapping
public void delete(@PathVariable("connId") int connId,
@RequestParam("names") List<String> names) {
for (String name : names) {
this.elService.checkExist(name, connId);
this.elService.remove(name, connId);
}
} | 3.68 |
hadoop_RawErasureDecoder_decode | /**
 * Decode with inputs and erasedIndexes, generating outputs. See above for more details.
 *
 * Note, for both input and output ECChunks, mixing of on-heap buffers and direct buffers
 * is not allowed.
*
* @param inputs input buffers to read data from
* @param erasedIndexes indexes of erased units in the inputs array
* @param outputs output buffers to put decoded data into according to
* erasedIndexes, ready for read after the call
* @throws IOException if the decoder is closed
*/
public synchronized void decode(ECChunk[] inputs, int[] erasedIndexes,
ECChunk[] outputs) throws IOException {
ByteBuffer[] newInputs = CoderUtil.toBuffers(inputs);
ByteBuffer[] newOutputs = CoderUtil.toBuffers(outputs);
decode(newInputs, erasedIndexes, newOutputs);
} | 3.68 |
framework_AbstractOrderedLayoutConnector_getMaxHeight | /**
* Measures the maximum height of the layout in pixels
*/
private int getMaxHeight() {
int highestNonRelative = -1;
int highestRelative = -1;
LayoutManager layoutManager = getLayoutManager();
for (ComponentConnector child : getChildComponents()) {
Widget childWidget = child.getWidget();
Slot slot = getWidget().getSlot(childWidget);
Element captionElement = slot.getCaptionElement();
CaptionPosition captionPosition = slot.getCaptionPosition();
int pixelHeight = layoutManager
.getOuterHeight(childWidget.getElement());
if (pixelHeight == -1) {
// Height has not yet been measured -> postpone actions that
// depend on the max height
return -1;
}
boolean hasRelativeHeight = slot.hasRelativeHeight();
boolean captionSizeShouldBeAddedtoComponentHeight = captionPosition == CaptionPosition.TOP
|| captionPosition == CaptionPosition.BOTTOM;
boolean includeCaptionHeight = captionElement != null
&& captionSizeShouldBeAddedtoComponentHeight;
if (includeCaptionHeight) {
int captionHeight = layoutManager.getOuterHeight(captionElement)
- getLayoutManager().getMarginHeight(captionElement);
if (captionHeight == -1) {
// Height has not yet been measured -> postpone actions that
// depend on the max height
return -1;
}
pixelHeight += captionHeight;
}
if (!hasRelativeHeight) {
if (pixelHeight > highestNonRelative) {
highestNonRelative = pixelHeight;
}
} else {
if (pixelHeight > highestRelative) {
highestRelative = pixelHeight;
}
}
}
return highestNonRelative > -1 ? highestNonRelative : highestRelative;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithGroupByList | /**
* Tests the group by in a select.
*/
@Test
public void testSelectWithGroupByList() {
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD), count(field(STRING_FIELD)), countDistinct(field(STRING_FIELD)))
.from(new TableReference(ALTERNATE_TABLE))
.groupBy(ImmutableList.of(field(STRING_FIELD), field(INT_FIELD), field(FLOAT_FIELD)));
String expectedSql = "SELECT stringField, COUNT(stringField), COUNT(DISTINCT stringField) FROM " + tableName(ALTERNATE_TABLE) + " GROUP BY stringField, intField, floatField";
assertEquals("Select with count function", expectedSql, testDialect.convertStatementToSQL(stmt));
} | 3.68 |
dubbo_ClassHelper_resolvePrimitiveClassName | /**
* Resolve the given class name as primitive class, if appropriate,
* according to the JVM's naming rules for primitive classes.
* <p>
* Also supports the JVM's internal class names for primitive arrays. Does
* <i>not</i> support the "[]" suffix notation for primitive arrays; this is
* only supported by {@link #forName}.
*
* @param name the name of the potentially primitive class
* @return the primitive class, or <code>null</code> if the name does not
* denote a primitive class or primitive array class
*/
public static Class<?> resolvePrimitiveClassName(String name) {
return ClassUtils.resolvePrimitiveClassName(name);
} | 3.68 |
hadoop_ReadWriteDiskValidatorMetrics_addWriteFileLatency | /**
* Add the file write latency to {@link MutableQuantiles} metrics.
*
* @param writeLatency file write latency in microseconds
*/
public void addWriteFileLatency(long writeLatency) {
if (fileWriteQuantiles != null) {
for (MutableQuantiles q : fileWriteQuantiles) {
q.add(writeLatency);
}
}
} | 3.68 |
flink_ResolvedSchema_getColumnNames | /** Returns all column names. It does not distinguish between different kinds of columns. */
public List<String> getColumnNames() {
return columns.stream().map(Column::getName).collect(Collectors.toList());
} | 3.68 |
framework_VScrollTable_isSelected | /**
* Has the row been selected?
*
* @return Returns true if selected, else false
*/
public boolean isSelected() {
return selected;
} | 3.68 |
hudi_KafkaOffsetGen_mergeRangesByTopicPartition | /**
* Merge ranges by topic partition, because we need to maintain the checkpoint with one offset range per topic partition.
* @param oldRanges to merge
* @return ranges merged by partition
*/
public static OffsetRange[] mergeRangesByTopicPartition(OffsetRange[] oldRanges) {
List<OffsetRange> newRanges = new ArrayList<>();
Map<TopicPartition, List<OffsetRange>> tpOffsets = Arrays.stream(oldRanges).collect(Collectors.groupingBy(OffsetRange::topicPartition));
for (Map.Entry<TopicPartition, List<OffsetRange>> entry : tpOffsets.entrySet()) {
long from = entry.getValue().stream().map(OffsetRange::fromOffset).min(Long::compare).get();
long until = entry.getValue().stream().map(OffsetRange::untilOffset).max(Long::compare).get();
newRanges.add(OffsetRange.create(entry.getKey(), from, until));
}
// make sure the result ranges is order by partition
newRanges.sort(SORT_BY_PARTITION);
return newRanges.toArray(new OffsetRange[0]);
} | 3.68 |
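The merge above is essentially a group-by-key followed by a min/max fold per group. A generic sketch with illustrative stand-in types (Java 16+ records, not Kafka's `TopicPartition` or Spark's `OffsetRange`):

```java
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class MergeRangesDemo {
    record Range(String partition, long from, long until) {}

    /** Produces exactly one range per partition: the minimum from-offset and maximum until-offset. */
    static List<Range> mergeByPartition(List<Range> ranges) {
        Map<String, List<Range>> grouped =
                ranges.stream().collect(Collectors.groupingBy(Range::partition));
        return grouped.entrySet().stream()
                .map(e -> new Range(
                        e.getKey(),
                        e.getValue().stream().mapToLong(Range::from).min().getAsLong(),
                        e.getValue().stream().mapToLong(Range::until).max().getAsLong()))
                .sorted(Comparator.comparing(Range::partition)) // keep the result ordered by partition
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<Range> merged = mergeByPartition(List.of(
                new Range("topic-0", 5, 10),
                new Range("topic-0", 10, 20),
                new Range("topic-1", 0, 7)));
        merged.forEach(System.out::println); // topic-0 covers [5, 20), topic-1 covers [0, 7)
    }
}
```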
flink_OutputFileConfig_getPartSuffix | /** The suffix for the part name. */
public String getPartSuffix() {
return partSuffix;
} | 3.68 |
flink_JobResourceRequirements_writeToJobGraph | /**
* Write {@link JobResourceRequirements resource requirements} into the configuration of a given
* {@link JobGraph}.
*
* @param jobGraph job graph to write requirements to
* @param jobResourceRequirements resource requirements to write
* @throws IOException in case we're not able to serialize requirements into the configuration
*/
public static void writeToJobGraph(
JobGraph jobGraph, JobResourceRequirements jobResourceRequirements) throws IOException {
InstantiationUtil.writeObjectToConfig(
jobResourceRequirements,
jobGraph.getJobConfiguration(),
JOB_RESOURCE_REQUIREMENTS_KEY);
} | 3.68 |
framework_HierarchicalContainer_getParent | /*
* Gets the ID of the parent of the specified Item. Don't add a JavaDoc
* comment here, we use the default documentation from implemented
* interface.
*/
@Override
public Object getParent(Object itemId) {
if (filteredParent != null) {
return filteredParent.get(itemId);
}
return parent.get(itemId);
} | 3.68 |
hadoop_ConnectionPool_getNumConnections | /**
* Number of connections in the pool.
*
* @return Number of connections.
*/
protected int getNumConnections() {
return this.connections.size();
} | 3.68 |
hbase_ThrottledInputStream_toString | /** {@inheritDoc} */
@Override
public String toString() {
return "ThrottledInputStream{" + "bytesRead=" + bytesRead + ", maxBytesPerSec=" + maxBytesPerSec
+ ", bytesPerSec=" + getBytesPerSec() + ", totalSleepTime=" + totalSleepTime + '}';
} | 3.68 |
framework_CustomFieldConnector_setChildComponents | /*
* (non-Javadoc)
*
* @see com.vaadin.client.HasComponentsConnector#setChildren
* (java.util.Collection)
*/
@Override
public void setChildComponents(List<ComponentConnector> childComponents) {
this.childComponents = childComponents;
} | 3.68 |
framework_Calendar_setLastVisibleHourOfDay | /**
* This method restricts the hours that are shown per day. This affects the
* weekly view. The general contract is that <b>firstHour < lastHour</b>.
* <p>
* Note that this only affects the rendering process. Events are still
* requested by the dates set by {@link #setStartDate(Date)} and
* {@link #setEndDate(Date)}.
* <p>
* You can use {@link #autoScaleVisibleHoursOfDay()} for automatic scaling
* of the visible hours based on current events.
*
* @param lastHour
 *            the last hour of the day to show, between 0 and 23
* @see #autoScaleVisibleHoursOfDay()
*/
public void setLastVisibleHourOfDay(int lastHour) {
if (this.lastHour != lastHour && lastHour >= 0 && lastHour <= 23
&& lastHour >= getFirstVisibleHourOfDay()) {
this.lastHour = lastHour;
getState().lastHourOfDay = lastHour;
}
} | 3.68 |
morf_OracleDialect_indexPostDeploymentStatements | /**
* Generate the SQL to alter the index back to NOPARALLEL LOGGING.
*
* @param index The index we want to alter.
* @return The SQL to alter the index.
*/
private String indexPostDeploymentStatements(Index index) {
return new StringBuilder()
.append("ALTER INDEX ")
.append(schemaNamePrefix())
.append(index.getName())
.append(" NOPARALLEL LOGGING")
.toString();
} | 3.68 |
hbase_PrefixFilter_toByteArray | /** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
FilterProtos.PrefixFilter.Builder builder = FilterProtos.PrefixFilter.newBuilder();
if (this.prefix != null) builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix));
return builder.build().toByteArray();
} | 3.68 |
framework_Tree_setParent | /*
* (non-Javadoc)
*
* @see com.vaadin.data.Container.Hierarchical#setParent(java.lang.Object ,
* java.lang.Object)
*/
@Override
public boolean setParent(Object itemId, Object newParentId) {
final boolean success = ((Container.Hierarchical) items)
.setParent(itemId, newParentId);
if (success) {
markAsDirty();
}
return success;
} | 3.68 |
flink_HandlerRequest_getRequestBody | /**
* Returns the request body.
*
* @return request body
*/
public R getRequestBody() {
return requestBody;
} | 3.68 |
querydsl_Expressions_cases | /**
* Create a builder for a case expression
*
* @return case builder
*/
public static CaseBuilder cases() {
return new CaseBuilder();
} | 3.68 |
hbase_MetricsHeapMemoryManager_updateMemStoreDeltaSizeHistogram | /**
* Update the increase/decrease memstore size histogram
* @param memStoreDeltaSize the tuning result of memstore.
*/
public void updateMemStoreDeltaSizeHistogram(final int memStoreDeltaSize) {
source.updateMemStoreDeltaSizeHistogram(memStoreDeltaSize);
} | 3.68 |
hbase_SnapshotInfo_inArchive | /** Returns true if the file is in the archive */
public boolean inArchive() {
return this.inArchive;
} | 3.68 |
framework_NativeButtonClick_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 14022;
} | 3.68 |
framework_VButton_isDraggable | /**
* Returns if this button has been made <code>draggable</code> or not.
*
* @return {@literal true} if draggable is enabled, {@literal false}
* otherwise
*/
private boolean isDraggable() {
return getElement().getPropertyBoolean("draggable");
} | 3.68 |
morf_OracleDialect_createNewSequence | /**
* Returns a SQL statement to create a sequence for a table's autonumber column
*
* @param table Table for which the sequence should be created.
* @param onColumn The autonumber column.
* @return SQL string.
*/
private String createNewSequence(Table table, Column onColumn) {
int autoNumberStart = onColumn.getAutoNumberStart() == -1 ? 1 : onColumn.getAutoNumberStart();
return new StringBuilder("CREATE SEQUENCE ")
.append(schemaNamePrefix())
.append(sequenceName(table.getName()))
.append(" START WITH ")
.append(autoNumberStart)
.append(" CACHE 2000")
.toString();
} | 3.68 |
flink_CustomSinkOperatorUidHashes_builder | /**
* Creates a builder to construct {@link CustomSinkOperatorUidHashes}.
*
* @return {@link SinkOperatorUidHashesBuilder}
*/
public static SinkOperatorUidHashesBuilder builder() {
return new SinkOperatorUidHashesBuilder();
} | 3.68 |
flink_RichSqlInsert_isOverwrite | /**
* Returns whether the insert mode is overwrite (for whole table or for specific partitions).
*
* @return true if this is overwrite mode
*/
public boolean isOverwrite() {
return getModifierNode(RichSqlInsertKeyword.OVERWRITE) != null;
} | 3.68 |
flink_ZooKeeperUtils_startCuratorFramework | /**
* Starts a {@link CuratorFramework} instance and connects it to the given ZooKeeper quorum from
* a builder.
*
* @param builder {@link CuratorFrameworkFactory.Builder} A builder for curatorFramework.
* @param fatalErrorHandler {@link FatalErrorHandler} fatalErrorHandler to handle unexpected
* errors of {@link CuratorFramework}
* @return {@link CuratorFrameworkWithUnhandledErrorListener} instance
*/
@VisibleForTesting
public static CuratorFrameworkWithUnhandledErrorListener startCuratorFramework(
CuratorFrameworkFactory.Builder builder, FatalErrorHandler fatalErrorHandler) {
CuratorFramework cf = builder.build();
UnhandledErrorListener unhandledErrorListener =
(message, throwable) -> {
LOG.error(
"Unhandled error in curator framework, error message: {}",
message,
throwable);
// The exception thrown in UnhandledErrorListener will be caught by
// CuratorFramework. So we mostly trigger exit process or interact with main
// thread to inform the failure in FatalErrorHandler.
fatalErrorHandler.onFatalError(throwable);
};
cf.getUnhandledErrorListenable().addListener(unhandledErrorListener);
cf.start();
return new CuratorFrameworkWithUnhandledErrorListener(cf, unhandledErrorListener);
} | 3.68 |
hbase_HFile_getChecksumFailuresCount | /**
 * Number of checksum verification failures.
*/
public static final long getChecksumFailuresCount() {
return CHECKSUM_FAILURES.sum();
} | 3.68 |
hbase_NamespacesModel_setNamespaces | /**
* @param namespaces the namespace name array
*/
public void setNamespaces(List<String> namespaces) {
this.namespaces = namespaces;
} | 3.68 |
hadoop_Result_isPass | /**
* Should processing continue.
 * @return true if processing should continue, false otherwise.
*/
public boolean isPass() {
return this.success;
} | 3.68 |
AreaShop_GeneralRegion_getCurrent | /**
* Get the current number of regions in the group that is the limiting factor, assuming actionAllowed() is false.
* @return The current number of regions the player has
*/
public int getCurrent() {
return current;
} | 3.68 |
framework_AbsoluteLayoutConnector_onStateChanged | /*
* (non-Javadoc)
*
* @see
* com.vaadin.client.ui.AbstractComponentConnector#onStateChanged(com.vaadin
* .client.communication.StateChangeEvent)
*/
@Override
public void onStateChanged(StateChangeEvent stateChangeEvent) {
super.onStateChanged(stateChangeEvent);
clickEventHandler.handleEventHandlerRegistration();
// TODO Margin handling
for (ComponentConnector child : getChildComponents()) {
setChildWidgetPosition(child);
}
} | 3.68 |
pulsar_NonPersistentTopicsImpl_validateTopic | /*
* returns topic name with encoded Local Name
*/
private TopicName validateTopic(String topic) {
// Parsing will throw exception if name is not valid
return TopicName.get(topic);
} | 3.68 |
hadoop_DistributedSQLCounter_incrementCounterValue | /**
* Increments the counter by the given amount and
* returns the previous counter value.
*
* @param amount Amount to increase the counter.
* @return Previous counter value.
* @throws SQLException if querying the database fails.
*/
public int incrementCounterValue(int amount) throws SQLException {
// Disabling auto-commit to ensure that all statements on this transaction
// are committed at once.
try (Connection connection = connectionFactory.getConnection(false)) {
// Preventing dirty reads and non-repeatable reads to ensure that the
// value read will not be updated by a different connection.
if (connection.getTransactionIsolation() < Connection.TRANSACTION_REPEATABLE_READ) {
connection.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
}
try {
// Reading the counter value "FOR UPDATE" to lock the value record,
// forcing other connections to wait until this transaction is committed.
int lastValue = selectCounterValue(true, connection);
// Calculate the new counter value and handling overflow by
// resetting the counter to 0.
int newValue = lastValue + amount;
if (newValue < 0) {
lastValue = 0;
newValue = amount;
}
updateCounterValue(newValue, connection);
connection.commit();
return lastValue;
} catch (Exception e) {
// Rollback transaction to release table locks
connection.rollback();
throw e;
}
}
} | 3.68 |
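A stripped-down sketch of the same pattern with plain JDBC, assuming a hypothetical single-row table `COUNTER(VAL INT)`; the real class's schema, SQL, isolation-level handling, and overflow reset are omitted, and `SELECT ... FOR UPDATE` support varies between databases.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class CounterSketch {
    /** Increments the counter and returns the previous value, holding a row lock in between. */
    static int incrementAndGetPrevious(Connection connection, int amount) throws SQLException {
        connection.setAutoCommit(false); // commit the read and the write as one transaction
        try {
            int previous;
            // FOR UPDATE locks the row so concurrent callers block until we commit or roll back
            try (Statement stmt = connection.createStatement();
                 ResultSet rs = stmt.executeQuery("SELECT VAL FROM COUNTER FOR UPDATE")) {
                rs.next();
                previous = rs.getInt(1);
            }
            try (PreparedStatement update =
                         connection.prepareStatement("UPDATE COUNTER SET VAL = ?")) {
                update.setInt(1, previous + amount);
                update.executeUpdate();
            }
            connection.commit();
            return previous;
        } catch (SQLException e) {
            connection.rollback(); // release the row lock on failure
            throw e;
        }
    }
}
```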
framework_GridElement_getHeaderCount | /**
* Get header row count.
*
* @return Header row count
*/
public int getHeaderCount() {
return getSubPart("#header").findElements(By.xpath("./tr")).size();
} | 3.68 |
framework_WebBrowser_isEs6Supported | /**
* Checks if the browser supports ECMAScript 6, based on the user agent.
*
* @return <code>true</code> if the browser supports ES6, <code>false</code>
* otherwise.
* @since 8.1
*/
public boolean isEs6Supported() {
if (browserDetails == null) {
// Don't know, so assume no
return false;
}
return browserDetails.isEs6Supported();
} | 3.68 |
graphhopper_CHStorage_setLowShortcutWeightConsumer | /**
* Sets a callback called for shortcuts that are below the minimum weight. e.g. used to find/log mapping errors
*/
public void setLowShortcutWeightConsumer(Consumer<LowWeightShortcut> lowWeightShortcutConsumer) {
this.lowShortcutWeightConsumer = lowWeightShortcutConsumer;
} | 3.68 |
pulsar_ConsumerConfiguration_getSubscriptionInitialPosition | /**
 * @return the configured {@link SubscriptionInitialPosition} for the consumer
*/
public SubscriptionInitialPosition getSubscriptionInitialPosition(){
return conf.getSubscriptionInitialPosition();
} | 3.68 |
hadoop_ClasspathConstructor_getPathElements | /**
* Get a copy of the path list
* @return the JARs
*/
public List<String> getPathElements() {
return Collections.unmodifiableList(pathElements);
} | 3.68 |
pulsar_Producer_completed | /**
* Executed from managed ledger thread when the message is persisted.
*/
@Override
public void completed(Exception exception, long ledgerId, long entryId) {
if (exception != null) {
final ServerError serverError = getServerError(exception);
producer.cnx.execute(() -> {
if (!(exception instanceof TopicClosedException)) {
// For TopicClosed exception there's no need to send explicit error, since the client was
// already notified
long callBackSequenceId = Math.max(highestSequenceId, sequenceId);
producer.cnx.getCommandSender().sendSendError(producer.producerId, callBackSequenceId,
serverError, exception.getMessage());
}
producer.cnx.completedSendOperation(producer.isNonPersistentTopic, msgSize);
producer.publishOperationCompleted();
recycle();
});
} else {
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] [{}] triggered send callback. cnx {}, sequenceId {}", producer.topic,
producer.producerName, producer.producerId, producer.cnx.clientAddress(), sequenceId);
}
this.ledgerId = ledgerId;
this.entryId = entryId;
producer.cnx.execute(this);
}
} | 3.68 |
framework_Slot_setStyleNames | /**
* Sets the style names for the slot containing the widget.
*
* @param stylenames
* The style names for the slot
*/
protected void setStyleNames(String... stylenames) {
setStyleName(SLOT_CLASSNAME);
if (stylenames != null) {
for (String stylename : stylenames) {
addStyleDependentName(stylename);
}
}
// Ensure alignment style names are correct
setAlignment(alignment);
} | 3.68 |
hbase_ChunkCreator_getChunk | /**
 * Poll a chunk from the pool and reset it if not null; otherwise create a new chunk to return,
 * provided we have not yet created the maximum allowed number of chunks. When we have already
 * created the maximum allowed chunks and there are no free chunks as of now, return null. It is
 * then the responsibility of the caller to create a chunk. Note: Chunks returned by this pool
 * must be put back to the pool after use.
* @return a chunk
* @see #putbackChunks(Chunk)
*/
Chunk getChunk() {
Chunk chunk = reclaimedChunks.poll();
if (chunk != null) {
chunk.reset();
reusedChunkCount.increment();
} else {
// Make a chunk iff we have not yet created the maxCount chunks
while (true) {
long created = this.chunkCount.get();
if (created < this.maxCount) {
if (this.chunkCount.compareAndSet(created, created + 1)) {
chunk = createChunkForPool(chunkType, chunkSize);
break;
}
} else {
break;
}
}
}
return chunk;
} | 3.68 |
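The reuse-or-create-up-to-a-cap pattern in `getChunk` can be sketched generically. `BoundedPool` below is an illustrative stand-in, not HBase's `ChunkCreator`; its compare-and-set loop plays the same role as the one on `chunkCount` above.

```java
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

/** Reuse a pooled instance if one is free, otherwise create a new one up to a fixed cap. */
class BoundedPool<T> {
    private final ConcurrentLinkedQueue<T> reclaimed = new ConcurrentLinkedQueue<>();
    private final AtomicLong created = new AtomicLong();
    private final long maxCount;
    private final Supplier<T> factory;

    BoundedPool(long maxCount, Supplier<T> factory) {
        this.maxCount = maxCount;
        this.factory = factory;
    }

    /** Returns a pooled instance, a newly created one, or null once the cap is reached. */
    T get() {
        T item = reclaimed.poll();
        if (item != null) {
            return item;
        }
        while (true) {
            long current = created.get();
            if (current >= maxCount) {
                return null; // cap reached and nothing free right now; the caller must cope
            }
            if (created.compareAndSet(current, current + 1)) {
                return factory.get(); // we won the race to create one more instance
            }
        }
    }

    /** Instances must be returned here after use, mirroring putbackChunks above. */
    void putBack(T item) {
        reclaimed.offer(item);
    }
}
```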
flink_JoinOperator_projectTuple5 | /**
* Projects a pair of joined elements to a {@link Tuple} with the previously selected
* fields. Requires the classes of the fields of the resulting tuples.
*
* @return The projected data set.
* @see Tuple
* @see DataSet
*/
public <T0, T1, T2, T3, T4>
ProjectJoin<I1, I2, Tuple5<T0, T1, T2, T3, T4>> projectTuple5() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>> tType =
new TupleTypeInfo<Tuple5<T0, T1, T2, T3, T4>>(fTypes);
return new ProjectJoin<I1, I2, Tuple5<T0, T1, T2, T3, T4>>(
this.ds1,
this.ds2,
this.keys1,
this.keys2,
this.hint,
this.fieldIndexes,
this.isFieldInFirst,
tType,
this);
} | 3.68 |
flink_StreamExecTemporalSort_createSortProcTime | /** Create Sort logic based on processing time. */
private Transformation<RowData> createSortProcTime(
RowType inputType,
Transformation<RowData> inputTransform,
ExecNodeConfig config,
ClassLoader classLoader) {
// if the order has secondary sorting fields in addition to the proctime
if (sortSpec.getFieldSize() > 1) {
// skip the first field which is the proctime field and would be ordered by timer.
SortSpec specExcludeTime = sortSpec.createSubSortSpec(1);
GeneratedRecordComparator rowComparator =
ComparatorCodeGenerator.gen(
config,
classLoader,
"ProcTimeSortComparator",
inputType,
specExcludeTime);
ProcTimeSortOperator sortOperator =
new ProcTimeSortOperator(InternalTypeInfo.of(inputType), rowComparator);
OneInputTransformation<RowData, RowData> transform =
ExecNodeUtil.createOneInputTransformation(
inputTransform,
createTransformationMeta(TEMPORAL_SORT_TRANSFORMATION, config),
sortOperator,
InternalTypeInfo.of(inputType),
inputTransform.getParallelism(),
false);
// as input node is singleton exchange, its parallelism is 1.
if (inputsContainSingleton()) {
transform.setParallelism(1);
transform.setMaxParallelism(1);
}
EmptyRowDataKeySelector selector = EmptyRowDataKeySelector.INSTANCE;
transform.setStateKeySelector(selector);
transform.setStateKeyType(selector.getProducedType());
return transform;
} else {
// if the order is done only on proctime we only need to forward the elements
return inputTransform;
}
} | 3.68 |
dubbo_RandomLoadBalance_doSelect | /**
 * Select one invoker from a list using random criteria
*
* @param invokers List of possible invokers
* @param url URL
* @param invocation Invocation
* @param <T>
* @return The selected invoker
*/
@Override
protected <T> Invoker<T> doSelect(List<Invoker<T>> invokers, URL url, Invocation invocation) {
// Number of invokers
int length = invokers.size();
if (!needWeightLoadBalance(invokers, invocation)) {
return invokers.get(ThreadLocalRandom.current().nextInt(length));
}
// Every invoker has the same weight?
boolean sameWeight = true;
// weights[i] holds the cumulative weight up to and including invoker i
int[] weights = new int[length];
// The sum of weights
int totalWeight = 0;
for (int i = 0; i < length; i++) {
int weight = getWeight(invokers.get(i), invocation);
// Sum
totalWeight += weight;
// save for later use
weights[i] = totalWeight;
if (sameWeight && totalWeight != weight * (i + 1)) {
sameWeight = false;
}
}
if (totalWeight > 0 && !sameWeight) {
// If (not every invoker has the same weight & at least one invoker's weight>0), select randomly based on
// totalWeight.
int offset = ThreadLocalRandom.current().nextInt(totalWeight);
// Return an invoker based on the random value.
if (length <= 4) {
for (int i = 0; i < length; i++) {
if (offset < weights[i]) {
return invokers.get(i);
}
}
} else {
int i = Arrays.binarySearch(weights, offset);
if (i < 0) {
i = -i - 1;
} else {
while (weights[i + 1] == offset) {
i++;
}
i++;
}
return invokers.get(i);
}
}
// If all invokers have the same weight value or totalWeight=0, return evenly.
return invokers.get(ThreadLocalRandom.current().nextInt(length));
} | 3.68 |
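The weighted branch boils down to prefix sums plus a binary search over the cumulative weights. A standalone sketch, assuming strictly positive weights (the full implementation above also handles ties and a small-list fast path):

```java
import java.util.Arrays;
import java.util.concurrent.ThreadLocalRandom;

public class WeightedPickDemo {
    /** Picks an index with probability proportional to its weight. Assumes all weights > 0. */
    static int pick(int[] weights) {
        int[] cumulative = new int[weights.length];
        int total = 0;
        for (int i = 0; i < weights.length; i++) {
            total += weights[i];
            cumulative[i] = total; // running sum, as in the weights[] array above
        }
        int offset = ThreadLocalRandom.current().nextInt(total);
        // First index whose cumulative weight is strictly greater than the random offset
        int i = Arrays.binarySearch(cumulative, offset);
        return i >= 0 ? i + 1 : -i - 1;
    }

    public static void main(String[] args) {
        int[] weights = {1, 2, 7}; // index 2 should win roughly 70% of the time
        int[] hits = new int[weights.length];
        for (int n = 0; n < 100_000; n++) {
            hits[pick(weights)]++;
        }
        System.out.println(Arrays.toString(hits));
    }
}
```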
hbase_SnapshotFileCache_hasBeenModified | /**
* Check if the snapshot directory has been modified
* @param mtime current modification time of the directory
 * @return <tt>true</tt> if the modification time of the directory is newer than when we
 *         created <tt>this</tt>
*/
public boolean hasBeenModified(long mtime) {
return this.lastModified < mtime;
} | 3.68 |
hbase_RegionReplicationSink_stop | /**
* Stop the replication sink.
* <p/>
* Usually this should only be called when you want to close a region.
*/
public void stop() {
synchronized (entries) {
stopping = true;
clearAllEntries();
if (!sending) {
stopped = true;
entries.notifyAll();
}
}
} | 3.68 |
hbase_ClientUtils_utf8 | /**
 * Helper to translate a byte buffer to a UTF-8 string
* @param bb byte buffer
* @return UTF8 decoded string value
*/
public static String utf8(final ByteBuffer bb) {
// performance is not very critical here so we always copy the BB to a byte array
byte[] buf = new byte[bb.remaining()];
// duplicate so the get will not change the position of the original bb
bb.duplicate().get(buf);
return utf8(buf);
} | 3.68 |
rocketmq-connect_AbstractKafkaConnectSink_put | /**
* Put the records to the sink
*
* @param records sink records
*/
@Override
public void put(List<ConnectRecord> records) {
// convert sink data
List<SinkRecord> sinkRecords = new ArrayList<>();
records.forEach(connectRecord -> {
SinkRecord record = this.processSinkRecord(connectRecord);
sinkRecords.add(this.transforms(record));
});
sinkTask.put(sinkRecords);
} | 3.68 |
morf_ResultSetMismatch_getMismatchType | /**
* @return Mismatch type.
*/
public MismatchType getMismatchType() {
return mismatchType;
} | 3.68 |
framework_TableElementContextMenu_initProperties | // set up the properties (columns)
private void initProperties(Table table) {
for (int i = 0; i < COLUMNS; i++) {
table.addContainerProperty("property" + i, String.class,
"some value");
}
} | 3.68 |
framework_Table_getPropertyId | /**
* Returns the property id of context clicked column.
*
* @return property id; or <code>null</code> if we've clicked on the
* empty area of the Table
*/
public Object getPropertyId() {
return propertyId;
} | 3.68 |
hadoop_MembershipStoreImpl_getRepresentativeQuorum | /**
 * Picks the most recent entry in the subset that is most agreeable on the
 * specified field. 1) If a majority of the collection has the same value for
 * the field, the first sorted entry within the subset that matches the
 * majority value. 2) Otherwise the first sorted entry in the set of all
 * entries.
*
* @param records - Collection of state store record objects of the same type
* @return record that is most representative of the field name
*/
private MembershipState getRepresentativeQuorum(
Collection<MembershipState> records) {
// Collate objects by field value: field value -> order set of records
Map<FederationNamenodeServiceState, TreeSet<MembershipState>> occurenceMap =
new HashMap<>();
for (MembershipState record : records) {
FederationNamenodeServiceState state = record.getState();
TreeSet<MembershipState> matchingSet = occurenceMap.get(state);
if (matchingSet == null) {
// TreeSet orders elements by descending date via comparators
matchingSet = new TreeSet<>();
occurenceMap.put(state, matchingSet);
}
matchingSet.add(record);
}
// Select largest group
TreeSet<MembershipState> largestSet = new TreeSet<>();
for (TreeSet<MembershipState> matchingSet : occurenceMap.values()) {
if (largestSet.size() < matchingSet.size()) {
largestSet = matchingSet;
}
}
// If quorum, use the newest element here
if (largestSet.size() > records.size() / 2) {
return largestSet.first();
// Otherwise, return most recent by class comparator
} else if (records.size() > 0) {
TreeSet<MembershipState> sortedList = new TreeSet<>(records);
LOG.debug("Quorum failed, using most recent: {}", sortedList.first());
return sortedList.first();
} else {
return null;
}
} | 3.68 |
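The selection rule reduces to: take the newest record from the majority group if a strict majority exists, otherwise fall back to the newest record overall. A generic sketch with an illustrative Java 16+ record type rather than the actual `MembershipState`:

```java
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public class QuorumDemo {
    record Member(String state, long timestamp) {}

    /** Newest member of the majority group if a strict majority exists, else newest overall. */
    static Member representative(Collection<Member> records) {
        if (records.isEmpty()) {
            return null;
        }
        Comparator<Member> byTimestamp = Comparator.comparingLong(Member::timestamp);
        // Group by the contested field and find the largest group
        List<Member> largestGroup = records.stream()
                .collect(Collectors.groupingBy(Member::state))
                .values().stream()
                .max(Comparator.comparingInt(List::size))
                .orElseThrow();
        Collection<Member> pool =
                largestGroup.size() > records.size() / 2 ? largestGroup : records;
        return pool.stream().max(byTimestamp).orElseThrow();
    }

    public static void main(String[] args) {
        Member picked = representative(List.of(
                new Member("ACTIVE", 100), new Member("ACTIVE", 200), new Member("STANDBY", 300)));
        System.out.println(picked); // ACTIVE has the majority, so its newest entry (t=200) wins
    }
}
```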
hbase_ServerCommandLine_logProcessInfo | /**
* Logs information about the currently running JVM process including the environment variables.
* Logging of env vars can be disabled by setting {@code "hbase.envvars.logging.disabled"} to
* {@code "true"}.
* <p>
* If enabled, you can also exclude environment variables containing certain substrings by setting
* {@code "hbase.envvars.logging.skipwords"} to comma separated list of such substrings.
*/
public static void logProcessInfo(Configuration conf) {
logHBaseConfigs(conf);
// log environment variables unless asked not to
if (conf == null || !conf.getBoolean("hbase.envvars.logging.disabled", false)) {
Set<String> skipWords = new HashSet<>(DEFAULT_SKIP_WORDS);
if (conf != null) {
String[] confSkipWords = conf.getStrings("hbase.envvars.logging.skipwords");
if (confSkipWords != null) {
skipWords.addAll(Arrays.asList(confSkipWords));
}
}
nextEnv: for (Entry<String, String> entry : System.getenv().entrySet()) {
String key = entry.getKey().toLowerCase(Locale.ROOT);
String value = entry.getValue().toLowerCase(Locale.ROOT);
// exclude variables which may contain skip words
for (String skipWord : skipWords) {
if (key.contains(skipWord) || value.contains(skipWord)) continue nextEnv;
}
LOG.info("env:" + entry);
}
}
// and JVM info
logJVMInfo();
} | 3.68 |
framework_Potus_setTookOffice | /**
* @param tookOffice
* the tookOffice to set
*/
public void setTookOffice(Date tookOffice) {
this.tookOffice = tookOffice;
} | 3.68 |
querydsl_GeometryExpressions_extent | /**
* Returns the bounding box that bounds rows of geometries.
*
* @param collection geometry collection
* @return bounding box
*/
public static GeometryExpression<?> extent(Expression<? extends GeometryCollection> collection) {
return geometryOperation(SpatialOps.EXTENT, collection);
} | 3.68 |
hmily_XaResourceWrapped_commit0 | /**
 * Implemented by subclasses. Commit 0.
*
* @param xid the xid
* @param onePhase the one phase
* @throws XAException the xa exception
*/
void commit0(final Xid xid, final boolean onePhase) throws XAException {
} | 3.68 |
framework_LayoutManager_getPaddingHeight | /**
* Gets the padding height (top padding + bottom padding) of the given
* element, provided that it has been measured. These elements are
* guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured size for
* @return the measured padding height (top padding + bottom padding) of the
* element in pixels.
*/
public int getPaddingHeight(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getPaddingHeight();
} | 3.68 |
hudi_FlinkClientUtil_createMetaClient | /**
* Creates the meta client.
*/
public static HoodieTableMetaClient createMetaClient(String basePath) {
return HoodieTableMetaClient.builder().setBasePath(basePath).setConf(FlinkClientUtil.getHadoopConf()).build();
} | 3.68 |
flink_FlinkConnection_getWarnings | // TODO We currently do not support this, but we can't throw a SQLException here because we want
// to support jdbc tools such as beeline and sqlline.
@Override
public SQLWarning getWarnings() throws SQLException {
return null;
} | 3.68 |
flink_RpcEndpoint_registerResource | /**
* Register the given closeable resource to {@link CloseableRegistry}.
*
* @param closeableResource the given closeable resource
*/
protected void registerResource(Closeable closeableResource) {
try {
resourceRegistry.registerCloseable(closeableResource);
} catch (IOException e) {
throw new RuntimeException(
"Registry closeable resource " + closeableResource + " fail", e);
}
} | 3.68 |
morf_UpdateStatementBuilder_useParallelDml | /**
* Request that this statement is executed with a parallel execution plan for data manipulation language (DML). This request will have no effect unless the database implementation supports it and the feature is enabled.
*
 * <p>For statements that will affect a high percentage of rows in the table, a parallel execution plan may reduce the execution time, although the exact effect depends on
* the underlying database, the nature of the data and the nature of the query.</p>
*
* <p>Note that the use of parallel DML comes with restrictions, in particular, a table may not be accessed in the same transaction following a parallel DML execution. Please consult the Oracle manual section <em>Restrictions on Parallel DML</em> to check whether this hint is suitable.</p>
*
* @param degreeOfParallelism Degree of parallelism to be specified in the hint.
* @return this, for method chaining.
*/
public UpdateStatementBuilder useParallelDml(int degreeOfParallelism) {
hints.add(new UseParallelDml(degreeOfParallelism));
return this;
} | 3.68 |
hbase_CompactingMemStore_checkAndAddToActiveSize | /**
 * Check whether anything needs to be done based on the current active set size. The method is
* invoked upon every addition to the active set. For CompactingMemStore, flush the active set to
 * the read-only memory if its size is above the threshold
* @param currActive intended segment to update
* @param cellToAdd cell to be added to the segment
* @param memstoreSizing object to accumulate changed size
* @return true if the cell can be added to the currActive
*/
protected boolean checkAndAddToActiveSize(MutableSegment currActive, Cell cellToAdd,
MemStoreSizing memstoreSizing) {
long cellSize = MutableSegment.getCellLength(cellToAdd);
boolean successAdd = false;
while (true) {
long segmentDataSize = currActive.getDataSize();
if (!inWalReplay && segmentDataSize > inmemoryFlushSize) {
        // when replaying edits from the WAL there is no need for an in-memory flush regardless
        // of the size; otherwise the size is below the flush threshold, so try the atomic update
break;
}
if (currActive.compareAndSetDataSize(segmentDataSize, segmentDataSize + cellSize)) {
if (memstoreSizing != null) {
memstoreSizing.incMemStoreSize(cellSize, 0, 0, 0);
}
successAdd = true;
break;
}
}
if (!inWalReplay && currActive.getDataSize() > inmemoryFlushSize) {
// size above flush threshold so we flush in memory
this.tryFlushInMemoryAndCompactingAsync(currActive);
}
return successAdd;
} | 3.68 |
dubbo_TTable_getRowCount | /**
* get rows for the current column
*
* @return current column's rows
*/
public int getRowCount() {
return rows.size();
} | 3.68 |
hbase_Scan_getMaxResultSize | /** Returns the maximum result size in bytes. See {@link #setMaxResultSize(long)} */
public long getMaxResultSize() {
return maxResultSize;
} | 3.68 |
hbase_MasterRegistry_getDefaultMasterPort | /**
* Supplies the default master port we should use given the provided configuration.
* @param conf Configuration to parse from.
*/
private static int getDefaultMasterPort(Configuration conf) {
final int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
if (port == 0) {
// Master port may be set to 0. We should substitute the default port in that case.
return HConstants.DEFAULT_MASTER_PORT;
}
return port;
} | 3.68 |
hbase_HBaseFsckRepair_fixMultiAssignment | /**
* Fix multiple assignment by doing silent closes on each RS hosting the region and then force ZK
* unassigned node to OFFLINE to trigger assignment by master.
* @param connection HBase connection to the cluster
* @param region Region to undeploy
* @param servers list of Servers to undeploy from
*/
public static void fixMultiAssignment(Connection connection, RegionInfo region,
List<ServerName> servers) throws IOException, KeeperException, InterruptedException {
// Close region on the servers silently
for (ServerName server : servers) {
closeRegionSilentlyAndWait(connection, server, region);
}
// Force ZK node to OFFLINE so master assigns
forceOfflineInZK(connection.getAdmin(), region);
} | 3.68 |
flink_DriverUtils_isNullOrWhitespaceOnly | /**
* Checks if the string is null, empty, or contains only whitespace characters. A whitespace
* character is defined via {@link Character#isWhitespace(char)}.
*
* @param str The string to check
* @return True, if the string is null or blank, false otherwise.
*/
public static boolean isNullOrWhitespaceOnly(String str) {
if (str == null || str.length() == 0) {
return true;
}
final int len = str.length();
for (int i = 0; i < len; i++) {
if (!Character.isWhitespace(str.charAt(i))) {
return false;
}
}
return true;
} | 3.68 |
framework_AbstractDataProvider_addListener | /**
 * Registers a new listener with the specified activation method to listen to
* events generated by this component. If the activation method does not
* have any arguments the event object will not be passed to it when it's
* called.
*
* @param eventType
* the type of the listened event. Events of this type or its
* subclasses activate the listener.
* @param listener
* the object instance who owns the activation method.
* @param method
* the activation method.
* @return a registration for the listener
*/
protected Registration addListener(Class<?> eventType,
DataProviderListener<T> listener, Method method) {
if (eventRouter == null) {
eventRouter = new EventRouter();
}
return eventRouter.addListener(eventType, listener, method);
} | 3.68 |
hadoop_NodePlan_getTimeStamp | /**
* returns timestamp when this plan was created.
*
* @return long
*/
public long getTimeStamp() {
return timeStamp;
} | 3.68 |
flink_SqlColumnPosSpec_symbol | /**
* Creates a parse-tree node representing an occurrence of this keyword at a particular position
* in the parsed text.
*/
public SqlLiteral symbol(SqlParserPos pos) {
return SqlLiteral.createSymbol(this, pos);
} | 3.68 |
hbase_TableOutputFormat_checkOutputSpecs | /**
* Checks if the output table exists and is enabled.
* @param context The current context.
* @throws IOException When the check fails.
* @throws InterruptedException When the job is aborted.
* @see OutputFormat#checkOutputSpecs(JobContext)
*/
@Override
public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
Configuration hConf = getConf();
if (hConf == null) {
hConf = context.getConfiguration();
}
try (Connection connection = ConnectionFactory.createConnection(hConf);
Admin admin = connection.getAdmin()) {
TableName tableName = TableName.valueOf(hConf.get(OUTPUT_TABLE));
if (!admin.tableExists(tableName)) {
throw new TableNotFoundException(
"Can't write, table does not exist:" + tableName.getNameAsString());
}
if (!admin.isTableEnabled(tableName)) {
throw new TableNotEnabledException(
"Can't write, table is not enabled: " + tableName.getNameAsString());
}
}
} | 3.68 |
dubbo_ThreadlessExecutor_waitAndDrain | /**
 * Waits until there is a task, executes the task and all queued tasks (if there are any). The task is either a normal
* response or a timeout response.
*/
public void waitAndDrain(long deadline) throws InterruptedException {
throwIfInterrupted();
Runnable runnable = queue.poll();
if (runnable == null) {
if (waiter.compareAndSet(null, Thread.currentThread())) {
try {
while ((runnable = queue.poll()) == null && waiter.get() == Thread.currentThread()) {
long restTime = deadline - System.nanoTime();
if (restTime <= 0) {
return;
}
LockSupport.parkNanos(this, restTime);
throwIfInterrupted();
}
} finally {
waiter.compareAndSet(Thread.currentThread(), null);
}
}
}
do {
if (runnable != null) {
runnable.run();
}
} while ((runnable = queue.poll()) != null);
} | 3.68 |
hbase_OrderedBytes_decodeFloat64 | /**
* Decode a 64-bit floating point value using the fixed-length encoding.
* @see #encodeFloat64(PositionedByteRange, double, Order)
*/
public static double decodeFloat64(PositionedByteRange src) {
final byte header = src.get();
assert header == FIXED_FLOAT64 || header == DESCENDING.apply(FIXED_FLOAT64);
Order ord = header == FIXED_FLOAT64 ? ASCENDING : DESCENDING;
long val = ord.apply(src.get()) & 0xff;
for (int i = 1; i < 8; i++) {
val = (val << 8) + (ord.apply(src.get()) & 0xff);
}
val ^= (~val >> (Long.SIZE - 1)) | Long.MIN_VALUE;
return Double.longBitsToDouble(val);
} | 3.68 |
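The XOR line undoes a standard order-preserving encoding: positive doubles have only their sign bit flipped, negative doubles have every bit flipped, so byte-wise (unsigned) comparison of the encoded values matches numeric order. A self-contained sketch of both directions (not `OrderedBytes` itself, which also writes the header byte and handles descending order):

```java
public class OrderPreservingDoubleDemo {
    /** Encodes a double into a long whose unsigned ordering matches the double ordering. */
    static long encode(double d) {
        long bits = Double.doubleToLongBits(d);
        // Positive values: flip only the sign bit. Negative values: flip every bit.
        return bits ^ ((bits >> 63) | Long.MIN_VALUE);
    }

    /** Inverse transform; the XOR mirrors the line in decodeFloat64 above. */
    static double decode(long val) {
        val ^= (~val >> 63) | Long.MIN_VALUE;
        return Double.longBitsToDouble(val);
    }

    public static void main(String[] args) {
        double[] values = {-2.5, -0.0, 0.0, 1.0, 3.75};
        for (double v : values) {
            System.out.printf("%6.2f -> %016x -> %6.2f%n", v, encode(v), decode(v));
        }
        // Encoded longs compare (unsigned) in the same order as the doubles themselves
        System.out.println(Long.compareUnsigned(encode(-2.5), encode(1.0)) < 0); // true
    }
}
```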
pulsar_ManagedLedgerConfig_getPassword | /**
* @return the password
*/
public byte[] getPassword() {
return Arrays.copyOf(password, password.length);
} | 3.68 |
rocketmq-connect_Serdes_Integer | /**
* A serde for nullable {@code Integer} type.
*/
static public Serde<Integer> Integer() {
return new IntegerSerde();
} | 3.68 |
hadoop_S3ARemoteObject_openForRead | /**
* Opens a section of the file for reading.
*
* @param offset Start offset (0 based) of the section to read.
* @param size Size of the section to read.
* @return an {@code InputStream} corresponding to the given section of this file.
*
* @throws IOException if there is an error opening this file section for reading.
* @throws IllegalArgumentException if offset is negative.
* @throws IllegalArgumentException if offset is greater than or equal to file size.
* @throws IllegalArgumentException if size is greater than the remaining bytes.
*/
public ResponseInputStream<GetObjectResponse> openForRead(long offset, int size)
throws IOException {
Validate.checkNotNegative(offset, "offset");
Validate.checkLessOrEqual(offset, "offset", size(), "size()");
Validate.checkLessOrEqual(size, "size", size() - offset, "size() - offset");
streamStatistics.streamOpened();
final GetObjectRequest request = client
.newGetRequestBuilder(s3Attributes.getKey())
.range(S3AUtils.formatRange(offset, offset + size - 1))
.applyMutation(changeTracker::maybeApplyConstraint)
.build();
String operation = String.format(
"%s %s at %d", S3AInputStream.OPERATION_OPEN, uri, offset);
DurationTracker tracker = streamStatistics.initiateGetRequest();
ResponseInputStream<GetObjectResponse> object = null;
try {
object = Invoker.once(operation, uri, () -> client.getObject(request));
} catch (IOException e) {
tracker.failed();
throw e;
} finally {
tracker.close();
}
changeTracker.processResponse(object.response(), operation, offset);
return object;
} | 3.68 |
flink_HiveParserUtils_toImmutableSet | // converts a collection to guava ImmutableSet
private static Object toImmutableSet(Collection collection) {
try {
Class clz = useShadedImmutableSet ? shadedImmutableSetClz : immutableSetClz;
return HiveReflectionUtils.invokeMethod(
clz, null, "copyOf", new Class[] {Collection.class}, new Object[] {collection});
} catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
throw new FlinkHiveException("Failed to create immutable set", e);
}
} | 3.68 |
flink_SingleOutputStreamOperator_setResources | /**
* Sets the resources for this operator, the minimum and preferred resources are the same by
* default.
*
* @param resources The resources for this operator.
* @return The operator with set minimum and preferred resources.
*/
private SingleOutputStreamOperator<T> setResources(ResourceSpec resources) {
transformation.setResources(resources, resources);
return this;
} | 3.68 |
hbase_BulkLoadHFilesTool_validateFamiliesInHFiles | /**
* Checks whether there is any invalid family name in HFiles to be bulk loaded.
*/
private static void validateFamiliesInHFiles(TableDescriptor tableDesc,
Deque<LoadQueueItem> queue, boolean silence) throws IOException {
Set<String> familyNames = Arrays.stream(tableDesc.getColumnFamilies())
.map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet());
List<String> unmatchedFamilies = queue.stream().map(item -> Bytes.toString(item.getFamily()))
.filter(fn -> !familyNames.contains(fn)).distinct().collect(Collectors.toList());
if (unmatchedFamilies.size() > 0) {
String msg =
"Unmatched family names found: unmatched family names in HFiles to be bulkloaded: "
+ unmatchedFamilies + "; valid family names of table " + tableDesc.getTableName()
+ " are: " + familyNames;
LOG.error(msg);
if (!silence) {
throw new IOException(msg);
}
}
} | 3.68 |
flink_FileSourceSplit_offset | /**
* Returns the start of the file region referenced by this source split. The position is
* inclusive, the value indicates the first byte that is part of the split.
*/
public long offset() {
return offset;
} | 3.68 |
flink_HiveParserBaseSemanticAnalyzer_validateNoHavingReferenceToAlias | // We support HAVING clauses that refer to select aliases, just as Hive's semantic analyzer does. This check only prints
// a warning now.
public static void validateNoHavingReferenceToAlias(
HiveParserQB qb,
HiveParserASTNode havingExpr,
HiveParserRowResolver inputRR,
HiveParserSemanticAnalyzer semanticAnalyzer)
throws SemanticException {
HiveParserQBParseInfo qbPI = qb.getParseInfo();
Map<HiveParserASTNode, String> exprToAlias = qbPI.getAllExprToColumnAlias();
for (Map.Entry<HiveParserASTNode, String> exprAndAlias : exprToAlias.entrySet()) {
final HiveParserASTNode expr = exprAndAlias.getKey();
final String alias = exprAndAlias.getValue();
// put the alias in input RR so that we can generate ExprNodeDesc with it
if (inputRR.getExpression(expr) != null) {
inputRR.put("", alias, inputRR.getExpression(expr));
}
final Set<Object> aliasReferences = new HashSet<>();
TreeVisitorAction action =
new TreeVisitorAction() {
@Override
public Object pre(Object t) {
if (HiveASTParseDriver.ADAPTOR.getType(t)
== HiveASTParser.TOK_TABLE_OR_COL) {
Object c = HiveASTParseDriver.ADAPTOR.getChild(t, 0);
if (c != null
&& HiveASTParseDriver.ADAPTOR.getType(c)
== HiveASTParser.Identifier
&& HiveASTParseDriver.ADAPTOR.getText(c).equals(alias)) {
aliasReferences.add(t);
}
}
return t;
}
@Override
public Object post(Object t) {
return t;
}
};
new TreeVisitor(HiveASTParseDriver.ADAPTOR).visit(havingExpr, action);
if (aliasReferences.size() > 0) {
String havingClause =
semanticAnalyzer
.ctx
.getTokenRewriteStream()
.toString(
havingExpr.getTokenStartIndex(),
havingExpr.getTokenStopIndex());
String msg =
String.format(
"Encountered Select alias '%s' in having clause '%s'"
+ " This is non standard behavior.",
alias, havingClause);
LOG.warn(msg);
}
}
} | 3.68 |
hbase_RequestConverter_buildOfflineRegionRequest | /**
* Creates a protocol buffer OfflineRegionRequest
* @return an OfflineRegionRequest
*/
public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] regionName) {
OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder();
builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName));
return builder.build();
} | 3.68 |
shardingsphere-elasticjob_ShardingService_setReshardingFlag | /**
* Set resharding flag.
*/
public void setReshardingFlag() {
if (!leaderService.isLeaderUntilBlock()) {
return;
}
jobNodeStorage.createJobNodeIfNeeded(ShardingNode.NECESSARY);
} | 3.68 |
hadoop_GangliaConf_setUnits | /**
* @param units the units to set
*/
void setUnits(String units) {
this.units = units;
} | 3.68 |
hadoop_ContainerSimulator_createFromTaskContainerDefinition | /**
* Invoked when AM schedules containers to allocate.
* @param def The task's definition object.
* @return ContainerSimulator object
*/
public static ContainerSimulator createFromTaskContainerDefinition(
TaskContainerDefinition def) {
return new ContainerSimulator(def.getResource(), def.getDuration(),
def.getHostname(), def.getPriority(), def.getType(),
def.getExecutionType(), def.getAllocationId(), def.getRequestDelay());
} | 3.68 |
morf_SqlScriptExecutor_beforeExecute | /**
* @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.SqlScriptVisitor#beforeExecute(java.lang.String)
*/
@Override
public void beforeExecute(String sql) {
// Defaults to no-op
} | 3.68 |
hmily_LogUtil_debug | /**
* Debug.
*
* @param logger the logger
* @param supplier the supplier
*/
public static void debug(final Logger logger, final Supplier<Object> supplier) {
if (logger.isDebugEnabled()) {
logger.debug(Objects.toString(supplier.get()));
}
} | 3.68 |
framework_Table_setDragMode | /**
* Sets the drag start mode of the Table. Drag start mode controls how Table
* behaves as a drag source.
*
 * @param newDragMode the drag mode to set
*/
public void setDragMode(TableDragMode newDragMode) {
dragMode = newDragMode;
markAsDirty();
} | 3.68 |
pulsar_ProducerConfiguration_setProducerName | /**
* Specify a name for the producer
* <p>
 * If not assigned, the system will generate a globally unique name which can be accessed with
* {@link Producer#getProducerName()}.
* <p>
 * When specifying a name, it is up to the user to ensure that, for a given topic, the producer name is unique
* across all Pulsar's clusters.
* <p>
* If a producer with the same name is already connected to a particular topic, the
* {@link PulsarClient#createProducer(String)} operation will fail with {@link ProducerBusyException}.
*
* @param producerName
* the custom name to use for the producer
* @since 1.20.0
*/
public void setProducerName(String producerName) {
conf.setProducerName(producerName);
} | 3.68 |
flink_EmbeddedRocksDBStateBackend_setDbStoragePaths | /**
* Sets the directories in which the local RocksDB database puts its files (like SST and
* metadata files). These directories do not need to be persistent, they can be ephemeral,
* meaning that they are lost on a machine failure, because state in RocksDB is persisted in
* checkpoints.
*
* <p>If nothing is configured, these directories default to the TaskManager's local temporary
* file directories.
*
* <p>Each distinct state will be stored in one path, but when the state backend creates
* multiple states, they will store their files on different paths.
*
* <p>Passing {@code null} to this function restores the default behavior, where the configured
* temp directories will be used.
*
* @param paths The paths across which the local RocksDB database files will be spread.
*/
public void setDbStoragePaths(String... paths) {
if (paths == null) {
localRocksDbDirectories = null;
} else if (paths.length == 0) {
throw new IllegalArgumentException("empty paths");
} else {
File[] pp = new File[paths.length];
for (int i = 0; i < paths.length; i++) {
final String rawPath = paths[i];
final String path;
if (rawPath == null) {
throw new IllegalArgumentException("null path");
} else {
// we need this for backwards compatibility, to allow URIs like 'file:///'...
URI uri = null;
try {
uri = new Path(rawPath).toUri();
} catch (Exception e) {
// cannot parse as a path
}
if (uri != null && uri.getScheme() != null) {
if ("file".equalsIgnoreCase(uri.getScheme())) {
path = uri.getPath();
} else {
throw new IllegalArgumentException(
"Path " + rawPath + " has a non-local scheme");
}
} else {
path = rawPath;
}
}
pp[i] = new File(path);
if (!pp[i].isAbsolute()) {
throw new IllegalArgumentException("Relative paths are not supported");
}
}
localRocksDbDirectories = pp;
}
} | 3.68 |
framework_VButton_isTargetInsideButton | /**
* Check if the event occurred over an element which is part of this button
*/
private boolean isTargetInsideButton(Event event) {
Element to = event.getRelatedTarget();
return getElement().isOrHasChild(DOM.eventGetTarget(event))
&& (to == null || !getElement().isOrHasChild(to));
} | 3.68 |
morf_DeleteStatement_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser
.dispatch(getTable())
.dispatch(getWhereCriterion())
.dispatch(getLimit());
} | 3.68 |
hadoop_RollingWindow_getSum | /**
* Get value represented by this window at the specified time
* <p>
*
* If time lags behind the latest update time, the new updates are still
* included in the sum
*
 * @param time the time at which the window value is requested
* @return number of events occurred in the past period
*/
public long getSum(long time) {
long sum = 0;
for (Bucket bucket : buckets) {
boolean stale = bucket.isStaleNow(time);
if (!stale) {
sum += bucket.value.get();
}
if (LOG.isDebugEnabled()) {
long bucketTime = bucket.updateTime.get();
String timeStr = new Date(bucketTime).toString();
LOG.debug("Sum: + " + sum + " Bucket: updateTime: " + timeStr + " ("
+ bucketTime + ") isStale " + stale + " at " + time);
}
}
return sum;
} | 3.68 |