name | code_snippet | score
---|---|---|
hbase_AsyncTable_getAll | /**
* A simple version of batch get. It will fail if there are any failures, and you will get the
* whole result list at once if the operation succeeds.
* @param gets The objects that specify what data to fetch and from which rows.
* @return A {@link CompletableFuture} that wraps the result list.
*/
default CompletableFuture<List<Result>> getAll(List<Get> gets) {
return allOf(get(gets));
} | 3.68 |
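A minimal usage sketch for the batch get above; the connection, table name and row keys are illustrative assumptions, not part of the snippet:

```java
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class GetAllExample {
  static void fetch(AsyncConnection conn) {
    // "t1" and the row keys are made-up values for illustration.
    AsyncTable<?> table = conn.getTable(TableName.valueOf("t1"));
    List<Get> gets = Arrays.asList(new Get(Bytes.toBytes("row1")), new Get(Bytes.toBytes("row2")));
    // The returned future fails as a whole if any single get fails.
    table.getAll(gets).thenAccept(results -> {
      for (Result r : results) {
        System.out.println(r.isEmpty() ? "miss" : Bytes.toString(r.getRow()));
      }
    });
  }
}
```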
hadoop_FederationStateStoreFacade_addApplicationHomeSubCluster | /**
* Add ApplicationHomeSubCluster to FederationStateStore.
*
* @param applicationId applicationId.
* @param homeSubCluster homeSubCluster, homeSubCluster selected according to policy.
* @throws YarnException yarn exception.
*/
public void addApplicationHomeSubCluster(ApplicationId applicationId,
ApplicationHomeSubCluster homeSubCluster) throws YarnException {
try {
addApplicationHomeSubCluster(homeSubCluster);
} catch (YarnException e) {
String msg = String.format(
"Unable to insert the ApplicationId %s into the FederationStateStore.", applicationId);
throw new YarnException(msg, e);
}
} | 3.68 |
flink_ChannelStateWriteRequestExecutorFactory_getOrCreateExecutor | /**
* @param startExecutor Used by tests to prevent creating too many threads when some unit tests
*     create executors frequently.
*/
ChannelStateWriteRequestExecutor getOrCreateExecutor(
JobVertexID jobVertexID,
int subtaskIndex,
CheckpointStorage checkpointStorage,
int maxSubtasksPerChannelStateFile,
boolean startExecutor) {
synchronized (lock) {
if (executor == null) {
executor =
new ChannelStateWriteRequestExecutorImpl(
new ChannelStateWriteRequestDispatcherImpl(
checkpointStorage, jobID, new ChannelStateSerializerImpl()),
maxSubtasksPerChannelStateFile,
executor -> {
assert Thread.holdsLock(lock);
checkState(this.executor == executor);
this.executor = null;
},
lock);
if (startExecutor) {
executor.start();
}
}
ChannelStateWriteRequestExecutor currentExecutor = executor;
currentExecutor.registerSubtask(jobVertexID, subtaskIndex);
return currentExecutor;
}
} | 3.68 |
framework_DateField_setResolution | /**
* Sets the resolution of the DateField.
*
* The default resolution is {@link Resolution#DAY} since Vaadin 7.0.
*
* @param resolution
* the resolution to set.
*/
public void setResolution(Resolution resolution) {
this.resolution = resolution;
updateRangeValidator();
markAsDirty();
} | 3.68 |
flink_JobGraph_addJar | /**
* Adds the path of a JAR file required to run the job on a task manager.
*
* @param jar path of the JAR file required to run the job on a task manager
*/
public void addJar(Path jar) {
if (jar == null) {
throw new IllegalArgumentException();
}
if (!userJars.contains(jar)) {
userJars.add(jar);
}
} | 3.68 |
flink_KeyedStateCheckpointOutputStream_isKeyGroupAlreadyStarted | /**
* Returns true if the key group with the given id was already started. The key group might not
* yet be finished, if its id is equal to the return value of {@link #getCurrentKeyGroup()}.
*/
public boolean isKeyGroupAlreadyStarted(int keyGroupId) {
return NO_OFFSET_SET != keyGroupRangeOffsets.getKeyGroupOffset(keyGroupId);
} | 3.68 |
open-banking-gateway_Xs2aConsentInfo_isDecoupledScaFinalizedByPSU | /**
* Returns whether the decoupled SCA was finalized by the PSU with a mobile or other type of device.
*/
public boolean isDecoupledScaFinalizedByPSU(Xs2aContext ctx) {
return ctx.isDecoupledScaFinished();
} | 3.68 |
flink_DateTimeUtils_toSQLDate | /**
* Converts the internal representation of a SQL DATE (int) to the Java type used for UDF
* parameters ({@link java.sql.Date}).
*/
public static java.sql.Date toSQLDate(int v) {
// note that, in this case, can't handle Daylight Saving Time
final long t = v * MILLIS_PER_DAY;
return new java.sql.Date(t - LOCAL_TZ.getOffset(t));
} | 3.68 |
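The method above maps an epoch-day int to a java.sql.Date while compensating for the local time-zone offset. A plain-JDK sketch of the same epoch-day arithmetic, deliberately avoiding the Flink-internal class and the offset handling:

```java
import java.time.LocalDate;

public class EpochDayExample {
    public static void main(String[] args) {
        // The internal SQL DATE representation is a count of days since 1970-01-01.
        int internalDays = 19000;
        // Epoch day 19000 is 2022-01-08; java.sql.Date.valueOf avoids manual offset math.
        System.out.println(java.sql.Date.valueOf(LocalDate.ofEpochDay(internalDays)));
    }
}
```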
hbase_ClassSize_sizeOf | /**
* Calculate the memory consumption (in byte) of a byte array, including the array header and the
* whole backing byte array. If the whole byte array is occupied (not shared with other objects),
* please use this function. If not, please use {@link #sizeOfByteArray(int)} instead.
* @param b the byte array
* @return the memory consumption (in byte) of the whole byte array
*/
public static long sizeOf(byte[] b) {
return memoryLayout.sizeOfByteArray(b.length);
} | 3.68 |
flink_SplitEnumeratorContext_sendEventToSourceReader | /**
* Send a source event to a source reader. The source reader is identified by its subtask id and
* attempt number. It is similar to {@link #sendEventToSourceReader(int, SourceEvent)} but it is
* aware of the subtask execution attempt to send this event to.
*
* <p>The {@link SplitEnumerator} must invoke this method instead of {@link
* #sendEventToSourceReader(int, SourceEvent)} if it is used in cases that a subtask can have
* multiple concurrent execution attempts, e.g. if speculative execution is enabled. Otherwise
* an error will be thrown when the split enumerator tries to send a custom source event.
*
* @param subtaskId the subtask id of the source reader to send this event to.
* @param attemptNumber the attempt number of the source reader to send this event to.
* @param event the source event to send.
*/
default void sendEventToSourceReader(int subtaskId, int attemptNumber, SourceEvent event) {
throw new UnsupportedOperationException();
} | 3.68 |
hbase_TableDescriptorBuilder_setCompactionEnabled | /**
* Sets the table compaction enable flag.
* @param isEnable True to enable compaction.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
} | 3.68 |
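A hedged sketch using the public TableDescriptorBuilder, which exposes the same flag and, to my understanding, delegates to the ModifyableTableDescriptor shown above; the table and column family names are illustrative:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionFlagExample {
  static TableDescriptor build() {
    // Passing false stores "false" under the compaction-enabled key, disabling automatic compactions.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("events"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
        .setCompactionEnabled(false)
        .build();
  }
}
```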
framework_ObjectProperty_getType | /**
* Returns the type of the ObjectProperty. The methods <code>getValue</code>
* and <code>setValue</code> must be compatible with this type: one must be
* able to safely cast the value returned from <code>getValue</code> to the
* given type and pass any variable assignable to this type as an argument
* to <code>setValue</code>.
*
* @return type of the Property
*/
@Override
public final Class<T> getType() {
return type;
} | 3.68 |
hudi_HoodieOperation_isDelete | /**
* Returns whether the operation is DELETE.
*/
public static boolean isDelete(HoodieOperation operation) {
return operation == DELETE;
} | 3.68 |
hadoop_ServiceLauncher_getService | /**
* Get the service.
*
* Null until
* {@link #coreServiceLaunch(Configuration, Service, List, boolean, boolean)}
* has completed.
* @return the service
*/
public final S getService() {
return service;
} | 3.68 |
morf_AbstractSqlDialectTest_shouldGenerateCorrectSqlForMathOperations12 | /**
* Test for proper SQL mathematics operation generation from DSL expressions
* that use brackets.
*/
@Test
public void shouldGenerateCorrectSqlForMathOperations12() {
String result = testDialect.getSqlFrom(bracket(field("a").plus(field("b"))).divideBy(field("c")));
assertEquals(expectedSqlForMathOperations12(), result);
} | 3.68 |
hbase_CellCodecWithTags_write | /**
* Write int length followed by array bytes.
*/
private void write(final byte[] bytes, final int offset, final int length) throws IOException {
this.out.write(Bytes.toBytes(length));
this.out.write(bytes, offset, length);
} | 3.68 |
framework_Page_addUriFragmentChangedListener | /**
* Adds a listener that gets notified every time the URI fragment of this
* page is changed. Please note that the initial URI fragment has already
* been set when a new UI is initialized, so there will not be any initial
* event for listeners added during {@link UI#init(VaadinRequest)}.
*
* @see #getUriFragment()
* @see #setUriFragment(String)
* @see Registration
*
* @param listener
* the URI fragment listener to add
* @return a registration object for removing the listener
* @deprecated Use {@link Page#addPopStateListener(PopStateListener)}
* instead
* @since 8.0
*/
@Deprecated
public Registration addUriFragmentChangedListener(
Page.UriFragmentChangedListener listener) {
return addListener(UriFragmentChangedEvent.class, listener,
URI_FRAGMENT_CHANGED_METHOD);
} | 3.68 |
flink_CliClient_executeFile | /**
* Executes content from a SQL file and prints status information and/or errors on the terminal.
*
* @param content SQL file content
*/
private boolean executeFile(String content, OutputStream outputStream, ExecutionMode mode) {
terminal.writer().println(CliStrings.messageInfo(CliStrings.MESSAGE_EXECUTE_FILE).toAnsi());
// append line delimiter
try (InputStream inputStream =
new ByteArrayInputStream(
SqlMultiLineParser.formatSqlFile(content).getBytes());
Terminal dumbTerminal =
TerminalUtils.createDumbTerminal(inputStream, outputStream)) {
LineReader lineReader = createLineReader(dumbTerminal, mode);
return getAndExecuteStatements(lineReader, true);
} catch (Throwable e) {
printExecutionException(e);
return false;
}
} | 3.68 |
hbase_FuzzyRowFilter_parseFrom | /**
* Parse a serialized representation of {@link FuzzyRowFilter}
* @param pbBytes A pb serialized {@link FuzzyRowFilter} instance
* @return An instance of {@link FuzzyRowFilter} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static FuzzyRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.FuzzyRowFilter proto;
try {
proto = FilterProtos.FuzzyRowFilter.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
int count = proto.getFuzzyKeysDataCount();
ArrayList<Pair<byte[], byte[]>> fuzzyKeysData = new ArrayList<>(count);
for (int i = 0; i < count; ++i) {
BytesBytesPair current = proto.getFuzzyKeysData(i);
byte[] keyBytes = current.getFirst().toByteArray();
byte[] keyMeta = current.getSecond().toByteArray();
fuzzyKeysData.add(new Pair<>(keyBytes, keyMeta));
}
return new FuzzyRowFilter(fuzzyKeysData);
} | 3.68 |
querydsl_MetaDataExporter_setBeanPrefix | /**
* Override the bean prefix for the classes (default: "")
*
* @param beanPrefix bean prefix for bean-types (default: "")
*/
public void setBeanPrefix(String beanPrefix) {
module.bind(SQLCodegenModule.BEAN_PREFIX, beanPrefix);
} | 3.68 |
pulsar_MessageId_fromByteArrayWithTopic | /**
* De-serialize a message id from a byte array with its topic
* information attached.
*
* <p>The topic information is needed when acknowledging a {@link MessageId} on
* a consumer that is consuming from multiple topics.
*
* @param data the byte array with the serialized message id
* @param topicName the topic name
* @return a {@link MessageId} instance
* @throws IOException if the de-serialization fails
*/
static MessageId fromByteArrayWithTopic(byte[] data, String topicName) throws IOException {
return DefaultImplementation.getDefaultImplementation().newMessageIdFromByteArrayWithTopic(data, topicName);
} | 3.68 |
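A small usage sketch for the de-serialization above; the consumer, topic name and the origin of the serialized bytes are assumptions for illustration:

```java
import java.io.IOException;
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.MessageId;

public class MessageIdRoundTrip {
  // Rebuilds a MessageId (with its topic attached) and acknowledges it on a multi-topics consumer.
  static void ackSerialized(Consumer<byte[]> consumer, byte[] serialized, String topic)
      throws IOException {
    MessageId id = MessageId.fromByteArrayWithTopic(serialized, topic);
    consumer.acknowledgeAsync(id);
  }
}
```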
hbase_Bytes_unsignedCopyAndIncrement | /**
* Treat the byte[] as an unsigned series of bytes, most significant bits first. Start by adding 1
* to the rightmost bit/byte and carry over all overflows to the more significant bits/bytes.
* @param input The byte[] to increment.
* @return The incremented copy of "input". May be same length or 1 byte longer.
*/
public static byte[] unsignedCopyAndIncrement(final byte[] input) {
byte[] copy = copy(input);
if (copy == null) {
throw new IllegalArgumentException("cannot increment null array");
}
for (int i = copy.length - 1; i >= 0; --i) {
if (copy[i] == -1) {// -1 is all 1-bits, which is the unsigned maximum
copy[i] = 0;
} else {
++copy[i];
return copy;
}
}
// we maxed out the array
byte[] out = new byte[copy.length + 1];
out[0] = 1;
System.arraycopy(copy, 0, out, 1, copy.length);
return out;
} | 3.68 |
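Two worked cases of the carry behavior described above, shown as a small sketch (Bytes.toHex is used only for printing):

```java
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementExample {
  public static void main(String[] args) {
    // Simple carry into the last byte: {0x00, 0x01} becomes {0x00, 0x02}.
    System.out.println(Bytes.toHex(Bytes.unsignedCopyAndIncrement(new byte[] {0x00, 0x01})));
    // Every byte overflows, so the result grows by one byte: {0xFF, 0xFF} becomes {0x01, 0x00, 0x00}.
    System.out.println(Bytes.toHex(Bytes.unsignedCopyAndIncrement(new byte[] {(byte) 0xFF, (byte) 0xFF})));
  }
}
```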
flink_FlinkJoinToMultiJoinRule_addOnJoinFieldRefCounts | /**
* Adds the references from the new join condition onto the existing join condition reference
* counts.
*
* @param multiJoinInputs inputs into the new MultiJoin
* @param nTotalFields total number of fields in the MultiJoin
* @param joinCondition the new join condition
* @param origJoinFieldRefCounts existing join condition reference counts
* @return Map containing the updated join condition reference counts
*/
private com.google.common.collect.ImmutableMap<Integer, ImmutableIntList>
addOnJoinFieldRefCounts(
List<RelNode> multiJoinInputs,
int nTotalFields,
RexNode joinCondition,
List<int[]> origJoinFieldRefCounts) {
// count the input references in the join condition
int[] joinCondRefCounts = new int[nTotalFields];
joinCondition.accept(new InputReferenceCounter(joinCondRefCounts));
// first, make a copy of the ref counters
final Map<Integer, int[]> refCountsMap = new HashMap<>();
int nInputs = multiJoinInputs.size();
int currInput = 0;
for (int[] origRefCounts : origJoinFieldRefCounts) {
refCountsMap.put(currInput, origRefCounts.clone());
currInput++;
}
// add on to the counts for each input into the MultiJoin the
// reference counts computed for the current join condition
currInput = -1;
int startField = 0;
int nFields = 0;
for (int i = 0; i < nTotalFields; i++) {
if (joinCondRefCounts[i] == 0) {
continue;
}
while (i >= (startField + nFields)) {
startField += nFields;
currInput++;
assert currInput < nInputs;
nFields = multiJoinInputs.get(currInput).getRowType().getFieldCount();
}
int[] refCounts = refCountsMap.get(currInput);
refCounts[i - startField] += joinCondRefCounts[i];
}
final com.google.common.collect.ImmutableMap.Builder<Integer, ImmutableIntList> builder =
com.google.common.collect.ImmutableMap.builder();
for (Map.Entry<Integer, int[]> entry : refCountsMap.entrySet()) {
builder.put(entry.getKey(), ImmutableIntList.of(entry.getValue()));
}
return builder.build();
} | 3.68 |
hadoop_SetupTaskStage_executeStage | /**
* Set up a task.
* @param name task name (for logging)
* @return task attempt directory
* @throws IOException IO failure.
*/
@Override
protected Path executeStage(final String name) throws IOException {
return createNewDirectory("Task setup " + name,
requireNonNull(getTaskAttemptDir(), "No task attempt directory"));
} | 3.68 |
hbase_HFileLink_getHFileFromBackReference | /**
* Get the full path of the HFile referenced by the back reference
* @param conf {@link Configuration} to read for the archive directory name
* @param linkRefPath Link Back Reference path
* @return full path of the referenced hfile
* @throws IOException on unexpected error.
*/
public static Path getHFileFromBackReference(final Configuration conf, final Path linkRefPath)
throws IOException {
return getHFileFromBackReference(CommonFSUtils.getRootDir(conf), linkRefPath);
} | 3.68 |
framework_Upload_getButtonStyleName | /**
* Returns the style name rendered into button that fires uploading.
*
* @return Style name to be rendered into button that fires uploading
* @since 8.2
*/
public String getButtonStyleName() {
return getState(false).buttonStyleName;
} | 3.68 |
framework_MenuBar_removeItem | /**
* Removes the specified menu item from the bar.
*
* @param item
* the item to be removed
*/
public void removeItem(MenuItem item) {
final int idx = items.indexOf(item);
if (idx == -1) {
return;
}
final Element container = getItemContainerElement();
DOM.removeChild(container, DOM.getChild(container, idx));
items.remove(idx);
} | 3.68 |
hbase_RegionMover_designatedFile | /**
* Set the designated file. The designated file contains the hostnames to which regions are
* moved. It should have one 'host:port' per line. Port is mandatory here as we can have many RS
* running on a single host.
* @param designatedFile The designated file
* @return RegionMoverBuilder object
*/
public RegionMoverBuilder designatedFile(String designatedFile) {
this.designatedFile = designatedFile;
return this;
} | 3.68 |
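A hedged sketch of the builder above; the designated file path is an assumption, and the surrounding RegionMover calls (build/unload/close) follow the public builder API as I understand it:

```java
import org.apache.hadoop.hbase.util.RegionMover;
import org.apache.hadoop.hbase.util.RegionMover.RegionMoverBuilder;

public class DesignatedFileExample {
  static void unload(String hostnamePort) throws Exception {
    // Only servers listed in the designated file (one host:port per line) receive regions.
    try (RegionMover mover = new RegionMoverBuilder(hostnamePort)
        .designatedFile("/tmp/designated_targets.txt")
        .build()) {
      mover.unload();
    }
  }
}
```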
druid_IPAddress_isClassB | /**
* Check if the IP address belongs to a Class B IP address.
*
* @return Return <code>true</code> if the encapsulated IP address belongs to a class B IP address, otherwise
* return <code>false</code>.
*/
public final boolean isClassB() {
return (ipAddress & 0x00000003) == 1;
} | 3.68 |
flink_Executing_forceRescale | /** Force rescaling as long as the target parallelism is different from the current one. */
private void forceRescale() {
if (context.shouldRescale(getExecutionGraph(), true)) {
getLogger()
.info(
"Added resources are still there after {} time({}), force a rescale.",
JobManagerOptions.SCHEDULER_SCALING_INTERVAL_MAX.key(),
scalingIntervalMax);
context.goToRestarting(
getExecutionGraph(),
getExecutionGraphHandler(),
getOperatorCoordinatorHandler(),
Duration.ofMillis(0L),
getFailures());
}
} | 3.68 |
hudi_GenericRecordFullPayloadGenerator_updateTimestamp | /**
* Generates a sequential timestamp (daily increment), and updates the timestamp field of the record.
* Note: When generating records, the number of records to be generated must be more than
* numDatePartitions * parallelism, to guarantee that at least numDatePartitions partitions are created.
*
* @VisibleForTesting
*/
public GenericRecord updateTimestamp(GenericRecord record, String fieldName) {
long delta = TimeUnit.SECONDS.convert((partitionIndex++ % numDatePartitions) + startPartition, TimeUnit.DAYS);
record.put(fieldName, delta);
return record;
} | 3.68 |
hudi_TableSchemaResolver_getTableHistorySchemaStrFromCommitMetadata | /**
* Gets the history schemas as String for a hoodie table from the HoodieCommitMetadata of the instant.
*
* @return history schemas string for this table
*/
public Option<String> getTableHistorySchemaStrFromCommitMetadata() {
// currently we only support the FileBasedInternalSchemaStorageManager
FileBasedInternalSchemaStorageManager manager = new FileBasedInternalSchemaStorageManager(metaClient);
String result = manager.getHistorySchemaStr();
return result.isEmpty() ? Option.empty() : Option.of(result);
} | 3.68 |
rocketmq-connect_WorkerSourceTask_maybeCreateAndGetTopic | /**
* Maybe create and get the topic.
*
* @param record the source record used to resolve the topic
* @return the resolved (and possibly newly created) topic name
*/
private String maybeCreateAndGetTopic(ConnectRecord record) {
String topic = overwriteTopicFromRecord(record);
if (StringUtils.isBlank(topic)) {
// topic from config
topic = taskConfig.getString(SourceConnectorConfig.CONNECT_TOPICNAME);
}
if (StringUtils.isBlank(topic)) {
throw new ConnectException("source connect lack of topic config");
}
if (!workerConfig.isAutoCreateTopicEnable() || topicCache.contains(topic)) {
return topic;
}
if (!ConnectUtil.isTopicExist(workerConfig, topic)) {
ConnectUtil.createTopic(workerConfig, new TopicConfig(topic));
}
topicCache.add(topic);
return topic;
} | 3.68 |
streampipes_StreamPipesClient_sinks | /**
* Get API to work with data sinks
*
* @return {@link org.apache.streampipes.client.api.DataSinkApi}
*/
@Override
public DataSinkApi sinks() {
return new DataSinkApi(config);
} | 3.68 |
graphhopper_AbstractNonCHBidirAlgo_fillEdgesToUsingFilter | /**
* @see #fillEdgesFromUsingFilter(EdgeFilter)
*/
protected void fillEdgesToUsingFilter(EdgeFilter edgeFilter) {
additionalEdgeFilter = edgeFilter;
finishedTo = !fillEdgesTo();
additionalEdgeFilter = null;
} | 3.68 |
framework_Upload_setButtonCaption | /**
* In addition to the actual file chooser, upload components have a button
* that starts the actual upload progress. This method is used to set the text
* of that button.
* <p>
* In case the button text is set to null, the button is hidden. In this
* case developer must explicitly initiate the upload process with
* {@link #submitUpload()}.
* <p>
* In case the Upload is used in immediate mode using
* {@link #setImmediateMode(boolean)}, the file chooser (HTML input with
* type "file") is hidden and only the button with this text is shown.
* <p>
*
* <p>
* according to your needs.
*
* @param buttonCaption
* text for upload components button.
*/
public void setButtonCaption(String buttonCaption) {
getState().buttonCaption = buttonCaption;
} | 3.68 |
hadoop_BinaryRecordInput_get | /**
* Get a thread-local record input for the supplied DataInput.
* @param inp data input stream
* @return binary record input corresponding to the supplied DataInput.
*/
public static BinaryRecordInput get(DataInput inp) {
BinaryRecordInput bin = B_IN.get();
bin.setDataInput(inp);
return bin;
} | 3.68 |
pulsar_AuthenticationFactory_token | /**
* Create an authentication provider for token based authentication.
*
* @param tokenSupplier
* a supplier of the client auth token
* @return the Authentication object initialized with the token credentials
*/
public static Authentication token(Supplier<String> tokenSupplier) {
return DefaultImplementation.getDefaultImplementation().newAuthenticationToken(tokenSupplier);
} | 3.68 |
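A brief sketch wiring the token supplier above into a client builder; the service URL and the environment variable are illustrative assumptions:

```java
import org.apache.pulsar.client.api.AuthenticationFactory;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.PulsarClientException;

public class TokenAuthExample {
  static PulsarClient connect() throws PulsarClientException {
    // The supplier can be re-invoked by the client, so it may return a refreshed token over time.
    return PulsarClient.builder()
        .serviceUrl("pulsar://localhost:6650") // illustrative address
        .authentication(AuthenticationFactory.token(() -> System.getenv("PULSAR_TOKEN")))
        .build();
  }
}
```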
querydsl_MetaDataExporter_setBeansTargetFolder | /**
* Set the target folder for beans
*
* <p>defaults to the targetFolder value</p>
*
* @param targetFolder target source folder to create the bean sources into
*/
public void setBeansTargetFolder(File targetFolder) {
this.beansTargetFolder = targetFolder;
} | 3.68 |
querydsl_AbstractJPAQuery_fetchResults | /**
* {@inheritDoc}
*
* @deprecated {@code fetchResults} requires a count query to be computed. In {@code querydsl-sql}, this is done
* by wrapping the query in a subquery, like so: {@code SELECT COUNT(*) FROM (<original query>)}. Unfortunately,
* JPQL - the query language of JPA - does not allow queries to project from subqueries. As a result there isn't a
* universal way to express count queries in JPQL. Historically, QueryDSL has attempted to produce a modified query
* to compute the number of results instead.
*
* However, this approach only works for simple queries. Specifically
* queries with multiple group by clauses and queries with a having clause turn out to be problematic. This is because
* {@code COUNT(DISTINCT a, b, c)}, while valid SQL in most dialects, is not valid JPQL. Furthermore, a having
* clause may refer to select elements or aggregate functions and therefore cannot be emulated by moving the predicate
* to the where clause instead.
*
* In order to support {@code fetchResults} for queries with multiple group by elements or a having clause, we
* generate the count in memory instead. This means that the method simply falls back to returning the size of
* {@link #fetch()}. For large result sets this may come at a severe performance penalty.
*
* For very specific domain models where {@link #fetchResults()} has to be used in conjunction with complex queries
* containing multiple group by elements and/or a having clause, we recommend using the
* <a href="https://persistence.blazebit.com/documentation/1.5/core/manual/en_US/index.html#querydsl-integration">Blaze-Persistence</a>
* integration for QueryDSL. Among other advanced query features, Blaze-Persistence makes it possible to select
* from subqueries in JPQL. As a result the {@code BlazeJPAQuery} provided with the integration, implements
* {@code fetchResults} properly and always executes a proper count query.
*
* Mind that for any scenario where the count is not strictly needed separately, we recommend to use {@link #fetch()}
* instead.
*/
@Override
@Deprecated
public QueryResults<T> fetchResults() {
try {
QueryModifiers modifiers = getMetadata().getModifiers();
if (getMetadata().getGroupBy().size() > 1 || getMetadata().getHaving() != null) {
logger.warning("Fetchable#fetchResults() was computed in memory! See the Javadoc for AbstractJPAQuery#fetchResults for more details.");
Query query = createQuery(null, false);
@SuppressWarnings("unchecked")
List<T> resultList = query.getResultList();
int offset = modifiers.getOffsetAsInteger() == null ? 0 : modifiers.getOffsetAsInteger();
int limit = modifiers.getLimitAsInteger() == null ? resultList.size() : modifiers.getLimitAsInteger();
return new QueryResults<T>(resultList.subList(offset, Math.min(resultList.size(), offset + limit)), modifiers, resultList.size());
}
Query countQuery = createQuery(null, true);
long total = (Long) countQuery.getSingleResult();
if (total > 0) {
Query query = createQuery(modifiers, false);
@SuppressWarnings("unchecked")
List<T> list = (List<T>) getResultList(query);
return new QueryResults<T>(list, modifiers, total);
} else {
return QueryResults.emptyResults();
}
} finally {
reset();
}
} | 3.68 |
hbase_Table_getRpcTimeout | /**
* Get timeout of each rpc request in this Table instance. It will be overridden by a more
* specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
* @see #getReadRpcTimeout(TimeUnit)
* @see #getWriteRpcTimeout(TimeUnit)
* @param unit the unit of time the timeout to be represented in
* @return rpc timeout in the specified time unit
*/
default long getRpcTimeout(TimeUnit unit) {
throw new NotImplementedException("Add an implementation!");
} | 3.68 |
flink_SinkTestSuiteBase_getSinkMetricFilter | /**
* Return the filter used to filter the sink metric.
*
* <ul>
* <li>Sink v1: return null.
* <li>Sink v2: return the "Writer" prefix in the `SinkTransformationTranslator`.
* </ul>
*/
private String getSinkMetricFilter(DataStreamSinkExternalContext<T> context) {
if (context instanceof DataStreamSinkV1ExternalContext) {
return null;
} else if (context instanceof DataStreamSinkV2ExternalContext) {
// See class `SinkTransformationTranslator`
return "Writer";
} else {
throw new IllegalArgumentException(
String.format("Get unexpected sink context: %s", context.getClass()));
}
} | 3.68 |
hbase_AdaptiveLifoCoDelCallQueue_needToDrop | /**
* @param callRunner to validate
* @return true if this call needs to be skipped based on call timestamp and internal queue state
* (deemed overloaded).
*/
private boolean needToDrop(CallRunner callRunner) {
long now = EnvironmentEdgeManager.currentTime();
long callDelay = now - callRunner.getRpcCall().getReceiveTime();
long localMinDelay = this.minDelay;
// Try and determine if we should reset
// the delay time and determine overload
if (now > intervalTime && !resetDelay.get() && !resetDelay.getAndSet(true)) {
intervalTime = now + codelInterval;
isOverloaded.set(localMinDelay > codelTargetDelay);
}
// If it looks like we should reset the delay
// time do it only once on one thread
if (resetDelay.get() && resetDelay.getAndSet(false)) {
minDelay = callDelay;
// we just reset the delay dunno about how this will work
return false;
} else if (callDelay < localMinDelay) {
minDelay = callDelay;
}
return isOverloaded.get() && callDelay > 2 * codelTargetDelay;
} | 3.68 |
flink_HsMemoryDataManager_handleDecision | // Attention: Do not call this method while holding the read lock or a subpartition lock; otherwise
// deadlock may occur, as this method may acquire the write lock and other subpartitions' locks
// inside.
private void handleDecision(
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
Optional<Decision> decisionOpt) {
Decision decision =
decisionOpt.orElseGet(
() -> callWithLock(() -> spillStrategy.decideActionWithGlobalInfo(this)));
if (!decision.getBufferToSpill().isEmpty()) {
spillBuffers(decision.getBufferToSpill());
}
if (!decision.getBufferToRelease().isEmpty()) {
releaseBuffers(decision.getBufferToRelease());
}
} | 3.68 |
hadoop_TimelineEntity_setPrimaryFilters | /**
* Set the primary filter map to the given map of primary filters
*
* @param primaryFilters
* a map of primary filters
*/
public void setPrimaryFilters(Map<String, Set<Object>> primaryFilters) {
this.primaryFilters =
TimelineServiceHelper.mapCastToHashMap(primaryFilters);
} | 3.68 |
hbase_RegexStringComparator_toByteArray | /** Returns The comparator serialized using pb */
@Override
public byte[] toByteArray() {
return engine.toByteArray();
} | 3.68 |
hudi_HoodieTable_clearMetadataTablePartitionsConfig | /**
* Clears hoodie.table.metadata.partitions in hoodie.properties
*/
private void clearMetadataTablePartitionsConfig(Option<MetadataPartitionType> partitionType, boolean clearAll) {
Set<String> partitions = metaClient.getTableConfig().getMetadataPartitions();
if (clearAll && partitions.size() > 0) {
LOG.info("Clear hoodie.table.metadata.partitions in hoodie.properties");
metaClient.getTableConfig().setValue(TABLE_METADATA_PARTITIONS.key(), EMPTY_STRING);
HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), metaClient.getTableConfig().getProps());
} else if (partitionType.isPresent() && partitions.remove(partitionType.get().getPartitionPath())) {
metaClient.getTableConfig().setValue(HoodieTableConfig.TABLE_METADATA_PARTITIONS.key(), String.join(",", partitions));
HoodieTableConfig.update(metaClient.getFs(), new Path(metaClient.getMetaPath()), metaClient.getTableConfig().getProps());
}
} | 3.68 |
flink_PipelineExecutorUtils_getJobGraph | /**
* Creates the {@link JobGraph} corresponding to the provided {@link Pipeline}.
*
* @param pipeline the pipeline whose job graph we are computing.
* @param configuration the configuration with the necessary information such as jars and
* classpaths to be included, the parallelism of the job and potential savepoint settings
* used to bootstrap its state.
* @param userClassloader the classloader which can load user classes.
* @return the corresponding {@link JobGraph}.
*/
public static JobGraph getJobGraph(
@Nonnull final Pipeline pipeline,
@Nonnull final Configuration configuration,
@Nonnull ClassLoader userClassloader)
throws MalformedURLException {
checkNotNull(pipeline);
checkNotNull(configuration);
final ExecutionConfigAccessor executionConfigAccessor =
ExecutionConfigAccessor.fromConfiguration(configuration);
final JobGraph jobGraph =
FlinkPipelineTranslationUtil.getJobGraph(
userClassloader,
pipeline,
configuration,
executionConfigAccessor.getParallelism());
configuration
.getOptional(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID)
.ifPresent(strJobID -> jobGraph.setJobID(JobID.fromHexString(strJobID)));
if (configuration.getBoolean(DeploymentOptions.ATTACHED)
&& configuration.getBoolean(DeploymentOptions.SHUTDOWN_IF_ATTACHED)) {
jobGraph.setInitialClientHeartbeatTimeout(
configuration.getLong(ClientOptions.CLIENT_HEARTBEAT_TIMEOUT));
}
jobGraph.addJars(executionConfigAccessor.getJars());
jobGraph.setClasspaths(executionConfigAccessor.getClasspaths());
jobGraph.setSavepointRestoreSettings(executionConfigAccessor.getSavepointRestoreSettings());
return jobGraph;
} | 3.68 |
druid_MySqlStatementParser_parseLeave | /**
* parse leave statement
*/
public MySqlLeaveStatement parseLeave() {
accept(Token.LEAVE);
MySqlLeaveStatement leaveStmt = new MySqlLeaveStatement();
leaveStmt.setLabelName(exprParser.name().getSimpleName());
accept(Token.SEMI);
return leaveStmt;
} | 3.68 |
graphhopper_VectorTile_hasExtent | /**
* <pre>
* Although this is an "optional" field it is required by the specification.
* See https://github.com/mapbox/vector-tile-spec/issues/47
* </pre>
*
* <code>optional uint32 extent = 5 [default = 4096];</code>
*/
public boolean hasExtent() {
return ((bitField0_ & 0x00000020) == 0x00000020);
} | 3.68 |
hadoop_Server_verifyDir | /**
* Verifies the specified directory exists.
*
* @param dir directory to verify it exists.
*
* @throws ServerException thrown if the directory does not exist or if the
* path is not a directory.
*/
private void verifyDir(String dir) throws ServerException {
File file = new File(dir);
if (!file.exists()) {
throw new ServerException(ServerException.ERROR.S01, dir);
}
if (!file.isDirectory()) {
throw new ServerException(ServerException.ERROR.S02, dir);
}
} | 3.68 |
hadoop_RecordCreatorFactory_getRecordCreator | /**
* Returns the DNS record creator for the provided type.
*
* @param type the DNS record type.
* @return the record creator.
*/
static RecordCreator getRecordCreator(int type) {
switch (type) {
case A:
return new ARecordCreator();
case CNAME:
return new CNAMERecordCreator();
case TXT:
return new TXTRecordCreator();
case AAAA:
return new AAAARecordCreator();
case PTR:
return new PTRRecordCreator();
case SRV:
return new SRVRecordCreator();
default:
throw new IllegalArgumentException("No type " + type);
}
} | 3.68 |
morf_SqlServerDialect_getSqlForOrderByField | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForOrderByField(org.alfasoftware.morf.sql.element.FieldReference)
*/
@Override
protected String getSqlForOrderByField(FieldReference orderByField) {
StringBuilder result = new StringBuilder();
String sqlFromField = getSqlFrom(orderByField);
if (orderByField.getNullValueHandling().isPresent()) {
switch (orderByField.getNullValueHandling().get()) {
case FIRST:
result.append("(CASE WHEN ").append(sqlFromField).append(" IS NULL THEN 0 ELSE 1 END), ");
break;
case LAST:
result.append("(CASE WHEN ").append(sqlFromField).append(" IS NULL THEN 1 ELSE 0 END), ");
break;
case NONE:
default:
break;
}
}
result.append(sqlFromField);
switch (orderByField.getDirection()) {
case DESCENDING:
result.append(" DESC");
break;
case ASCENDING:
case NONE:
default:
break;
}
return result.toString().trim();
} | 3.68 |
hadoop_StagingCommitter_getBaseTaskAttemptPath | /**
* Return the local work path as the destination for writing work.
* @param context the context of the task attempt.
* @return a path in the local filesystem.
*/
@Override
public Path getBaseTaskAttemptPath(TaskAttemptContext context) {
// a path on the local FS for files that will be uploaded
return getWorkPath();
} | 3.68 |
framework_VCaption_setCaptionAsHtml | /**
* Sets whether the caption is rendered as HTML.
* <p>
* Default is false
*
* @param captionAsHtml
* true if the captions are rendered as HTML, false if rendered
* as plain text
*/
public void setCaptionAsHtml(boolean captionAsHtml) {
this.captionAsHtml = captionAsHtml;
} | 3.68 |
flink_ExecutionEnvironment_areExplicitEnvironmentsAllowed | /**
* Checks whether it is currently permitted to explicitly instantiate a LocalEnvironment or a
* RemoteEnvironment.
*
* @return True, if it is possible to explicitly instantiate a LocalEnvironment or a
* RemoteEnvironment, false otherwise.
*/
@Internal
public static boolean areExplicitEnvironmentsAllowed() {
return contextEnvironmentFactory == null
&& threadLocalContextEnvironmentFactory.get() == null;
} | 3.68 |
morf_ViewURLAsFile_getFile | /**
* Returns a view of the Resource located by <var>url</var> as a {@link File}.
*
* <p>If the URL points to
* within a ZIP or JAR file, the entry is expanded into a temporary directory
* and a file reference to that returned.</p>
*
* <p>If the URL points to a zip resource over http/https, a downloaded copy of it is provided.</p>
* <p>If the URL points to a directory over http/https, a downloaded copy of it is provided.</p>
*
* @param url Target to extract.
* @param urlUsername the username for the url.
* @param urlPassword the password for the url.
* @return Location on disk at which file operations can be performed on
* <var>url</var>
*/
File getFile(final URL url, final String urlUsername, final String urlPassword) {
if (url.getProtocol().equals("file")) {
log.info(url.toString() + " is a File System resource. Providing it directly as a File.");
try {
return new File(URLDecoder.decode(url.getPath(), "utf-8"));
} catch (UnsupportedEncodingException e) {
throw new IllegalStateException("UTF-8 is not supported encoding!", e);
}
} else if (url.getProtocol().equals("jar")) {
synchronized (expandedResources) {
File result = expandedResources.get(url.toString());
if (result != null) {
return result;
}
// Either need a temporary file, or a temporary directory
try {
result = File.createTempFile(this.getClass().getSimpleName() + "-"
+ StringUtils.abbreviate(url.toString(), 32, 96).replaceAll("\\W+", "_") + "-", ".database");
result.deleteOnExit();
expandedResources.put(url.toString(), result);
JarURLConnection jar = (JarURLConnection) url.openConnection();
if (jar.getJarEntry().isDirectory()) {
log.info(url.toString() + " is a directory inside a Jar. Inflating it to a temporary folder in order to provide a file view of it.");
if (!result.delete() || !result.mkdirs()) {
throw new IllegalStateException("Unable to transform [" + result + "] into a temporary directory");
}
try (JarInputStream input = new JarInputStream(jar.getJarFileURL().openStream())) {
String prefix = jar.getJarEntry().getName();
ZipEntry entry;
while ((entry = input.getNextEntry()) != null) { // NOPMD
if (entry.getName().startsWith(prefix) && !entry.isDirectory()) {
File target = new File(result, entry.getName().substring(prefix.length()));
if (!target.getParentFile().exists() && !target.getParentFile().mkdirs()) {
throw new RuntimeException("Could not make directories [" + target.getParentFile() + "]");
}
try (OutputStream output = new BufferedOutputStream(new FileOutputStream(target))) {
ByteStreams.copy(input, output);
}
}
}
}
} else {
log.info(url.toString() + " is a file inside a Jar. Extracting it to a temporary file to provide a view of it.");
try (InputStream input = url.openStream();
OutputStream output = new BufferedOutputStream(new FileOutputStream(result))) {
ByteStreams.copy(input, output);
}
}
} catch (IOException e) {
throw new RuntimeException("Unable to access [" + url + "] when targeting temporary file [" + result + "]", e);
}
return result;
}
} else if (url.getProtocol().equals("https") || url.getProtocol().equals("http")) {
if (url.getPath().endsWith(".zip")) {
log.info(url.toString() + " is a zip over http/https. Downloading it to a temporary file to provide a view of it.");
File dataSet = createTempFile("dataset", ".tmp");
downloadFileFromHttpUrl(url, urlUsername, urlPassword, dataSet);
tempFiles.add(dataSet);
log.info("Successfully downloaded zipped data set [" + url + "] to temp file [" + dataSet + "]");
return dataSet;
} else {
log.info(url.toString() + " is a directory over http/https. Downloading the remote directory to provide a view of the xml files contained in there.");
// -- This is an experimental attempt to traverse the file directory and source the relevant XML files. YMMV.
//
// Create a temp file which will hold the xml files in the data set
File directoryFile = createTempFile("urlDirectory", ".tmp");
// populate file from url
downloadFileFromHttpUrl(url, urlUsername, urlPassword, directoryFile);
// We will now have the directory file with all the links of the xml files so get a list of all the xml files
ArrayList<String> xmlFiles = new ArrayList<>();
CharSource source = Files.asCharSource(directoryFile, Charset.forName("UTF-8"));
try (BufferedReader bufRead = source.openBufferedStream()) {
String line = bufRead.readLine();
while (line != null)
{
if (line.contains("file name=")) {
int start = line.indexOf('"') + 1;
int end = line.indexOf('"', start);
String file = line.substring(start, end);
if (file.endsWith(".xml")) xmlFiles.add(file);
}
line = bufRead.readLine();
}
} catch (IOException e) {
throw new RuntimeException("Exception reading file [" + directoryFile+ "]", e);
}
// Download the xml files and place them in a folder
File dataSet = createTempFile("dataset", ".database");
if (!dataSet.delete() || !dataSet.mkdirs()) {
throw new IllegalStateException("Unable to transform [" + dataSet + "] into a temporary directory");
}
for (String xml : xmlFiles) {
File target = createTempFile(xml.substring(0,xml.indexOf('.')), ".xml", dataSet);
tempFiles.add(target);
if (!target.getParentFile().mkdirs()) {
throw new RuntimeException("Unable to create directories for: " + target.getParentFile());
}
try {
downloadFileFromHttpUrl(new URL(url.toString() + xml), urlUsername, urlPassword, target);
} catch (MalformedURLException e) {
throw new RuntimeException("Unable to create URL: " + url.toString() + xml, e);
}
}
if (!directoryFile.delete()) {
throw new RuntimeException("Unable to delete [" + directoryFile + "]");
}
// add data set directory last so that when we attempt to delete it in the close() method the files it holds will already have been deleted.
tempFiles.add(dataSet);
return dataSet;
}
} else {
throw new UnsupportedOperationException("Unsupported URL protocol on [" + url + "]");
}
} | 3.68 |
hbase_ByteBufferUtils_readVLong | /**
* Similar to {@link WritableUtils#readVLong(java.io.DataInput)} but reads from a
* {@link ByteBuff}.
*/
public static long readVLong(ByteBuff in) {
return readVLong(in::get);
} | 3.68 |
framework_ValidationResult_create | /**
* Creates the validation result with the given {@code errorMessage} and
* {@code errorLevel}. Results with {@link ErrorLevel} of {@code INFO} or
* {@code WARNING} are not errors by default.
*
* @see #ok()
* @see #error(String)
*
* @param errorMessage
* error message, not {@code null}
* @param errorLevel
* error level, not {@code null}
* @return validation result with the given {@code errorMessage} and
* {@code errorLevel}
* @throws NullPointerException
* if {@code errorMessage} or {@code errorLevel} is {@code null}
*
* @since 8.2
*/
public static ValidationResult create(String errorMessage,
ErrorLevel errorLevel) {
Objects.requireNonNull(errorMessage);
Objects.requireNonNull(errorLevel);
return new SimpleValidationResult(errorMessage, errorLevel);
} | 3.68 |
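A hedged sketch of how the factory method above can be used inside a custom Validator; the validator class name and the length threshold are illustrative assumptions:

```java
import com.vaadin.data.ValidationResult;
import com.vaadin.data.Validator;
import com.vaadin.data.ValueContext;
import com.vaadin.shared.ui.ErrorLevel;

public class NameLengthValidator implements Validator<String> {
  // WARNING-level results are reported to the user but are not errors by default.
  @Override
  public ValidationResult apply(String value, ValueContext context) {
    if (value != null && value.length() < 3) {
      return ValidationResult.create("Name looks unusually short", ErrorLevel.WARNING);
    }
    return ValidationResult.ok();
  }
}
```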
flink_Transformation_setCoLocationGroupKey | /**
* <b>NOTE:</b> This is an internal undocumented feature for now. It is not clear whether this
* will be supported and stable in the long term.
*
* <p>Sets the key that identifies the co-location group. Operators with the same co-location
* key will have their corresponding subtasks placed into the same slot by the scheduler.
*
* <p>Setting this to null means there is no co-location constraint.
*/
public void setCoLocationGroupKey(@Nullable String coLocationGroupKey) {
this.coLocationGroupKey = coLocationGroupKey;
} | 3.68 |
hadoop_SubApplicationEntityReader_updateFilterForConfsAndMetricsToRetrieve | /**
* Updates filter list based on fields for confs and metrics to retrieve.
*
* @param listBasedOnFields filter list based on fields.
* @throws IOException if any problem occurs while updating filter list.
*/
private void updateFilterForConfsAndMetricsToRetrieve(
FilterList listBasedOnFields, Set<String> cfsInFields)
throws IOException {
TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
// Please note that if confsToRetrieve is specified, we would have added
// CONFS to fields to retrieve in augmentParams() even if not specified.
if (dataToRetrieve.getFieldsToRetrieve().contains(Field.CONFIGS)) {
// Create a filter list for configs.
listBasedOnFields.addFilter(
TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(
dataToRetrieve.getConfsToRetrieve(),
SubApplicationColumnFamily.CONFIGS,
SubApplicationColumnPrefix.CONFIG));
cfsInFields.add(
Bytes.toString(SubApplicationColumnFamily.CONFIGS.getBytes()));
}
// Please note that if metricsToRetrieve is specified, we would have added
// METRICS to fields to retrieve in augmentParams() even if not specified.
if (dataToRetrieve.getFieldsToRetrieve().contains(Field.METRICS)) {
// Create a filter list for metrics.
listBasedOnFields.addFilter(
TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(
dataToRetrieve.getMetricsToRetrieve(),
SubApplicationColumnFamily.METRICS,
SubApplicationColumnPrefix.METRIC));
cfsInFields.add(
Bytes.toString(SubApplicationColumnFamily.METRICS.getBytes()));
}
} | 3.68 |
hbase_ByteBuffAllocator_getBuffer | /**
* @return One free DirectByteBuffer from the pool. If there is no free ByteBuffer and the maximum
*         pool size has not been reached, a new one is created and returned. If the max pool size
*         has also been reached, null is returned. When the pool returns a ByteBuffer, make sure
*         to return it back to the pool after use.
*/
private ByteBuffer getBuffer() {
ByteBuffer bb = buffers.poll();
if (bb != null) {
// To reset the limit to capacity and position to 0, must clear here.
bb.clear();
poolAllocationBytes.add(bufSize);
return bb;
}
while (true) {
int c = this.usedBufCount.intValue();
if (c >= this.maxBufCount) {
if (!maxPoolSizeInfoLevelLogged) {
LOG.info("Pool already reached its max capacity : {} and no free buffers now. Consider "
+ "increasing the value for '{}' ?", maxBufCount, MAX_BUFFER_COUNT_KEY);
maxPoolSizeInfoLevelLogged = true;
}
return null;
}
if (!this.usedBufCount.compareAndSet(c, c + 1)) {
continue;
}
poolAllocationBytes.add(bufSize);
return ByteBuffer.allocateDirect(bufSize);
}
} | 3.68 |
hbase_FavoredStochasticBalancer_assignRegionToAvailableFavoredNode | /**
* Assign the region to the primary if it is available. If both secondary and tertiary are
* available, assign to the host which has less load. Else assign to secondary or tertiary,
* whichever is available (in that order).
*/
private void assignRegionToAvailableFavoredNode(
Map<ServerName, List<RegionInfo>> assignmentMapForFavoredNodes, RegionInfo region,
ServerName primaryHost, ServerName secondaryHost, ServerName tertiaryHost) {
if (primaryHost != null) {
addRegionToMap(assignmentMapForFavoredNodes, region, primaryHost);
} else if (secondaryHost != null && tertiaryHost != null) {
// Assign the region to the one with a lower load (both have the desired hdfs blocks)
ServerName s;
ServerMetrics tertiaryLoad = provider.getLoad(tertiaryHost);
ServerMetrics secondaryLoad = provider.getLoad(secondaryHost);
if (secondaryLoad != null && tertiaryLoad != null) {
if (secondaryLoad.getRegionMetrics().size() < tertiaryLoad.getRegionMetrics().size()) {
s = secondaryHost;
} else {
s = tertiaryHost;
}
} else {
// We don't have one/more load, lets just choose a random node
s = ThreadLocalRandom.current().nextBoolean() ? secondaryHost : tertiaryHost;
}
addRegionToMap(assignmentMapForFavoredNodes, region, s);
} else if (secondaryHost != null) {
addRegionToMap(assignmentMapForFavoredNodes, region, secondaryHost);
} else if (tertiaryHost != null) {
addRegionToMap(assignmentMapForFavoredNodes, region, tertiaryHost);
} else {
// No favored nodes are online, lets assign to BOGUS server
addRegionToMap(assignmentMapForFavoredNodes, region, BOGUS_SERVER_NAME);
}
} | 3.68 |
hbase_RequestConverter_buildCreateNamespaceRequest | /**
* Creates a protocol buffer CreateNamespaceRequest
* @return a CreateNamespaceRequest
*/
public static CreateNamespaceRequest
buildCreateNamespaceRequest(final NamespaceDescriptor descriptor) {
CreateNamespaceRequest.Builder builder = CreateNamespaceRequest.newBuilder();
builder.setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor));
return builder.build();
} | 3.68 |
hbase_HFileBlock_encodedBlockSizeWritten | /**
* Returns the number of bytes written into the current block so far, or zero if not writing the
* block at the moment. Note that this will return zero in the "block ready" state as well.
* @return the number of bytes written
*/
public int encodedBlockSizeWritten() {
return state != State.WRITING ? 0 : this.getEncodingState().getEncodedDataSizeWritten();
} | 3.68 |
flink_ConnectionUtils_tryToConnect | /**
* @param fromAddress The address to connect from.
* @param toSocket The socket address to connect to.
* @param timeout The timeout for the connection.
* @param logFailed Flag to indicate whether to log failed attempts on info level (failed
* attempts are always logged on DEBUG level).
* @return True, if the connection was successful, false otherwise.
* @throws IOException Thrown if the socket cleanup fails.
*/
private static boolean tryToConnect(
InetAddress fromAddress, SocketAddress toSocket, int timeout, boolean logFailed)
throws IOException {
String detailedMessage =
String.format(
"connect to [%s] from local address [%s] with timeout [%s]",
toSocket, fromAddress, timeout);
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to " + detailedMessage);
}
try (Socket socket = new Socket()) {
// port 0 = let the OS choose the port
SocketAddress bindP = new InetSocketAddress(fromAddress, 0);
// machine
socket.bind(bindP);
socket.connect(toSocket, timeout);
return true;
} catch (Exception ex) {
String message = "Failed to " + detailedMessage + " due to: " + ex.getMessage();
if (LOG.isDebugEnabled()) {
LOG.debug(message, ex);
} else if (logFailed) {
LOG.info(message);
}
return false;
}
} | 3.68 |
dubbo_ReferenceConfig_createInvoker | /**
* Create a reference invoker.
*/
@SuppressWarnings({"unchecked", "rawtypes"})
private void createInvoker() {
if (urls.size() == 1) {
URL curUrl = urls.get(0);
invoker = protocolSPI.refer(interfaceClass, curUrl);
// registry url, mesh-enable and unloadClusterRelated is true, not need Cluster.
if (!UrlUtils.isRegistry(curUrl) && !curUrl.getParameter(UNLOAD_CLUSTER_RELATED, false)) {
List<Invoker<?>> invokers = new ArrayList<>();
invokers.add(invoker);
invoker = Cluster.getCluster(getScopeModel(), Cluster.DEFAULT)
.join(new StaticDirectory(curUrl, invokers), true);
}
} else {
List<Invoker<?>> invokers = new ArrayList<>();
URL registryUrl = null;
for (URL url : urls) {
// For multi-registry scenarios, it is not checked whether each referInvoker is available,
// because this invoker may become available later.
invokers.add(protocolSPI.refer(interfaceClass, url));
if (UrlUtils.isRegistry(url)) {
// use last registry url
registryUrl = url;
}
}
if (registryUrl != null) {
// registry url is available
// for multi-subscription scenario, use 'zone-aware' policy by default
String cluster = registryUrl.getParameter(CLUSTER_KEY, ZoneAwareCluster.NAME);
// The invoker wrap sequence would be: ZoneAwareClusterInvoker(StaticDirectory) ->
// FailoverClusterInvoker
// (RegistryDirectory, routing happens here) -> Invoker
invoker = Cluster.getCluster(registryUrl.getScopeModel(), cluster, false)
.join(new StaticDirectory(registryUrl, invokers), false);
} else {
// not a registry url, must be direct invoke.
if (CollectionUtils.isEmpty(invokers)) {
throw new IllegalArgumentException("invokers == null");
}
URL curUrl = invokers.get(0).getUrl();
String cluster = curUrl.getParameter(CLUSTER_KEY, Cluster.DEFAULT);
invoker =
Cluster.getCluster(getScopeModel(), cluster).join(new StaticDirectory(curUrl, invokers), true);
}
}
} | 3.68 |
hudi_MarkerDirState_markFileAsAvailable | /**
* Marks the file as available to use again.
*
* @param fileIndex file index
*/
public void markFileAsAvailable(int fileIndex) {
synchronized (markerCreationProcessingLock) {
threadUseStatus.set(fileIndex, false);
}
} | 3.68 |
framework_AbstractMultiSelectConnector_getIconUrl | /**
* Returns the optional icon URL for the given item.
* <p>
* Item icons are not supported by all multiselects.
*
* @param item
* the item
* @return the optional icon URL, or an empty optional if none specified
*/
static Optional<String> getIconUrl(JsonObject item) {
return Optional.ofNullable(
item.getString(ListingJsonConstants.JSONKEY_ITEM_ICON));
} | 3.68 |
hbase_ProcedureMember_createSubprocedure | /**
* This is separated from execution so that we can detect and handle the case where the
* subprocedure is invalid and inactionable due to bad info (like DISABLED snapshot type being
* sent here)
*/
public Subprocedure createSubprocedure(String opName, byte[] data) {
return builder.buildSubprocedure(opName, data);
} | 3.68 |
graphhopper_MinHeapWithUpdate_update | /**
* Updates the element with the given id. The complexity of this method is O(log(N)), just like push/poll.
* It is illegal to update elements that are not contained in the heap. Use {@link #contains} to check the existence
* of an id.
*/
public void update(int id, float value) {
checkIdInRange(id);
int index = positions[id];
if (index < 0)
throw new IllegalStateException("The heap does not contain: " + id + ". Use the contains method to check this before calling update");
float prev = vals[index];
vals[index] = value;
if (value > prev)
percolateDown(index);
else if (value < prev)
percolateUp(index);
} | 3.68 |
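A small usage sketch of the update contract described above (guard with contains before update); the package location, constructor and return type of poll are assumed from the surrounding GraphHopper class, so treat this as illustrative:

```java
import com.graphhopper.coll.MinHeapWithUpdate;

public class HeapUpdateExample {
  public static void main(String[] args) {
    MinHeapWithUpdate heap = new MinHeapWithUpdate(10); // ids must lie in [0, 10)
    heap.push(3, 5.0f);
    heap.push(7, 2.0f);
    // Guard with contains(...) before updating, as the javadoc above requires.
    if (heap.contains(3)) {
      heap.update(3, 1.0f); // id 3 now carries the smallest value
    }
    System.out.println(heap.poll()); // expected to print 3
  }
}
```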
hadoop_TaskManifest_createSerializer | /**
* Get a JSON serializer for this class.
* @return a serializer.
*/
@Override
public JsonSerialization<TaskManifest> createSerializer() {
return serializer();
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_getSubClustersForId | /**
* Return all known subclusters associated with an allocation id.
*
* @param allocationId the allocation id considered
*
* @return the list of {@link SubClusterId}s associated with this allocation
* id
*/
private Set<SubClusterId> getSubClustersForId(long allocationId) {
if (countContainersPerRM.get(allocationId) == null) {
return null;
}
return countContainersPerRM.get(allocationId).keySet();
} | 3.68 |
shardingsphere-elasticjob_ExecutorServiceReloader_reloadIfNecessary | /**
* Reload if necessary.
*
* @param jobConfig job configuration
*/
public synchronized void reloadIfNecessary(final JobConfiguration jobConfig) {
if (jobExecutorThreadPoolSizeProviderType.equals(jobConfig.getJobExecutorThreadPoolSizeProviderType())) {
return;
}
executorService.shutdown();
init(jobConfig);
} | 3.68 |
flink_LocalProperties_filterBySemanticProperties | /**
* Filters these LocalProperties by the fields that are forwarded to the output as described by
* the SemanticProperties.
*
* @param props The semantic properties holding information about forwarded fields.
* @param input The index of the input.
* @return The filtered LocalProperties
*/
public LocalProperties filterBySemanticProperties(SemanticProperties props, int input) {
if (props == null) {
throw new NullPointerException("SemanticProperties may not be null.");
}
LocalProperties returnProps = new LocalProperties();
// check if sorting is preserved
if (this.ordering != null) {
Ordering newOrdering = new Ordering();
for (int i = 0; i < this.ordering.getInvolvedIndexes().size(); i++) {
int sourceField = this.ordering.getInvolvedIndexes().get(i);
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if (targetField == null || targetField.size() == 0) {
if (i == 0) {
// order fully destroyed
newOrdering = null;
break;
} else {
// order partially preserved
break;
}
} else {
// use any field of target fields for now. We should use something like field
// equivalence sets in the future.
if (targetField.size() > 1) {
LOG.warn(
"Found that a field is forwarded to more than one target field in "
+ "semantic forwarded field information. Will only use the field with the lowest index.");
}
newOrdering.appendOrdering(
targetField.toArray()[0],
this.ordering.getType(i),
this.ordering.getOrder(i));
}
}
returnProps.ordering = newOrdering;
if (newOrdering != null) {
returnProps.groupedFields = newOrdering.getInvolvedIndexes();
} else {
returnProps.groupedFields = null;
}
}
// check if grouping is preserved
else if (this.groupedFields != null) {
FieldList newGroupedFields = new FieldList();
for (Integer sourceField : this.groupedFields) {
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if (targetField == null || targetField.size() == 0) {
newGroupedFields = null;
break;
} else {
// use any field of target fields for now. We should use something like field
// equivalence sets in the future.
if (targetField.size() > 1) {
LOG.warn(
"Found that a field is forwarded to more than one target field in "
+ "semantic forwarded field information. Will only use the field with the lowest index.");
}
newGroupedFields = newGroupedFields.addField(targetField.toArray()[0]);
}
}
returnProps.groupedFields = newGroupedFields;
}
if (this.uniqueFields != null) {
Set<FieldSet> newUniqueFields = new HashSet<FieldSet>();
for (FieldSet fields : this.uniqueFields) {
FieldSet newFields = new FieldSet();
for (Integer sourceField : fields) {
FieldSet targetField = props.getForwardingTargetFields(input, sourceField);
if (targetField == null || targetField.size() == 0) {
newFields = null;
break;
} else {
// use any field of target fields for now. We should use something like
// field equivalence sets in the future.
if (targetField.size() > 1) {
LOG.warn(
"Found that a field is forwarded to more than one target field in "
+ "semantic forwarded field information. Will only use the field with the lowest index.");
}
newFields = newFields.addField(targetField.toArray()[0]);
}
}
if (newFields != null) {
newUniqueFields.add(newFields);
}
}
if (!newUniqueFields.isEmpty()) {
returnProps.uniqueFields = newUniqueFields;
} else {
returnProps.uniqueFields = null;
}
}
return returnProps;
} | 3.68 |
hbase_MultiRowRangeFilter_isStartRowInclusive | /** Returns whether the start row is inclusive. */
public boolean isStartRowInclusive() {
return startRowInclusive;
} | 3.68 |
flink_SortedGrouping_combineGroup | /**
* Applies a GroupCombineFunction on a grouped {@link DataSet}. A CombineFunction is similar to
* a GroupReduceFunction but does not perform a full data exchange. Instead, the CombineFunction
* calls the combine method once per partition for combining a group of results. This operator
* is suitable for combining values into an intermediate format before doing a proper
* groupReduce where the data is shuffled across the node for further reduction. The GroupReduce
* operator can also be supplied with a combiner by implementing the RichGroupReduce function.
* The combine method of the RichGroupReduce function demands input and output type to be the
* same. The CombineFunction, on the other side, can have an arbitrary output type.
*
* @param combiner The GroupCombineFunction that is applied on the DataSet.
* @return A GroupCombineOperator which represents the combined DataSet.
*/
public <R> GroupCombineOperator<T, R> combineGroup(GroupCombineFunction<T, R> combiner) {
if (combiner == null) {
throw new NullPointerException("GroupCombine function must not be null.");
}
TypeInformation<R> resultType =
TypeExtractor.getGroupCombineReturnTypes(
combiner,
this.getInputDataSet().getType(),
Utils.getCallLocationName(),
true);
return new GroupCombineOperator<>(
this, resultType, inputDataSet.clean(combiner), Utils.getCallLocationName());
} | 3.68 |
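A minimal usage sketch for the combineGroup entry above, using Flink's (legacy) DataSet API; the data set, key field, and summing logic are illustrative assumptions, not part of the original snippet:
import org.apache.flink.api.common.functions.GroupCombineFunction;
import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

public class CombineGroupSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<String, Integer>> words = env.fromElements(
                Tuple2.of("a", 1), Tuple2.of("a", 2), Tuple2.of("b", 3));
        // Pre-aggregate per partition before any full shuffle; the output type is free to differ.
        words.groupBy(0)
                .sortGroup(1, Order.ASCENDING)
                .combineGroup(new GroupCombineFunction<Tuple2<String, Integer>, Tuple2<String, Integer>>() {
                    @Override
                    public void combine(Iterable<Tuple2<String, Integer>> values,
                            Collector<Tuple2<String, Integer>> out) {
                        String key = null;
                        int sum = 0;
                        for (Tuple2<String, Integer> v : values) {
                            key = v.f0;
                            sum += v.f1;
                        }
                        out.collect(Tuple2.of(key, sum));
                    }
                })
                .print();
    }
}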
framework_CSSInjectWithColorpicker_createFontSelect | /**
* Creates a font family selection combo box
*/
private Component createFontSelect() {
final ComboBox<String> select = new ComboBox<>(null,
Arrays.asList("Arial", "Helvetica", "Verdana", "Courier",
"Times", "sans-serif"));
select.setValue("Arial");
select.setWidth("200px");
select.setPlaceholder("Font");
select.setDescription("Font");
select.setEmptySelectionAllowed(false);
select.addValueChangeListener(event -> {
// Get the new font family
String fontFamily = select.getValue();
// Get the stylesheet of the page
Styles styles = Page.getCurrent().getStyles();
// inject the new font family as a style. We need .v-app to
// override Vaadin's default styles here
styles.add(".v-app .v-textarea.text-label { font-family:"
+ fontFamily + "; }");
});
return select;
} | 3.68 |
framework_Upload_setButtonStyleName | /**
* In addition to the actual file chooser, upload components have a button
* that starts the actual upload. This method is used to set a style name
* for that button.
* <p>
* Note: Unlike {@code Button.setStyleName()} this method overrides all the
* styles from the button. If you wish to preserve the default styles, enter
* the style name as {@code "v-button yourStyleName"}.
*
* @param buttonStyleName
* style name for upload components button.
* @see #setButtonCaption(String) about when the button is shown / hidden.
* @since 8.2
*/
public void setButtonStyleName(String buttonStyleName) {
getState().buttonStyleName = buttonStyleName;
} | 3.68 |
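A short hedged sketch of the button styling above, as a fragment that would sit in a Vaadin 8 UI's init method; the caption, style name, and throw-away receiver are assumptions:
import java.io.ByteArrayOutputStream;
import com.vaadin.ui.Upload;

// The receiver here simply discards the uploaded bytes; a real application would persist them.
Upload upload = new Upload("Report", (filename, mimeType) -> new ByteArrayOutputStream());
upload.setButtonCaption("Send report");
// setButtonStyleName replaces every class on the button, so v-button is repeated to keep the defaults.
upload.setButtonStyleName("v-button my-upload-button");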
flink_NFA_computeNextStates | /**
* Computes the next computation states based on the given computation state, the current event,
* its timestamp and the internal state machine. The algorithm is:
*
* <ol>
* <li>Decide on valid transitions and number of branching paths. See {@link OutgoingEdges}
* <li>Perform transitions:
* <ol>
* <li>IGNORE (links in {@link SharedBuffer} will still point to the previous event)
* <ul>
* <li>do not perform for Start State - special case
* <li>if it stays in the same state, increase the current stage for future use with
* the number of outgoing edges
* <li>if after PROCEED, increase the current stage and add a new stage (as we
* change the state)
* <li>lock the entry in {@link SharedBuffer} as it is needed in the created
* branch
* </ul>
* <li>TAKE (links in {@link SharedBuffer} will point to the current event)
* <ul>
* <li>add entry to the shared buffer with version of the current computation
* state
* <li>add stage and then increase with number of takes for the future computation
* states
* <li>peek to the next state if it has PROCEED path to a Final State, if true
* create Final ComputationState to emit results
* </ul>
* </ol>
* <li>Handle the Start State, as it always has to remain
* <li>Release the corresponding entries in {@link SharedBuffer}.
* </ol>
*
* @param sharedBufferAccessor The accessor to shared buffer that we need to change
* @param computationState Current computation state
* @param event Current event which is processed
* @param timerService timer service which provides access to time related features
* @return Collection of computation states which result from the current one
* @throws Exception Thrown if the system cannot access the state.
*/
private Collection<ComputationState> computeNextStates(
final SharedBufferAccessor<T> sharedBufferAccessor,
final ComputationState computationState,
final EventWrapper event,
final TimerService timerService)
throws Exception {
final ConditionContext context =
new ConditionContext(
sharedBufferAccessor, computationState, timerService, event.getTimestamp());
final OutgoingEdges<T> outgoingEdges =
createDecisionGraph(context, computationState, event.getEvent());
// Create the computing version based on the previously computed edges
// We need to defer the creation of computation states until we know how many edges start
// at this computation state so that we can assign proper version
final List<StateTransition<T>> edges = outgoingEdges.getEdges();
int takeBranchesToVisit = Math.max(0, outgoingEdges.getTotalTakeBranches() - 1);
int ignoreBranchesToVisit = outgoingEdges.getTotalIgnoreBranches();
int totalTakeToSkip = Math.max(0, outgoingEdges.getTotalTakeBranches() - 1);
final List<ComputationState> resultingComputationStates = new ArrayList<>();
for (StateTransition<T> edge : edges) {
switch (edge.getAction()) {
case IGNORE:
{
if (!isStartState(computationState)) {
final DeweyNumber version;
if (isEquivalentState(
edge.getTargetState(), getState(computationState))) {
// Stay in the same state (it can be either looping one or
// singleton)
final int toIncrease =
calculateIncreasingSelfState(
outgoingEdges.getTotalIgnoreBranches(),
outgoingEdges.getTotalTakeBranches());
version = computationState.getVersion().increase(toIncrease);
} else {
// IGNORE after PROCEED
version =
computationState
.getVersion()
.increase(totalTakeToSkip + ignoreBranchesToVisit)
.addStage();
ignoreBranchesToVisit--;
}
addComputationState(
sharedBufferAccessor,
resultingComputationStates,
edge.getTargetState(),
computationState.getPreviousBufferEntry(),
version,
computationState.getStartTimestamp(),
computationState.getPreviousTimestamp(),
computationState.getStartEventID());
}
}
break;
case TAKE:
final State<T> nextState = edge.getTargetState();
final State<T> currentState = edge.getSourceState();
final NodeId previousEntry = computationState.getPreviousBufferEntry();
final DeweyNumber currentVersion =
computationState.getVersion().increase(takeBranchesToVisit);
final DeweyNumber nextVersion = new DeweyNumber(currentVersion).addStage();
takeBranchesToVisit--;
final NodeId newEntry =
sharedBufferAccessor.put(
currentState.getName(),
event.getEventId(),
previousEntry,
currentVersion);
final long startTimestamp;
final EventId startEventId;
if (isStartState(computationState)) {
startTimestamp = event.getTimestamp();
startEventId = event.getEventId();
} else {
startTimestamp = computationState.getStartTimestamp();
startEventId = computationState.getStartEventID();
}
final long previousTimestamp = event.getTimestamp();
addComputationState(
sharedBufferAccessor,
resultingComputationStates,
nextState,
newEntry,
nextVersion,
startTimestamp,
previousTimestamp,
startEventId);
// check if newly created state is optional (have a PROCEED path to Final state)
final State<T> finalState =
findFinalStateAfterProceed(context, nextState, event.getEvent());
if (finalState != null) {
addComputationState(
sharedBufferAccessor,
resultingComputationStates,
finalState,
newEntry,
nextVersion,
startTimestamp,
previousTimestamp,
startEventId);
}
break;
}
}
if (isStartState(computationState)) {
int totalBranches =
calculateIncreasingSelfState(
outgoingEdges.getTotalIgnoreBranches(),
outgoingEdges.getTotalTakeBranches());
DeweyNumber startVersion = computationState.getVersion().increase(totalBranches);
ComputationState startState =
ComputationState.createStartState(
computationState.getCurrentStateName(), startVersion);
resultingComputationStates.add(startState);
}
if (computationState.getPreviousBufferEntry() != null) {
// release the shared entry referenced by the current computation state.
sharedBufferAccessor.releaseNode(
computationState.getPreviousBufferEntry(), computationState.getVersion());
}
return resultingComputationStates;
} | 3.68 |
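The method above is internal to Flink CEP's NFA; for orientation, a hedged sketch of the public Pattern API whose optional() and oneOrMore() quantifiers are what give rise to the TAKE/IGNORE/PROCEED branching handled here (the String events and conditions are illustrative):
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;

public class PatternSketch {
    public static Pattern<String, String> failuresThenSuccess() {
        // "middle" is looping and optional, so for each event the NFA keeps both a TAKE branch
        // (consume it into the match) and an IGNORE branch (skip it) alive, versioned via DeweyNumbers.
        return Pattern.<String>begin("first")
                .where(new SimpleCondition<String>() {
                    @Override
                    public boolean filter(String value) {
                        return value.startsWith("fail");
                    }
                })
                .followedBy("middle")
                .where(new SimpleCondition<String>() {
                    @Override
                    public boolean filter(String value) {
                        return value.startsWith("fail");
                    }
                })
                .oneOrMore()
                .optional()
                .next("last")
                .where(new SimpleCondition<String>() {
                    @Override
                    public boolean filter(String value) {
                        return value.startsWith("success");
                    }
                });
    }
}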
hbase_NamespaceStateManager_initialize | /**
* Initialize namespace state cache by scanning meta table.
*/
private void initialize() throws IOException {
List<NamespaceDescriptor> namespaces = this.master.getClusterSchema().getNamespaces();
for (NamespaceDescriptor namespace : namespaces) {
addNamespace(namespace.getName());
List<TableName> tables = this.master.listTableNamesByNamespace(namespace.getName());
for (TableName table : tables) {
if (table.isSystemTable()) {
continue;
}
List<RegionInfo> regions =
MetaTableAccessor.getTableRegions(this.master.getConnection(), table, true);
addTable(table, regions.size());
}
}
LOG.info("Finished updating state of " + nsStateCache.size() + " namespaces. ");
initialized = true;
} | 3.68 |
hbase_AccessController_createACLTable | /**
* Create the ACL table
*/
private static void createACLTable(Admin admin) throws IOException {
/** Table descriptor for ACL table */
ColumnFamilyDescriptor cfd =
ColumnFamilyDescriptorBuilder.newBuilder(PermissionStorage.ACL_LIST_FAMILY).setMaxVersions(1)
.setInMemory(true).setBlockCacheEnabled(true).setBlocksize(8 * 1024)
.setBloomFilterType(BloomType.NONE).setScope(HConstants.REPLICATION_SCOPE_LOCAL).build();
TableDescriptor td = TableDescriptorBuilder.newBuilder(PermissionStorage.ACL_TABLE_NAME)
.setColumnFamily(cfd).build();
admin.createTable(td);
} | 3.68 |
framework_AbstractBeanContainer_addAll | /**
* Adds all the beans from a {@link Collection} in one operation using the
* bean item identifier resolver. More efficient than adding them one by
* one.
*
* A bean id resolver must be set before calling this method.
*
* Note: the behavior of this method changed in Vaadin 6.6 - now items are
* added at the very end of the unfiltered container and not after the last
* visible item if filtering is used.
*
* @param collection
* The collection of beans to add. Must not be null.
* @throws IllegalStateException
* if no bean identifier resolver has been set
* @throws IllegalArgumentException
* if the resolver returns a null itemId for one of the beans in
* the collection
*/
protected void addAll(Collection<? extends BEANTYPE> collection)
throws IllegalStateException, IllegalArgumentException {
boolean modified = false;
int origSize = size();
for (BEANTYPE bean : collection) {
// TODO skipping invalid beans - should not allow them in javadoc?
if (bean == null
|| !getBeanType().isAssignableFrom(bean.getClass())) {
continue;
}
IDTYPE itemId = resolveBeanId(bean);
if (itemId == null) {
throw new IllegalArgumentException(
"Resolved identifier for a bean must not be null");
}
if (internalAddItemAtEnd(itemId, createBeanItem(bean),
false) != null) {
modified = true;
}
}
if (modified) {
// Filter the contents when all items have been added
if (isFiltered()) {
doFilterContainer(!getFilters().isEmpty());
}
if (visibleNewItemsWasAdded(origSize)) {
// fire event about added items
int firstPosition = origSize;
IDTYPE firstItemId = getVisibleItemIds().get(firstPosition);
int affectedItems = size() - origSize;
fireItemsAdded(firstPosition, firstItemId, affectedItems);
}
}
} | 3.68 |
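A hedged usage sketch of addAll through the public BeanContainer subclass, which exposes it once a bean id resolver has been set; the Person bean, the "id" property, and the package (com.vaadin.v7.data.util in the Vaadin 8 compatibility layer, com.vaadin.data.util in Vaadin 7) are assumptions:
import java.util.Arrays;
import com.vaadin.v7.data.util.BeanContainer;

public class PersonContainerSketch {
    // Minimal illustrative bean; any JavaBean with getters works.
    public static class Person {
        private final String id;
        private final String name;
        public Person(String id, String name) { this.id = id; this.name = name; }
        public String getId() { return id; }
        public String getName() { return name; }
    }

    public static BeanContainer<String, Person> build() {
        BeanContainer<String, Person> container = new BeanContainer<>(Person.class);
        // The id resolver must be set first, otherwise addAll throws IllegalStateException.
        container.setBeanIdProperty("id");
        container.addAll(Arrays.asList(new Person("p1", "Ada"), new Person("p2", "Linus")));
        return container;
    }
}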
hbase_RegionServerFlushTableProcedureManager_submitTask | /**
* Submit a task to the pool. NOTE: all tasks must be submitted before you can safely call
* {@link #waitForOutstandingTasks()}.
*/
void submitTask(final Callable<Void> task) {
Future<Void> f = this.taskPool.submit(task);
futures.add(f);
} | 3.68 |
hadoop_SchedulerHealth_getAllocationCount | /**
* Get the count of allocation from the latest scheduler health report.
*
* @return allocation count
*/
public Long getAllocationCount() {
return getOperationCount(Operation.ALLOCATION);
} | 3.68 |
hbase_TimeRange_includesTimeRange | /**
* Check if the range has any overlap with TimeRange
* @param tr TimeRange
* @return True if there is overlap, false otherwise
*/
// This method came from TimeRangeTracker. We used to go there for this function but it is better
// to come here to the immutable, unsynchronized data structure at read time.
public boolean includesTimeRange(final TimeRange tr) {
if (this.allTime) {
return true;
}
assert tr.getMin() >= 0;
return getMin() < tr.getMax() && getMax() >= tr.getMin();
} | 3.68 |
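A hedged sketch of the overlap check above, assuming the TimeRange.between factory available in recent HBase versions; the timestamps are arbitrary:
import org.apache.hadoop.hbase.io.TimeRange;

public class TimeRangeOverlapSketch {
    public static void main(String[] args) {
        TimeRange flushWindow = TimeRange.between(1000L, 2000L); // [1000, 2000)
        TimeRange scanWindow = TimeRange.between(1500L, 3000L);  // [1500, 3000)
        // true: the two ranges share [1500, 2000)
        System.out.println(flushWindow.includesTimeRange(scanWindow));
        // false: [3000, 4000) starts at or beyond flushWindow's exclusive max
        System.out.println(flushWindow.includesTimeRange(TimeRange.between(3000L, 4000L)));
    }
}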
hadoop_HdfsDataOutputStream_hsync | /**
* Sync buffered data to DataNodes (flush to disk devices).
*
* @param syncFlags
* Indicate the detailed semantic and actions of the hsync.
* @throws IOException
* @see FSDataOutputStream#hsync()
*/
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
OutputStream wrappedStream = getWrappedStream();
if (wrappedStream instanceof CryptoOutputStream) {
wrappedStream.flush();
wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
}
((DFSOutputStream) wrappedStream).hsync(syncFlags);
} | 3.68 |
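A hedged client-side sketch of the flag-taking hsync above; the path is a placeholder, and the instanceof guard reflects that FileSystem.create only promises a plain FSDataOutputStream:
import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class HsyncSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        try (FSDataOutputStream out = fs.create(new Path("/tmp/hsync-demo"))) {
            out.write("durable line\n".getBytes(StandardCharsets.UTF_8));
            if (out instanceof HdfsDataOutputStream) {
                // Also persist the new file length on the NameNode, not just the bytes on the DataNodes.
                ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
            } else {
                out.hsync();
            }
        }
    }
}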
hudi_BaseCommitActionExecutor_finalizeWrite | /**
* Finalize Write operation.
* @param instantTime Instant Time
* @param stats Hoodie Write Stat
*/
protected void finalizeWrite(String instantTime, List<HoodieWriteStat> stats, HoodieWriteMetadata<O> result) {
try {
Instant start = Instant.now();
table.finalizeWrite(context, instantTime, stats);
result.setFinalizeDuration(Duration.between(start, Instant.now()));
} catch (HoodieIOException ioe) {
throw new HoodieCommitException("Failed to complete commit " + instantTime + " due to finalize errors.", ioe);
}
} | 3.68 |
flink_TimestampedFileInputSplit_setSplitState | /**
* Sets the state of the split. This information is used when restoring from a checkpoint and
* allows resuming reading of the underlying file from the point we left off.
*
* <p>This is applicable to {@link org.apache.flink.api.common.io.FileInputFormat
* FileInputFormats} that implement the {@link
* org.apache.flink.api.common.io.CheckpointableInputFormat} interface.
*/
public void setSplitState(Serializable state) {
this.splitState = state;
} | 3.68 |
flink_DynamicPartitionPruningUtils_canConvertAndConvertDppFactSide | /**
* Judges whether the input RelNode can be converted to the DPP fact side. If it can, this method
* returns the converted fact side, whose partitioned table source is converted to a
* {@link BatchPhysicalDynamicFilteringTableSourceScan}. If not, it returns the original RelNode.
*/
public static Tuple2<Boolean, RelNode> canConvertAndConvertDppFactSide(
RelNode rel,
ImmutableIntList joinKeys,
RelNode dimSide,
ImmutableIntList dimSideJoinKey) {
DppFactSideChecker dppFactSideChecker =
new DppFactSideChecker(rel, joinKeys, dimSide, dimSideJoinKey);
return dppFactSideChecker.canConvertAndConvertDppFactSide();
} | 3.68 |
hadoop_AzureNativeFileSystemStore_connectUsingConnectionStringCredentials | /**
* Connect to Azure storage using account key credentials.
*/
private void connectUsingConnectionStringCredentials(
final String accountName, final String containerName,
final String accountKey) throws InvalidKeyException, StorageException,
IOException, URISyntaxException {
// If the account name is "acc.blob.core.windows.net", then the
// rawAccountName is just "acc"
String rawAccountName = accountName.split("\\.")[0];
StorageCredentials credentials = new StorageCredentialsAccountAndKey(
rawAccountName, accountKey);
connectUsingCredentials(accountName, credentials, containerName);
} | 3.68 |
framework_Calendar_getFirstDateForWeek | /**
* Gets the date that is the first day of the week that the given date
* belongs to.
*
* @param date
* Target date
* @return Date that is the first date in the same week as the given date.
*/
protected Date getFirstDateForWeek(Date date) {
int firstDayOfWeek = currentCalendar.getFirstDayOfWeek();
currentCalendar.setTime(date);
while (firstDayOfWeek != currentCalendar
.get(java.util.Calendar.DAY_OF_WEEK)) {
currentCalendar.add(java.util.Calendar.DATE, -1);
}
return currentCalendar.getTime();
} | 3.68 |
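The same backwards walk to the locale's first weekday, sketched with plain java.util.Calendar outside the Vaadin component; the locale and date are arbitrary:
import java.util.Calendar;
import java.util.Date;
import java.util.Locale;

public class FirstDayOfWeekSketch {
    public static Date firstDateOfWeek(Date date, Locale locale) {
        Calendar cal = Calendar.getInstance(locale);
        cal.setTime(date);
        // Step back one day at a time until the locale's first weekday is reached.
        while (cal.get(Calendar.DAY_OF_WEEK) != cal.getFirstDayOfWeek()) {
            cal.add(Calendar.DATE, -1);
        }
        return cal.getTime();
    }

    public static void main(String[] args) {
        System.out.println(firstDateOfWeek(new Date(), Locale.GERMANY)); // Monday-based week
    }
}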
morf_MySqlDialect_fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming | /**
* MySQL doesn't permit an open connection to be used for anything else while using a streaming
* {@link ResultSet}, so if we know it will be, we disable streaming entirely. This has obvious
* memory implications for large data sets, so bulk loads should generally open new transactions
* inside the loop iterating the result set, which implicitly opens separate connections, allowing
* {@link Integer#MIN_VALUE} to be used instead.
*
* @see org.alfasoftware.morf.jdbc.SqlDialect#fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming()
*/
@Override
public int fetchSizeForBulkSelectsAllowingConnectionUseDuringStreaming() {
return Integer.MAX_VALUE;
} | 3.68 |
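For context on the fetch sizes discussed above, a hedged plain-JDBC sketch: with MySQL Connector/J, a forward-only, read-only statement with fetch size Integer.MIN_VALUE streams rows one at a time, whereas other values (such as the Integer.MAX_VALUE returned above) make the driver materialize the result set. The connection details and table are placeholders:
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class MySqlStreamingSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/demo", "user", "secret");
             Statement stmt = conn.createStatement(
                     ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)) {
            // Streaming hint: MySQL then forbids reusing this connection for other statements
            // until the ResultSet has been fully consumed or closed.
            stmt.setFetchSize(Integer.MIN_VALUE);
            try (ResultSet rs = stmt.executeQuery("SELECT id FROM big_table")) {
                while (rs.next()) {
                    process(rs.getLong(1));
                }
            }
        }
    }

    private static void process(long id) {
        // Placeholder for per-row work, e.g. batching into a separate transaction.
    }
}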
flink_RawValueData_fromObject | /** Creates an instance of {@link RawValueData} from a Java object. */
static <T> RawValueData<T> fromObject(T javaObject) {
return BinaryRawValueData.fromObject(javaObject);
} | 3.68 |
hbase_Scan_setNeedCursorResult | /**
* When the server is slow, or we scan a table with much deleted data, or we use a sparse filter,
* the server will respond with heartbeats to prevent a timeout. However, the scanner can only
* return a Result to the client when it has one, so if there are many heartbeats the blocking time
* on ResultScanner#next() may be very long, which is not friendly to online services. Set this to
* true and you can get a special Result whose #isCursor() returns true and which does not contain
* any real data. It only tells you where the server has scanned. You can call next to continue
* scanning, or open a new scanner with this row key as the start row, whenever you want. Users get
* a cursor when and only when there is a response from the server but we cannot return a Result to
* users, for example when the response is a heartbeat or when there are partial cells but users do
* not allow partial results. The cursor is at row level, which means the special Result only
* contains a row key. {@link Result#isCursor()} {@link Result#getCursor()} {@link Cursor}
*/
public Scan setNeedCursorResult(boolean needCursorResult) {
this.needCursorResult = needCursorResult;
return this;
} | 3.68 |
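A hedged client-side sketch of the cursor behaviour described above; the table name is an assumption:
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CursorScanSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("demo_table"));
             ResultScanner scanner = table.getScanner(new Scan().setNeedCursorResult(true))) {
            for (Result result : scanner) {
                if (result.isCursor()) {
                    // No data, only progress: the row key the server has scanned up to.
                    System.out.println("cursor at "
                            + Bytes.toStringBinary(result.getCursor().getRow()));
                } else {
                    System.out.println("row " + Bytes.toStringBinary(result.getRow()));
                }
            }
        }
    }
}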
framework_VTwinColSelect_setInternalWidths | /** For internal use only. May be removed or replaced in the future. */
public void setInternalWidths() {
getElement().getStyle().setPosition(Position.RELATIVE);
int bordersAndPaddings = WidgetUtil
.measureHorizontalPaddingAndBorder(buttons.getElement(), 0);
int buttonWidth = WidgetUtil.getRequiredWidth(buttons);
int totalWidth = getOffsetWidth();
int spaceForSelect = (totalWidth - buttonWidth - bordersAndPaddings)
/ 2;
optionsListBox.setWidth(spaceForSelect + "px");
if (optionsCaption != null) {
optionsCaption.setWidth(spaceForSelect + "px");
}
selectionsListBox.setWidth(spaceForSelect + "px");
if (selectionsCaption != null) {
selectionsCaption.setWidth(spaceForSelect + "px");
}
captionWrapper.setWidth("100%");
} | 3.68 |
graphhopper_KVStorage_getMap | /**
* Please note that this method ignores potentially different tags for forward and backward direction. To avoid this
* use {@link #getAll(long)} instead.
*/
public Map<String, Object> getMap(final long entryPointer) {
if (entryPointer < 0)
throw new IllegalStateException("Pointer to access KVStorage cannot be negative:" + entryPointer);
if (entryPointer == EMPTY_POINTER) return Collections.emptyMap();
int keyCount = vals.getByte(entryPointer) & 0xFF;
if (keyCount == 0) return Collections.emptyMap();
HashMap<String, Object> map = new HashMap<>(keyCount);
long tmpPointer = entryPointer + 1;
AtomicInteger sizeOfObject = new AtomicInteger();
for (int i = 0; i < keyCount; i++) {
int currentKeyIndexRaw = vals.getShort(tmpPointer);
int currentKeyIndex = currentKeyIndexRaw >>> 2;
tmpPointer += 2;
Object object = deserializeObj(sizeOfObject, tmpPointer, indexToClass.get(currentKeyIndex));
tmpPointer += sizeOfObject.get();
String key = indexToKey.get(currentKeyIndex);
map.put(key, object);
}
return map;
} | 3.68 |
framework_DDEventHandleStrategy_handleDragOver | /**
* Handles drag over on element.
*
* @param mediator
* VDragAndDropManager data accessor
* @param target
* target element over which DnD event has happened
*/
protected void handleDragOver(Element target, DDManagerMediator mediator) {
mediator.getDragEvent().setElementOver(target);
mediator.getManager().getCurrentDropHandler()
.dragOver(mediator.getDragEvent());
} | 3.68 |
framework_UIConnector_getActiveTheme | /**
* Returns the name of the theme currently in use by the UI.
*
* @since 7.3
* @return the theme name used by this UI
*/
public String getActiveTheme() {
return activeTheme;
} | 3.68 |
MagicPlugin_BaseSpell_onCast | /**
* Called when this spell is cast.
*
* <p>This is where you do your work!
*
* <p>If parameters were passed to this spell, either via a variant or the command line,
* they will be passed in here.
*
* @param parameters Any parameters that were passed to this spell
* @return The SpellResult of this cast.
*/
public SpellResult onCast(ConfigurationSection parameters) {
throw new UnsupportedOperationException("The onCast method has not been implemented");
} | 3.68 |
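A hedged sketch of a custom spell overriding onCast above; the class, parameter name, and helper calls such as getPlayer() are written from memory of the Magic API and may differ between versions:
import com.elmakers.mine.bukkit.api.spell.SpellResult;
import com.elmakers.mine.bukkit.spell.BaseSpell;
import org.bukkit.configuration.ConfigurationSection;
import org.bukkit.entity.Player;

// Assumes the Magic plugin and Bukkit API are on the classpath; package names may vary by version.
public class BlinkForwardSpell extends BaseSpell {
    @Override
    public SpellResult onCast(ConfigurationSection parameters) {
        // Parameters arrive from the spell variant or the command line, with a sensible default.
        int distance = parameters.getInt("distance", 8);
        Player player = getPlayer();
        if (player == null) {
            return SpellResult.PLAYER_REQUIRED;
        }
        player.teleport(player.getLocation().add(
                player.getLocation().getDirection().normalize().multiply(distance)));
        return SpellResult.CAST;
    }
}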