name | code_snippet | score |
---|---|---|
querydsl_QueryBase_offset | /**
* Defines the offset for the query results
*
* @param offset row offset
* @return the current object
*/
public Q offset(long offset) {
return queryMixin.offset(offset);
} | 3.68 |
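A minimal paging sketch built on this method; `JPAQueryFactory` is the usual QueryDSL entry point, while `QUser`, `User`, and the `EntityManager` are assumed here purely for illustration and are not part of the snippet above:

```java
import com.querydsl.jpa.impl.JPAQueryFactory;
import javax.persistence.EntityManager;
import java.util.List;

public class UserPaging {
    // QUser and User are assumed QueryDSL-generated/entity types for illustration.
    public static List<User> secondPage(EntityManager em) {
        return new JPAQueryFactory(em)
                .selectFrom(QUser.user)
                .offset(20)   // row offset, delegated to QueryBase.offset(long)
                .limit(10)
                .fetch();
    }
}
```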
flink_SinkTestSuiteBase_checkResultWithSemantic | /**
 * Compares the test data with the actual data under the given semantic.
*
* @param reader the data reader for the sink
* @param testData the test data
* @param semantic the supported semantic, see {@link CheckpointingMode}
*/
protected void checkResultWithSemantic(
ExternalSystemDataReader<T> reader, List<T> testData, CheckpointingMode semantic)
throws Exception {
final ArrayList<T> result = new ArrayList<>();
waitUntilCondition(
() -> {
pollAndAppendResultData(result, reader, testData, 30, semantic);
try {
CollectIteratorAssertions.assertThat(sort(result).iterator())
.matchesRecordsFromSource(Arrays.asList(sort(testData)), semantic);
return true;
} catch (Throwable t) {
return false;
}
});
} | 3.68 |
framework_AbstractOrderedLayout_iterator | /**
 * Gets the component container iterator for going through all the components
 * in the container.
*
* @return the Iterator of the components inside the container.
*/
@Override
public Iterator<Component> iterator() {
return Collections.unmodifiableCollection(components).iterator();
} | 3.68 |
hmily_ConsulClient_put | /**
* put config content.
*
* @param key config key
* @param content config content
*/
public void put(final String key, final String content) {
consul.keyValueClient().putValue(key, content);
} | 3.68 |
framework_SessionInitEvent_getRequest | /**
* Gets the request that triggered the initialization.
*
* @return the request
*/
public VaadinRequest getRequest() {
return request;
} | 3.68 |
hadoop_ReduceTaskAttemptInfo_getReduceRuntime | /**
* Get the runtime for the <b>reduce</b> phase of the reduce task-attempt.
*
* @return the runtime for the <b>reduce</b> phase of the reduce task-attempt
*/
public long getReduceRuntime() {
return reduceTime;
} | 3.68 |
framework_LayoutManager_getMarginRight | /**
* Gets the right margin of the given element, provided that it has been
* measured. These elements are guaranteed to be measured:
* <ul>
* <li>ManagedLayouts and their child Connectors
* <li>Elements for which there is at least one ElementResizeListener
* <li>Elements for which at least one ManagedLayout has registered a
* dependency
* </ul>
*
* A negative number is returned if the element has not been measured. If 0
* is returned, it might indicate that the element is not attached to the
* DOM.
*
* @param element
* the element to get the measured size for
* @return the measured right margin of the element in pixels.
*/
public int getMarginRight(Element element) {
assert needsMeasure(
element) : "Getting measurement for element that is not measured";
return getMeasuredSize(element, nullSize).getMarginRight();
} | 3.68 |
framework_VaadinFinderLocatorStrategy_generateQueries | /**
 * Generates all possible search paths for the given component list. The
 * com.vaadin.ui. prefix is stripped from each element, which makes generating
 * a query later on easier.
*
* @param components
* List of components
* @return List of Vaadin selectors
*/
private List<String> generateQueries(List<String> components) {
// Prepare to loop through all the elements.
List<String> paths = new ArrayList<>();
int compIdx = 0;
String basePath = components.get(compIdx).replace("com.vaadin.ui.", "");
// Add a basic search for the first element (e.g. //Button)
paths.add((components.size() == 1 ? "/" : "//") + basePath);
while (++compIdx < components.size()) {
// Loop through the remaining components
for (int i = components.size() - 1; i >= compIdx; --i) {
boolean recursive = false;
if (i > compIdx) {
recursive = true;
}
paths.add((i == components.size() - 1 ? "/" : "//")
+ components.get(i).replace("com.vaadin.ui.", "")
+ (recursive ? "//" : "/") + basePath);
}
// Add the element at index compIdx to the basePath so it is
// included in all the following searches.
basePath = components.get(compIdx).replace("com.vaadin.ui.", "")
+ "/" + basePath;
}
return paths;
} | 3.68 |
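As a worked trace with a purely hypothetical input, `generateQueries(Arrays.asList("com.vaadin.ui.Button", "com.vaadin.ui.Panel", "com.vaadin.ui.UI"))` yields the selectors `//Button`, `/UI//Button`, `//Panel/Button` and `/UI/Panel/Button`, in that order: a plain search for the first element, followed by progressively more specific paths built from the remaining entries.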
dubbo_ThrowableConsumer_execute | /**
* Executes {@link ThrowableConsumer}
*
* @param t the function argument
* @param consumer {@link ThrowableConsumer}
* @param <T> the source type
*/
static <T> void execute(T t, ThrowableConsumer<T> consumer) {
consumer.execute(t);
} | 3.68 |
framework_SharedUtil_addGetParameters | /**
* Adds the get parameters to the uri and returns the new uri that contains
* the parameters.
*
* @param uri
* The uri to which the parameters should be added.
* @param extraParams
* One or more parameters in the format "a=b" or "c=d&e=f".
* An empty string is allowed but will not modify the url.
* @return The modified URI with the get parameters in extraParams added.
*/
public static String addGetParameters(String uri, String extraParams) {
if (extraParams == null || extraParams.isEmpty()) {
return uri;
}
// RFC 3986: The query component is indicated by the first question
// mark ("?") character and terminated by a number sign ("#") character
// or by the end of the URI.
String fragment = null;
int hashPosition = uri.indexOf('#');
if (hashPosition != -1) {
// Fragment including "#"
fragment = uri.substring(hashPosition);
// The full uri before the fragment
uri = uri.substring(0, hashPosition);
}
if (uri.contains("?")) {
uri += "&";
} else {
uri += "?";
}
uri += extraParams;
if (fragment != null) {
uri += fragment;
}
return uri;
} | 3.68 |
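A small sketch of the behaviour described above; the `com.vaadin.shared.util.SharedUtil` import path is assumed from the snippet's Vaadin origin:

```java
import com.vaadin.shared.util.SharedUtil;

public class AddGetParametersDemo {
    public static void main(String[] args) {
        // No existing query string: a "?" is inserted before the parameters.
        System.out.println(SharedUtil.addGetParameters("http://example.com/app", "debug"));
        // -> http://example.com/app?debug

        // Existing query string plus fragment: parameters are joined with "&"
        // and the "#view" fragment is re-appended after them.
        System.out.println(SharedUtil.addGetParameters("http://example.com/app?x=1#view", "a=b&c=d"));
        // -> http://example.com/app?x=1&a=b&c=d#view
    }
}
```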
flink_DataType_performEarlyClassValidation | /**
* This method should catch the most common errors. However, another validation is required in
* deeper layers as we don't know whether the data type is used for input or output declaration.
*/
private static <C> Class<C> performEarlyClassValidation(
LogicalType logicalType, Class<C> candidate) {
if (candidate != null
&& !logicalType.supportsInputConversion(candidate)
&& !logicalType.supportsOutputConversion(candidate)) {
throw new ValidationException(
String.format(
"Logical type '%s' does not support a conversion from or to class '%s'.",
logicalType.asSummaryString(), candidate.getName()));
}
return candidate;
} | 3.68 |
hbase_WALCoprocessorHost_preWALRoll | /**
* Called before rolling the current WAL
* @param oldPath the path of the current wal that we are replacing
* @param newPath the path of the wal we are going to create
*/
public void preWALRoll(Path oldPath, Path newPath) throws IOException {
execOperation(coprocEnvironments.isEmpty() ? null : new WALObserverOperation() {
@Override
protected void call(WALObserver observer) throws IOException {
observer.preWALRoll(this, oldPath, newPath);
}
});
} | 3.68 |
flink_StateDescriptor_enableTimeToLive | /**
* Configures optional activation of state time-to-live (TTL).
*
 * <p>The state's user value will expire, become unavailable, and be cleaned up in storage,
 * depending on the configured {@link StateTtlConfig}.
*
* <p>If enabling the TTL configuration, the field {@link StateDescriptor#defaultValue} will be
* invalid.
*
* @param ttlConfig configuration of state TTL
*/
public void enableTimeToLive(StateTtlConfig ttlConfig) {
Preconditions.checkNotNull(ttlConfig);
if (ttlConfig.isEnabled()) {
Preconditions.checkArgument(
queryableStateName == null,
"Queryable state is currently not supported with TTL");
}
this.ttlConfig = ttlConfig;
} | 3.68 |
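A short usage sketch, assuming Flink's `StateTtlConfig` builder API; the descriptor name, state type, and one-hour TTL are illustrative only:

```java
import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.time.Time;

public class TtlDescriptorExample {
    public static ValueStateDescriptor<Long> lastSeenDescriptor() {
        // Expire entries one hour after they were last written.
        StateTtlConfig ttlConfig = StateTtlConfig.newBuilder(Time.hours(1)).build();
        ValueStateDescriptor<Long> descriptor =
                new ValueStateDescriptor<>("last-seen", Long.class);
        descriptor.enableTimeToLive(ttlConfig);
        return descriptor;
    }
}
```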
AreaShop_WorldEditSelection_getHeight | /**
* Get Y-size.
*
* @return height
*/
public int getHeight() {
return maximum.getBlockY() - minimum.getBlockY() + 1;
} | 3.68 |
hbase_NamespacesResource_getNamespaceInstanceResource | /**
* Dispatch to NamespaceInstanceResource
*/
@Path("{namespace}")
public NamespacesInstanceResource getNamespaceInstanceResource(
final @PathParam("namespace") String namespace) throws IOException {
return new NamespacesInstanceResource(namespace);
} | 3.68 |
flink_DataStream_timeWindowAll | /**
* Windows this {@code DataStream} into sliding time windows.
*
* <p>This is a shortcut for either {@code .window(SlidingEventTimeWindows.of(size, slide))} or
* {@code .window(SlidingProcessingTimeWindows.of(size, slide))} depending on the time
* characteristic set using {@link
* org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setStreamTimeCharacteristic(org.apache.flink.streaming.api.TimeCharacteristic)}
*
* <p>Note: This operation is inherently non-parallel since all elements have to pass through
* the same operator instance.
*
 * @param size The size of the window.
 * @param slide The slide interval of the window.
* @deprecated Please use {@link #windowAll(WindowAssigner)} with either {@link
* SlidingEventTimeWindows} or {@link SlidingProcessingTimeWindows}. For more information,
* see the deprecation notice on {@link TimeCharacteristic}
*/
@Deprecated
public AllWindowedStream<T, TimeWindow> timeWindowAll(Time size, Time slide) {
if (environment.getStreamTimeCharacteristic() == TimeCharacteristic.ProcessingTime) {
return windowAll(SlidingProcessingTimeWindows.of(size, slide));
} else {
return windowAll(SlidingEventTimeWindows.of(size, slide));
}
} | 3.68 |
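Since the method is deprecated, the replacement named in the notice can be called directly; a sketch for an event-time stream, with arbitrary window sizes chosen for illustration:

```java
import org.apache.flink.streaming.api.datastream.AllWindowedStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;

public class SlidingWindowExample {
    // Roughly equivalent to stream.timeWindowAll(Time.minutes(10), Time.minutes(1))
    // for an event-time pipeline, using the non-deprecated assigner directly.
    public static AllWindowedStream<Long, TimeWindow> slidingWindows(DataStream<Long> stream) {
        return stream.windowAll(SlidingEventTimeWindows.of(Time.minutes(10), Time.minutes(1)));
    }
}
```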
hbase_SecurityInfo_getInfo | /**
* Returns the security configuration associated with the given service name.
*/
public static SecurityInfo getInfo(String serviceName) {
return infos.get(serviceName);
} | 3.68 |
flink_ExtractionUtils_isStructuredFieldMutable | /**
* Checks if a field is mutable or immutable. Returns {@code true} if the field is properly
* mutable. Returns {@code false} if it is properly immutable.
*/
static boolean isStructuredFieldMutable(Class<?> clazz, Field field) {
final int m = field.getModifiers();
// field is immutable
if (Modifier.isFinal(m)) {
return false;
}
// field is directly mutable
if (Modifier.isPublic(m)) {
return true;
}
// field has setters by which it is mutable
if (getStructuredFieldSetter(clazz, field).isPresent()) {
return true;
}
throw extractionError(
"Field '%s' of class '%s' is mutable but is neither publicly accessible nor does it have "
+ "a corresponding setter method.",
field.getName(), clazz.getName());
} | 3.68 |
dubbo_Bytes_bytes2long | /**
* to long.
*
* @param b byte array.
* @param off offset.
* @return long.
*/
public static long bytes2long(byte[] b, int off) {
return ((b[off + 7] & 0xFFL) << 0)
+ ((b[off + 6] & 0xFFL) << 8)
+ ((b[off + 5] & 0xFFL) << 16)
+ ((b[off + 4] & 0xFFL) << 24)
+ ((b[off + 3] & 0xFFL) << 32)
+ ((b[off + 2] & 0xFFL) << 40)
+ ((b[off + 1] & 0xFFL) << 48)
+ (((long) b[off + 0]) << 56);
} | 3.68 |
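A worked example of the big-endian decoding above; the `org.apache.dubbo.common.io.Bytes` import path is assumed from the Dubbo snippet name:

```java
import org.apache.dubbo.common.io.Bytes;

public class Bytes2LongDemo {
    public static void main(String[] args) {
        // Most significant byte first: b[off] ends up in bits 56..63.
        byte[] b = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
        long value = Bytes.bytes2long(b, 0);
        System.out.println(value == 0x0102030405060708L); // prints: true
    }
}
```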
hadoop_MappingRuleResult_toString | /**
* Returns the string representation of the object.
* @return the string representation of the object
*/
@Override
public String toString() {
if (result == MappingRuleResultType.PLACE) {
return result.name() + ": '" + normalizedQueue + "' ('" + queue + "')";
} else {
return result.name();
}
} | 3.68 |
framework_VColorPickerArea_setHTML | /**
* Sets the caption's content to the given HTML.
*
 * @param html the HTML content to set
*/
@Override
public void setHTML(String html) {
caption.setHTML(html);
} | 3.68 |
hudi_PostgresDebeziumAvroPayload_containsStringToastedValues | /**
 * Returns true if the column is either of type string, or a union that includes a string type,
 * and its value equals the debezium toasted value placeholder.
*
* @param incomingRecord The incoming avro record
* @param field the column of interest
 * @return true if the field holds a toasted string value, false otherwise
*/
private boolean containsStringToastedValues(IndexedRecord incomingRecord, Schema.Field field) {
return ((field.schema().getType() == Schema.Type.STRING
|| (field.schema().getType() == Schema.Type.UNION && field.schema().getTypes().stream().anyMatch(s -> s.getType() == Schema.Type.STRING)))
// Check length first as an optimization
&& ((CharSequence) ((GenericData.Record) incomingRecord).get(field.name())).length() == DEBEZIUM_TOASTED_VALUE.length()
&& DEBEZIUM_TOASTED_VALUE.equals(((CharSequence) ((GenericData.Record) incomingRecord).get(field.name())).toString()));
} | 3.68 |
hbase_QuotaSettings_validateQuotaTarget | /**
* Validates that settings being merged into {@code this} is targeting the same "subject", e.g.
* user, table, namespace.
* @param mergee The quota settings to be merged into {@code this}.
* @throws IllegalArgumentException if the subjects are not equal.
*/
void validateQuotaTarget(QuotaSettings mergee) {
if (!Objects.equals(getUserName(), mergee.getUserName())) {
throw new IllegalArgumentException("Mismatched user names on settings to merge");
}
if (!Objects.equals(getTableName(), mergee.getTableName())) {
throw new IllegalArgumentException("Mismatched table names on settings to merge");
}
if (!Objects.equals(getNamespace(), mergee.getNamespace())) {
throw new IllegalArgumentException("Mismatched namespace on settings to merge");
}
if (!Objects.equals(getRegionServer(), mergee.getRegionServer())) {
throw new IllegalArgumentException("Mismatched region server on settings to merge");
}
} | 3.68 |
morf_SchemaAdapter_isEmptyDatabase | /**
* @see org.alfasoftware.morf.metadata.Schema#isEmptyDatabase()
*/
@Override
public boolean isEmptyDatabase() {
return delegate.isEmptyDatabase();
} | 3.68 |
flink_StreamGraphGenerator_setSlotSharingGroupResource | /**
* Specify fine-grained resource requirements for slot sharing groups.
*
* <p>Note that a slot sharing group hints the scheduler that the grouped operators CAN be
* deployed into a shared slot. There's no guarantee that the scheduler always deploy the
* grouped operators together. In cases grouped operators are deployed into separate slots, the
* slot resources will be derived from the specified group requirements.
*/
public StreamGraphGenerator setSlotSharingGroupResource(
Map<String, ResourceProfile> slotSharingGroupResources) {
slotSharingGroupResources.forEach(
(name, profile) -> {
if (!profile.equals(ResourceProfile.UNKNOWN)) {
this.slotSharingGroupResources.put(name, profile);
}
});
return this;
} | 3.68 |
pulsar_NonPersistentTopic_checkBacklogQuotaExceeded | /**
*
* @return quota exceeded status for blocking producer creation
*/
@Override
public CompletableFuture<Void> checkBacklogQuotaExceeded(String producerName, BacklogQuotaType backlogQuotaType) {
// No-op
return CompletableFuture.completedFuture(null);
} | 3.68 |
framework_Calendar_setFirstVisibleDayOfWeek | /**
* <p>
* This method restricts the weekdays that are shown. This affects both the
* monthly and the weekly view. The general contract is that <b>firstDay <
* lastDay</b>.
* </p>
*
* <p>
* Note that this only affects the rendering process. Events are still
* requested by the dates set by {@link #setStartDate(Date)} and
* {@link #setEndDate(Date)}.
* </p>
*
* @param firstDay
* the first day of the week to show, between 1 and 7
*/
public void setFirstVisibleDayOfWeek(int firstDay) {
if (this.firstDay != firstDay && firstDay >= 1 && firstDay <= 7
&& getLastVisibleDayOfWeek() >= firstDay) {
this.firstDay = firstDay;
getState().firstVisibleDayOfWeek = firstDay;
}
} | 3.68 |
hadoop_AbstractStoreOperation_activateAuditSpan | /**
* Activate the audit span.
*/
public void activateAuditSpan() {
if (auditSpan != null) {
auditSpan.activate();
}
} | 3.68 |
hmily_ExtensionLoader_setValue | /**
* Sets value.
*
* @param value the value
*/
public void setValue(final T value) {
this.value = value;
} | 3.68 |
hbase_Constraints_enableConstraint | /**
* Enable the given {@link Constraint}. Retains all the information (e.g. Configuration) for the
* {@link Constraint}, but makes sure that it gets loaded on the table.
* @param builder {@link TableDescriptorBuilder} to modify
* @param clazz {@link Constraint} to enable
* @throws IOException If the constraint cannot be properly deserialized
*/
public static void enableConstraint(TableDescriptorBuilder builder,
Class<? extends Constraint> clazz) throws IOException {
changeConstraintEnabled(builder, clazz, true);
} | 3.68 |
hbase_ReplicationLoad_sinkToString | /**
* sinkToString
 * @return a string containing sinkReplicationLoad information
*/
public String sinkToString() {
if (this.replicationLoadSink == null) return null;
StringBuilder sb = new StringBuilder();
sb = Strings.appendKeyValue(sb, "AgeOfLastAppliedOp",
this.replicationLoadSink.getAgeOfLastAppliedOp());
sb = Strings.appendKeyValue(sb, "TimestampsOfLastAppliedOp",
(new Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString()));
return sb.toString();
} | 3.68 |
flink_ExceptionUtils_findSerializedThrowable | /**
* Checks whether a throwable chain contains a specific type of exception and returns it. It
* deserializes any {@link SerializedThrowable} that are found using the provided {@link
* ClassLoader}.
*
* @param throwable the throwable chain to check.
* @param searchType the type of exception to search for in the chain.
* @param classLoader to use for deserialization.
* @return Optional throwable of the requested type if available, otherwise empty
*/
public static <T extends Throwable> Optional<T> findSerializedThrowable(
Throwable throwable, Class<T> searchType, ClassLoader classLoader) {
if (throwable == null || searchType == null) {
return Optional.empty();
}
Throwable t = throwable;
while (t != null) {
if (searchType.isAssignableFrom(t.getClass())) {
return Optional.of(searchType.cast(t));
} else if (t.getClass().isAssignableFrom(SerializedThrowable.class)) {
Throwable next = ((SerializedThrowable) t).deserializeError(classLoader);
// SerializedThrowable#deserializeError returns itself under some conditions (e.g.,
// null cause).
// If that happens, exit to avoid looping infinitely. This is ok because if the user
// was searching
// for a SerializedThrowable, we would have returned it in the initial if condition.
t = (next == t) ? null : next;
} else {
t = t.getCause();
}
}
return Optional.empty();
} | 3.68 |
framework_DataChangeEvent_getItem | /**
* Gets the refreshed item.
*
* @return the refreshed item
*/
public T getItem() {
return item;
} | 3.68 |
hbase_TableDescriptorBuilder_setMemStoreFlushSize | /**
* Represents the maximum size of the memstore after which the contents of the memstore are
* flushed to the filesystem. This defaults to a size of 64 MB.
* @param memstoreFlushSize memory cache flush size for each hregion
* @return the modifyable TD
*/
public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
} | 3.68 |
pulsar_Topic_getOriginalProducerName | /**
* Return the producer name for the original producer.
*
 * For messages published locally, this returns the same local producer name; for replicated
 * messages, the original producer name will differ.
*/
default String getOriginalProducerName() {
return null;
} | 3.68 |
framework_VAccordion_setId | /**
* Sets the identifier for this stack item.
*
* @param newId
* the identifier to set
*/
public void setId(String newId) {
if (!SharedUtil.equals(newId, id)) {
if (id != null) {
getElement().removeAttribute("id");
}
id = newId;
if (id != null && !id.isEmpty()) {
getElement().setId(id);
}
}
} | 3.68 |
flink_RocksDBStateBackend_setNumberOfTransferingThreads | /** @deprecated Typo in method name. Use {@link #setNumberOfTransferThreads(int)} instead. */
@Deprecated
public void setNumberOfTransferingThreads(int numberOfTransferingThreads) {
setNumberOfTransferThreads(numberOfTransferingThreads);
} | 3.68 |
hbase_MetricsSource_getOpsShipped | /**
* Gets the number of OPs shipped by this source queue to target cluster.
* @return oPsShipped total number of OPs shipped by this source.
*/
public long getOpsShipped() {
return this.singleSourceSource.getShippedOps();
} | 3.68 |
flink_FlinkRelBuilder_pushFunctionScan | /**
* {@link RelBuilder#functionScan(SqlOperator, int, Iterable)} cannot work smoothly with aliases
* which is why we implement a custom one. The method is static because some {@link RelOptRule}s
* don't use {@link FlinkRelBuilder}.
*/
public static RelBuilder pushFunctionScan(
RelBuilder relBuilder,
SqlOperator operator,
int inputCount,
Iterable<RexNode> operands,
List<String> aliases) {
Preconditions.checkArgument(
operator instanceof BridgingSqlFunction.WithTableFunction,
"Table function expected.");
final RexBuilder rexBuilder = relBuilder.getRexBuilder();
final RelDataTypeFactory typeFactory = relBuilder.getTypeFactory();
final List<RelNode> inputs = new LinkedList<>();
for (int i = 0; i < inputCount; i++) {
inputs.add(0, relBuilder.build());
}
final List<RexNode> operandList = CollectionUtil.iterableToList(operands);
final RelDataType functionRelDataType = rexBuilder.deriveReturnType(operator, operandList);
final List<RelDataType> fieldRelDataTypes;
if (functionRelDataType.isStruct()) {
fieldRelDataTypes =
functionRelDataType.getFieldList().stream()
.map(RelDataTypeField::getType)
.collect(Collectors.toList());
} else {
fieldRelDataTypes = Collections.singletonList(functionRelDataType);
}
final RelDataType rowRelDataType = typeFactory.createStructType(fieldRelDataTypes, aliases);
final RexNode call = rexBuilder.makeCall(rowRelDataType, operator, operandList);
final RelNode functionScan =
LogicalTableFunctionScan.create(
relBuilder.getCluster(),
inputs,
call,
null,
rowRelDataType,
Collections.emptySet());
return relBuilder.push(functionScan);
} | 3.68 |
hibernate-validator_ValueExtractorResolver_getMaximallySpecificAndContainerElementCompliantValueExtractor | /**
* Used to find the maximally specific and container element compliant value extractor based on the declared type
* and the type parameter.
* <p>
* Used for container element constraints.
*
* @throws ConstraintDeclarationException if more than 2 maximally specific container-element-compliant value extractors are found
*/
public ValueExtractorDescriptor getMaximallySpecificAndContainerElementCompliantValueExtractor(Class<?> declaredType, TypeVariable<?> typeParameter) {
return getUniqueValueExtractorOrThrowException(
declaredType,
getRuntimeAndContainerElementCompliantValueExtractorsFromPossibleCandidates( declaredType, typeParameter, declaredType, registeredValueExtractors )
);
} | 3.68 |
graphhopper_VectorTile_setKeys | /**
* <pre>
* Dictionary encoding for keys
* </pre>
*
* <code>repeated string keys = 3;</code>
*/
public Builder setKeys(
int index, java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
ensureKeysIsMutable();
keys_.set(index, value);
onChanged();
return this;
} | 3.68 |
hadoop_StringValueMax_getVal | /**
* @return the aggregated value
*/
public String getVal() {
return this.maxVal;
} | 3.68 |
hbase_WALFactory_shutdown | /**
* Tell the underlying WAL providers to shut down, but do not clean up underlying storage. If you
* are not ending cleanly and will need to replay edits from this factory's wals, use this method
* if you can as it will try to leave things as tidy as possible.
*/
public void shutdown() throws IOException {
List<IOException> ioes = new ArrayList<>();
// these fields could be null if the WALFactory is created only for being used in the
// getInstance method.
if (metaProvider != null) {
try {
metaProvider.shutdown();
} catch (IOException e) {
ioes.add(e);
}
}
if (replicationProvider != null) {
try {
replicationProvider.shutdown();
} catch (IOException e) {
ioes.add(e);
}
}
if (provider != null) {
try {
provider.shutdown();
} catch (IOException e) {
ioes.add(e);
}
}
if (!ioes.isEmpty()) {
IOException ioe = new IOException("Failed to shutdown WALFactory");
for (IOException e : ioes) {
ioe.addSuppressed(e);
}
throw ioe;
}
} | 3.68 |
framework_InfoSection_getTabButton | /*
* (non-Javadoc)
*
* @see com.vaadin.client.debug.internal.Section#getTabButton()
*/
@Override
public DebugButton getTabButton() {
return tabButton;
} | 3.68 |
hmily_CuratorZookeeperClient_persist | /**
* Persist.
*
* @param key the key
* @param value the value
*/
public void persist(final String key, final String value) {
try {
if (!isExisted(key)) {
client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath(key, value.getBytes(Charsets.UTF_8));
} else {
update(key, value);
}
// CHECKSTYLE:OFF
} catch (final Exception ex) {
// CHECKSTYLE:ON
CuratorZookeeperExceptionHandler.handleException(ex);
}
} | 3.68 |
flink_CompactingHashTable_getEntryIterator | /**
* @return Iterator over hash table
* @see EntryIterator
*/
public MutableObjectIterator<T> getEntryIterator() {
return new EntryIterator(this);
} | 3.68 |
hbase_WALProcedureStore_main | /**
 * Parses a directory of WALs, building up ProcedureState. Used for testing parsing and for profiling.
 * @param args a single argument: the path to a store instance's directory of WAL files to parse and load.
*/
public static void main(String[] args) throws IOException {
Configuration conf = HBaseConfiguration.create();
if (args == null || args.length != 1) {
System.out.println("ERROR: Empty arguments list; pass path to MASTERPROCWALS_DIR.");
System.out.println("Usage: WALProcedureStore MASTERPROCWALS_DIR");
System.exit(-1);
}
WALProcedureStore store =
new WALProcedureStore(conf, new Path(args[0]), null, new LeaseRecovery() {
@Override
public void recoverFileLease(FileSystem fs, Path path) throws IOException {
// no-op
}
});
try {
store.start(16);
ProcedureExecutor<?> pe =
new ProcedureExecutor<>(conf, new Object()/* Pass anything */, store);
pe.init(1, true);
} finally {
store.stop(true);
}
} | 3.68 |
pulsar_FixedColumnLengthTableMaker_addHorizontalBorder | // Helper function to add top and bottom borders.
private void addHorizontalBorder(final int length, final StringBuilder builder, final char borderChar) {
for (int i = 0; i < length; ++i) {
builder.append(borderChar);
}
} | 3.68 |
dubbo_RpcServiceContext_getResponse | /**
* Get the response object of the underlying RPC protocol, e.g. HttpServletResponse
*
* @return null if the underlying protocol doesn't provide support for getting response or the response is not of the specified type
*/
@Override
@SuppressWarnings("unchecked")
public <T> T getResponse(Class<T> clazz) {
return (response != null && clazz.isAssignableFrom(response.getClass())) ? (T) response : null;
} | 3.68 |
flink_NonReusingBuildSecondReOpenableHashJoinIterator_reopenProbe | /**
* Set new input for probe side
*
* @throws java.io.IOException
*/
public void reopenProbe(MutableObjectIterator<V1> probeInput) throws IOException {
reopenHashTable.reopenProbe(probeInput);
} | 3.68 |
hbase_LogEventHandler_persistAll | /**
* Add all in memory queue records to system table. The implementors can use system table or
* direct HDFS file or ZK as persistence system.
*/
void persistAll(NamedQueuePayload.NamedQueueEvent namedQueueEvent, Connection connection) {
namedQueueServices.get(namedQueueEvent).persistAll(connection);
} | 3.68 |
hadoop_RenameFilesStage_executeStage | /**
* Rename files in job commit.
* @param args tuple of (manifest data, set of created dirs)
* @return the job report.
* @throws IOException failure
*/
@Override
protected ManifestSuccessData executeStage(
Triple<LoadedManifestData, Set<Path>, Integer> args)
throws IOException {
final LoadedManifestData manifestData = args.getLeft();
createdDirectories = args.getMiddle();
final EntryFileIO entryFileIO = new EntryFileIO(getStageConfig().getConf());
final ManifestSuccessData success = createManifestOutcome(getStageConfig(),
OP_STAGE_JOB_COMMIT);
LOG.info("{}: Executing Manifest Job Commit with {} files",
getName(), manifestData.getFileCount());
// iterate over the entries in the file.
try (SequenceFile.Reader reader = entryFileIO.createReader(
manifestData.getEntrySequenceData())) {
TaskPool.foreach(entryFileIO.iterateOver(reader))
.executeWith(getIOProcessors())
.stopOnFailure()
.run(this::commitOneFile);
}
// synchronized block to keep spotbugs happy.
List<FileEntry> committed = getFilesCommitted();
LOG.info("{}: Files committed: {}. Total size {}",
getName(), committed.size(), getTotalFileSize());
// Add a subset of the destination files to the success file;
// enough for simple testing
success.setFilenamePaths(
committed
.subList(0, Math.min(committed.size(), args.getRight()))
.stream().map(FileEntry::getDestPath)
.collect(Collectors.toList()));
success.setSuccess(true);
return success;
} | 3.68 |
pulsar_ProducerImpl_updateMessageMetadata | /**
* Update the message metadata except those fields that will be updated for chunks later.
*
 * @param msgMetadata the message metadata to update
 * @param uncompressedSize the uncompressed payload size in bytes
 */
private void updateMessageMetadata(final MessageMetadata msgMetadata, final int uncompressedSize) {
if (!msgMetadata.hasPublishTime()) {
msgMetadata.setPublishTime(client.getClientClock().millis());
checkArgument(!msgMetadata.hasProducerName());
msgMetadata.setProducerName(producerName);
// The field "uncompressedSize" is zero means the compression info were not set yet.
if (msgMetadata.getUncompressedSize() <= 0) {
if (conf.getCompressionType() != CompressionType.NONE) {
msgMetadata
.setCompression(CompressionCodecProvider.convertToWireProtocol(conf.getCompressionType()));
}
msgMetadata.setUncompressedSize(uncompressedSize);
}
}
} | 3.68 |
AreaShop_SignLinkerManager_exitSignLinkMode | /**
* Let a player exit sign linking mode.
* @param player The player that has to exit sign linking mode
*/
public void exitSignLinkMode(Player player) {
signLinkers.remove(player.getUniqueId());
if(eventsRegistered && signLinkers.isEmpty()) {
eventsRegistered = false;
HandlerList.unregisterAll(this);
}
plugin.message(player, "linksigns-stopped");
} | 3.68 |
hbase_ThriftHBaseServiceHandler_getScanner | /**
* Returns the Scanner associated with the specified Id.
* @param id of the Scanner to get
* @return a Scanner, or null if the Id is invalid
*/
private ResultScanner getScanner(int id) {
return scannerMap.getIfPresent(id);
} | 3.68 |
rocketmq-connect_WorkerSourceTask_execute | /**
* execute poll and send record
*/
@Override
protected void execute() {
while (isRunning()) {
updateCommittableOffsets();
if (shouldPause()) {
onPause();
try {
// wait until the task is resumed
if (awaitUnpause()) {
onResume();
}
continue;
} catch (InterruptedException e) {
// interrupted while paused; fall through and continue processing
}
}
if (CollectionUtils.isEmpty(toSendRecord)) {
try {
prepareToPollTask();
long start = System.currentTimeMillis();
toSendRecord = poll();
if (null != toSendRecord && toSendRecord.size() > 0) {
recordPollReturned(toSendRecord.size(), System.currentTimeMillis() - start);
}
if (toSendRecord == null) {
continue;
}
log.trace("{} About to send {} records to RocketMQ", this, toSendRecord.size());
if (!sendRecord()) {
stopRequestedLatch.await(SEND_FAILED_BACKOFF_MS, TimeUnit.MILLISECONDS);
}
} catch (InterruptedException e) {
// Ignore and allow to exit.
} catch (Exception e) {
try {
finalOffsetCommit(true);
} catch (Exception offsetException) {
log.error("Failed to commit offsets for already-failing task", offsetException);
}
throw e;
} finally {
finalOffsetCommit(false);
// record source poll times
connectStatsManager.incSourceRecordPollTotalTimes();
}
}
AtomicLong atomicLong = connectStatsService.singleSourceTaskTimesTotal(id().toString());
if (null != atomicLong) {
atomicLong.addAndGet(toSendRecord == null ? 0 : toSendRecord.size());
}
}
} | 3.68 |
hbase_HBaseConfiguration_addDeprecatedKeys | /**
* The hbase.ipc.server.reservoir.initial.max and hbase.ipc.server.reservoir.initial.buffer.size
* were introduced in HBase2.0.0, while in HBase3.0.0 the two config keys will be replaced by
* hbase.server.allocator.max.buffer.count and hbase.server.allocator.buffer.size. Also the
* hbase.ipc.server.reservoir.enabled will be replaced by hbase.server.allocator.pool.enabled.
* Keep the three old config keys here for HBase2.x compatibility. <br>
* HBASE-24667: This config hbase.regionserver.hostname.disable.master.reversedns will be replaced
* by hbase.unsafe.regionserver.hostname.disable.master.reversedns. Keep the old config keys here
* for backward compatibility. <br>
* Note: Before Hadoop-3.3, we must call the addDeprecations method before creating the
* Configuration object to work correctly. After this bug is fixed in hadoop-3.3, there will be no
* order problem.
* @see <a href="https://issues.apache.org/jira/browse/HADOOP-15708">HADOOP-15708</a>
*/
private static void addDeprecatedKeys() {
Configuration.addDeprecations(new DeprecationDelta[] {
new DeprecationDelta("hbase.regionserver.hostname", "hbase.unsafe.regionserver.hostname"),
new DeprecationDelta("hbase.regionserver.hostname.disable.master.reversedns",
"hbase.unsafe.regionserver.hostname.disable.master.reversedns"),
new DeprecationDelta("hbase.offheapcache.minblocksize", "hbase.blockcache.minblocksize"),
new DeprecationDelta("hbase.ipc.server.reservoir.enabled",
"hbase.server.allocator.pool.enabled"),
new DeprecationDelta("hbase.ipc.server.reservoir.initial.max",
"hbase.server.allocator.max.buffer.count"),
new DeprecationDelta("hbase.ipc.server.reservoir.initial.buffer.size",
"hbase.server.allocator.buffer.size"),
new DeprecationDelta("hlog.bulk.output", "wal.bulk.output"),
new DeprecationDelta("hlog.input.tables", "wal.input.tables"),
new DeprecationDelta("hlog.input.tablesmap", "wal.input.tablesmap"),
new DeprecationDelta("hbase.master.mob.ttl.cleaner.period",
"hbase.master.mob.cleaner.period"),
new DeprecationDelta("hbase.normalizer.min.region.count",
"hbase.normalizer.merge.min.region.count") });
} | 3.68 |
flink_KubernetesUtils_createJobGraphStore | /**
* Create a {@link DefaultJobGraphStore} with {@link NoOpJobGraphStoreWatcher}.
*
* @param configuration configuration to build a RetrievableStateStorageHelper
* @param flinkKubeClient flink kubernetes client
* @param configMapName ConfigMap name
* @param lockIdentity lock identity to check the leadership
* @return a {@link DefaultJobGraphStore} with {@link NoOpJobGraphStoreWatcher}
* @throws Exception when create the storage helper
*/
public static JobGraphStore createJobGraphStore(
Configuration configuration,
FlinkKubeClient flinkKubeClient,
String configMapName,
String lockIdentity)
throws Exception {
final KubernetesStateHandleStore<JobGraph> stateHandleStore =
createJobGraphStateHandleStore(
configuration, flinkKubeClient, configMapName, lockIdentity);
return new DefaultJobGraphStore<>(
stateHandleStore,
NoOpJobGraphStoreWatcher.INSTANCE,
KubernetesJobGraphStoreUtil.INSTANCE);
} | 3.68 |
pulsar_MessageCryptoBc_createIESParameterSpec | // required since Bouncycastle 1.72 when using ECIES, it is required to pass in an IESParameterSpec
public static IESParameterSpec createIESParameterSpec() {
// the IESParameterSpec to use was discovered by debugging BouncyCastle 1.69 and running the
// test org.apache.pulsar.client.api.SimpleProducerConsumerTest#testCryptoWithChunking
return new IESParameterSpec(null, null, 128);
} | 3.68 |
flink_ExistingSavepoint_readKeyedState | /**
* Read keyed state from an operator in a {@code Savepoint}.
*
* @param uid The uid of the operator.
* @param function The {@link KeyedStateReaderFunction} that is called for each key in state.
* @param keyTypeInfo The type information of the key in state.
* @param outTypeInfo The type information of the output of the transform reader function.
* @param <K> The type of the key in state.
* @param <OUT> The output type of the transform function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException If the savepoint does not contain operator state with the given uid.
*/
public <K, OUT> DataSource<OUT> readKeyedState(
String uid,
KeyedStateReaderFunction<K, OUT> function,
TypeInformation<K> keyTypeInfo,
TypeInformation<OUT> outTypeInfo)
throws IOException {
OperatorState operatorState = metadata.getOperatorState(uid);
KeyedStateInputFormat<K, VoidNamespace, OUT> inputFormat =
new KeyedStateInputFormat<>(
operatorState,
stateBackend,
env.getConfiguration(),
new KeyedStateReaderOperator<>(function, keyTypeInfo));
return env.createInput(inputFormat, outTypeInfo);
} | 3.68 |
hadoop_BlockMovementAttemptFinished_getTargetType | /**
* @return target storage type.
*/
public StorageType getTargetType() {
return targetType;
} | 3.68 |
morf_DataSetProducerAdapter_isTableEmpty | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#isTableEmpty(java.lang.String)
*/
@Override
public boolean isTableEmpty(String tableName) {
return delegate.isTableEmpty(tableName);
} | 3.68 |
hbase_CommonFSUtils_getDirUri | /**
 * Returns the URI of the given path in string format.
 * @param c configuration
 * @param p path
 * @return the URI as a string, or null if the path has no scheme
*/
public static String getDirUri(final Configuration c, Path p) throws IOException {
if (p.toUri().getScheme() != null) {
return p.toUri().toString();
}
return null;
} | 3.68 |
hmily_HmilyRepositoryFacade_updateHmilyParticipantStatus | /**
* Update hmily participant status.
*
* @param transId the trans id
* @param status the status
*/
public void updateHmilyParticipantStatus(final Long transId, final Integer status) {
checkRows(hmilyRepository.updateHmilyParticipantStatus(transId, status));
} | 3.68 |
hbase_JVMClusterUtil_createMasterThread | /**
* Creates a {@link MasterThread}. Call 'start' on the returned thread to make it run.
* @param c Configuration to use.
* @param hmc Class to create.
* @param index Used distinguishing the object returned.
* @return Master added.
*/
public static JVMClusterUtil.MasterThread createMasterThread(final Configuration c,
final Class<? extends HMaster> hmc, final int index) throws IOException {
HMaster server;
try {
server = hmc.getConstructor(Configuration.class).newInstance(c);
} catch (InvocationTargetException ite) {
Throwable target = ite.getTargetException();
throw new RuntimeException("Failed construction of Master: " + hmc.toString()
+ ((target.getCause() != null) ? target.getCause().getMessage() : ""), target);
} catch (Exception e) {
throw new IOException(e);
}
// Needed if a master based registry is configured for internal cluster connections. Here, we
// just add the current master host port since we do not know other master addresses up front
// in mini cluster tests.
c.set(HConstants.MASTER_ADDRS_KEY,
Preconditions.checkNotNull(server.getServerName().getAddress()).toString());
return new JVMClusterUtil.MasterThread(server, index);
} | 3.68 |
hbase_BackupManifest_getBackupImage | /**
* Get this backup image.
* @return the backup image.
*/
public BackupImage getBackupImage() {
return backupImage;
} | 3.68 |
hbase_HRegionServer_run | /**
* The HRegionServer sticks in this loop until closed.
*/
@Override
public void run() {
if (isStopped()) {
LOG.info("Skipping run; stopped");
return;
}
try {
// Do pre-registration initializations; zookeeper, lease threads, etc.
preRegistrationInitialization();
} catch (Throwable e) {
abort("Fatal exception during initialization", e);
}
try {
if (!isStopped() && !isAborted()) {
installShutdownHook();
// Initialize the RegionServerCoprocessorHost now that our ephemeral
// node was created, in case any coprocessors want to use ZooKeeper
this.rsHost = new RegionServerCoprocessorHost(this, this.conf);
// Try and register with the Master; tell it we are here. Break if server is stopped or
// the clusterup flag is down or hdfs went wacky. Once registered successfully, go ahead and
// start up all Services. Use RetryCounter to get backoff in case Master is struggling to
// come up.
LOG.debug("About to register with Master.");
TraceUtil.trace(() -> {
RetryCounterFactory rcf =
new RetryCounterFactory(Integer.MAX_VALUE, this.sleeper.getPeriod(), 1000 * 60 * 5);
RetryCounter rc = rcf.create();
while (keepLooping()) {
RegionServerStartupResponse w = reportForDuty();
if (w == null) {
long sleepTime = rc.getBackoffTimeAndIncrementAttempts();
LOG.warn("reportForDuty failed; sleeping {} ms and then retrying.", sleepTime);
this.sleeper.sleep(sleepTime);
} else {
handleReportForDutyResponse(w);
break;
}
}
}, "HRegionServer.registerWithMaster");
}
if (!isStopped() && isHealthy()) {
TraceUtil.trace(() -> {
// start the snapshot handler and other procedure handlers,
// since the server is ready to run
if (this.rspmHost != null) {
this.rspmHost.start();
}
// Start the Quota Manager
if (this.rsQuotaManager != null) {
rsQuotaManager.start(getRpcServer().getScheduler());
}
if (this.rsSpaceQuotaManager != null) {
this.rsSpaceQuotaManager.start();
}
}, "HRegionServer.startup");
}
// We registered with the Master. Go into run mode.
long lastMsg = EnvironmentEdgeManager.currentTime();
long oldRequestCount = -1;
// The main run loop.
while (!isStopped() && isHealthy()) {
if (!isClusterUp()) {
if (onlineRegions.isEmpty()) {
stop("Exiting; cluster shutdown set and not carrying any regions");
} else if (!this.stopping) {
this.stopping = true;
LOG.info("Closing user regions");
closeUserRegions(isAborted());
} else {
boolean allUserRegionsOffline = areAllUserRegionsOffline();
if (allUserRegionsOffline) {
// Set stopped if no more write requests to meta tables
// since last time we went around the loop. Any open
// meta regions will be closed on our way out.
if (oldRequestCount == getWriteRequestCount()) {
stop("Stopped; only catalog regions remaining online");
break;
}
oldRequestCount = getWriteRequestCount();
} else {
// Make sure all regions have been closed -- some regions may
// have not got it because we were splitting at the time of
// the call to closeUserRegions.
closeUserRegions(this.abortRequested.get());
}
LOG.debug("Waiting on " + getOnlineRegionsAsPrintableString());
}
}
long now = EnvironmentEdgeManager.currentTime();
if ((now - lastMsg) >= msgInterval) {
tryRegionServerReport(lastMsg, now);
lastMsg = EnvironmentEdgeManager.currentTime();
}
if (!isStopped() && !isAborted()) {
this.sleeper.sleep();
}
} // end of main run loop
} catch (Throwable t) {
if (!rpcServices.checkOOME(t)) {
String prefix = t instanceof YouAreDeadException ? "" : "Unhandled: ";
abort(prefix + t.getMessage(), t);
}
}
final Span span = TraceUtil.createSpan("HRegionServer exiting main loop");
try (Scope ignored = span.makeCurrent()) {
if (this.leaseManager != null) {
this.leaseManager.closeAfterLeasesExpire();
}
if (this.splitLogWorker != null) {
splitLogWorker.stop();
}
stopInfoServer();
// Send cache a shutdown.
if (blockCache != null) {
blockCache.shutdown();
}
if (mobFileCache != null) {
mobFileCache.shutdown();
}
// Send interrupts to wake up threads if sleeping so they notice shutdown.
// TODO: Should we check they are alive? If OOME could have exited already
if (this.hMemManager != null) {
this.hMemManager.stop();
}
if (this.cacheFlusher != null) {
this.cacheFlusher.interruptIfNecessary();
}
if (this.compactSplitThread != null) {
this.compactSplitThread.interruptIfNecessary();
}
// Stop the snapshot and other procedure handlers, forcefully killing all running tasks
if (rspmHost != null) {
rspmHost.stop(this.abortRequested.get() || this.killed);
}
if (this.killed) {
// Just skip out w/o closing regions. Used when testing.
} else if (abortRequested.get()) {
if (this.dataFsOk) {
closeUserRegions(abortRequested.get()); // Don't leave any open file handles
}
LOG.info("aborting server " + this.serverName);
} else {
closeUserRegions(abortRequested.get());
LOG.info("stopping server " + this.serverName);
}
regionReplicationBufferManager.stop();
closeClusterConnection();
// Closing the compactSplit thread before closing meta regions
if (!this.killed && containsMetaTableRegions()) {
if (!abortRequested.get() || this.dataFsOk) {
if (this.compactSplitThread != null) {
this.compactSplitThread.join();
this.compactSplitThread = null;
}
closeMetaTableRegions(abortRequested.get());
}
}
if (!this.killed && this.dataFsOk) {
waitOnAllRegionsToClose(abortRequested.get());
LOG.info("stopping server " + this.serverName + "; all regions closed.");
}
// Stop the quota manager
if (rsQuotaManager != null) {
rsQuotaManager.stop();
}
if (rsSpaceQuotaManager != null) {
rsSpaceQuotaManager.stop();
rsSpaceQuotaManager = null;
}
// flag may be changed when closing regions throws exception.
if (this.dataFsOk) {
shutdownWAL(!abortRequested.get());
}
// Make sure the proxy is down.
if (this.rssStub != null) {
this.rssStub = null;
}
if (this.lockStub != null) {
this.lockStub = null;
}
if (this.rpcClient != null) {
this.rpcClient.close();
}
if (this.leaseManager != null) {
this.leaseManager.close();
}
if (this.pauseMonitor != null) {
this.pauseMonitor.stop();
}
if (!killed) {
stopServiceThreads();
}
if (this.rpcServices != null) {
this.rpcServices.stop();
}
try {
deleteMyEphemeralNode();
} catch (KeeperException.NoNodeException nn) {
// pass
} catch (KeeperException e) {
LOG.warn("Failed deleting my ephemeral node", e);
}
// We may have failed to delete the znode at the previous step, but
// we delete the file anyway: a second attempt to delete the znode is likely to fail again.
ZNodeClearer.deleteMyEphemeralNodeOnDisk();
closeZooKeeper();
closeTableDescriptors();
LOG.info("Exiting; stopping=" + this.serverName + "; zookeeper connection closed.");
span.setStatus(StatusCode.OK);
} finally {
span.end();
}
} | 3.68 |
flink_CustomSinkOperatorUidHashes_build | /**
* Constructs the {@link CustomSinkOperatorUidHashes} with the given uid hashes.
*
* @return {@link CustomSinkOperatorUidHashes}
*/
public CustomSinkOperatorUidHashes build() {
return new CustomSinkOperatorUidHashes(
writerUidHash, committerUidHash, globalCommitterUidHash);
} | 3.68 |
dubbo_Predicates_alwaysTrue | /**
* {@link Predicate} always return <code>true</code>
*
* @param <T> the type to test
* @return <code>true</code>
*/
static <T> Predicate<T> alwaysTrue() {
return e -> true;
} | 3.68 |
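A trivial sketch of the neutral-default use case; the `org.apache.dubbo.common.function.Predicates` import path and the use of `java.util.function.Predicate#test` are assumptions based on the snippet name and signature:

```java
import java.util.function.Predicate;
import org.apache.dubbo.common.function.Predicates; // assumed import path

public class AlwaysTrueDemo {
    public static void main(String[] args) {
        // A filter that keeps everything; handy as a neutral default when composing predicates.
        Predicate<String> keepAll = Predicates.alwaysTrue();
        System.out.println(keepAll.test("anything")); // prints: true
    }
}
```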
querydsl_SpatialSupport_addSupport | /**
* Register spatial types to the given codegen module
*
* @param module module to be customized for spatial support
*/
public void addSupport(AbstractModule module) {
module.bindInstance(SQLCodegenModule.ENTITYPATH_TYPE, RelationalPathSpatial.class);
registerTypes(module.get(Configuration.class));
} | 3.68 |
hbase_TimestampsFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof TimestampsFilter)) {
return false;
}
TimestampsFilter other = (TimestampsFilter) o;
return this.getTimestamps().equals(other.getTimestamps());
} | 3.68 |
framework_KeyMapper_remove | /**
* Removes object from the mapper.
*
* @param removeobj
* the object to be removed.
*/
@Override
public void remove(V removeobj) {
final String key = objectIdKeyMap
.remove(identifierGetter.apply(removeobj));
if (key != null) {
keyObjectMap.remove(key);
}
} | 3.68 |
zxing_HybridBinarizer_getBlackMatrix | /**
* Calculates the final BitMatrix once for all requests. This could be called once from the
* constructor instead, but there are some advantages to doing it lazily, such as making
* profiling easier, and not doing heavy lifting when callers don't expect it.
*/
@Override
public BitMatrix getBlackMatrix() throws NotFoundException {
if (matrix != null) {
return matrix;
}
LuminanceSource source = getLuminanceSource();
int width = source.getWidth();
int height = source.getHeight();
if (width >= MINIMUM_DIMENSION && height >= MINIMUM_DIMENSION) {
byte[] luminances = source.getMatrix();
int subWidth = width >> BLOCK_SIZE_POWER;
if ((width & BLOCK_SIZE_MASK) != 0) {
subWidth++;
}
int subHeight = height >> BLOCK_SIZE_POWER;
if ((height & BLOCK_SIZE_MASK) != 0) {
subHeight++;
}
int[][] blackPoints = calculateBlackPoints(luminances, subWidth, subHeight, width, height);
BitMatrix newMatrix = new BitMatrix(width, height);
calculateThresholdForBlock(luminances, subWidth, subHeight, width, height, blackPoints, newMatrix);
matrix = newMatrix;
} else {
// If the image is too small, fall back to the global histogram approach.
matrix = super.getBlackMatrix();
}
return matrix;
} | 3.68 |
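A typical decode path, sketched with ZXing's Java SE helper classes (the `BufferedImageLuminanceSource` import assumes the javase module is on the classpath); `getBlackMatrix()` is invoked lazily by the reader during decoding:

```java
import com.google.zxing.BinaryBitmap;
import com.google.zxing.LuminanceSource;
import com.google.zxing.MultiFormatReader;
import com.google.zxing.NotFoundException;
import com.google.zxing.Result;
import com.google.zxing.client.j2se.BufferedImageLuminanceSource;
import com.google.zxing.common.HybridBinarizer;

import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;

public class DecodeExample {
    // The binarizer is wrapped in a BinaryBitmap and consulted by the reader.
    public static Result decode(File imageFile) throws IOException, NotFoundException {
        BufferedImage image = ImageIO.read(imageFile);
        LuminanceSource source = new BufferedImageLuminanceSource(image);
        BinaryBitmap bitmap = new BinaryBitmap(new HybridBinarizer(source));
        return new MultiFormatReader().decode(bitmap);
    }
}
```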
hbase_FavoredStochasticBalancer_getOnlineFavoredNodes | /**
* Return list of favored nodes that are online.
*/
private List<ServerName> getOnlineFavoredNodes(List<ServerName> onlineServers,
List<ServerName> serversWithoutStartCodes) {
if (serversWithoutStartCodes == null) {
return null;
} else {
List<ServerName> result = Lists.newArrayList();
for (ServerName sn : serversWithoutStartCodes) {
for (ServerName online : onlineServers) {
if (ServerName.isSameAddress(sn, online)) {
result.add(online);
}
}
}
return result;
}
} | 3.68 |
hadoop_SinglePendingCommit_getPartCount | /**
* Get the number of etags.
* @return the size of the etag list.
*/
public int getPartCount() {
return etags.size();
} | 3.68 |
hbase_SnapshotDescriptionUtils_getCorruptedFlagFileForSnapshot | /**
* Get the flag file path if the snapshot is corrupted
* @param workingDir the directory where we build the specific snapshot
* @return {@link Path} snapshot corrupted flag file path
*/
public static Path getCorruptedFlagFileForSnapshot(final Path workingDir) {
return new Path(workingDir, SNAPSHOT_CORRUPTED_FILE);
} | 3.68 |
flink_KeyedStream_maxBy | /**
* Applies an aggregation that gives the current element with the maximum value at the given
* position by the given key. An independent aggregate is kept per key. If more elements have
* the maximum value at the given position, the operator returns either the first or last one,
* depending on the parameter set.
*
 * @param positionToMaxBy The field position in the data points to maximize. This is applicable
* to Tuple types, Scala case classes, and primitive types (which is considered as having
* one field).
* @param first If true, then the operator return the first element with the maximum value,
* otherwise returns the last
* @return The transformed DataStream.
*/
public SingleOutputStreamOperator<T> maxBy(int positionToMaxBy, boolean first) {
return aggregate(
new ComparableAggregator<>(
positionToMaxBy,
getType(),
AggregationFunction.AggregationType.MAXBY,
first,
getExecutionConfig()));
} | 3.68 |
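A small sketch of the call described above, assuming a stream of `Tuple2<String, Integer>` keyed by field 0, where field 1 holds the value to maximize:

```java
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;

public class MaxByExample {
    // Per key (field 0), keep the element whose field 1 is largest,
    // preferring the first element seen when values are tied.
    public static SingleOutputStreamOperator<Tuple2<String, Integer>> maxPerKey(
            DataStream<Tuple2<String, Integer>> input) {
        return input
                .keyBy(t -> t.f0)
                .maxBy(1, true);
    }
}
```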
hudi_HoodieFileGroupReader_next | /**
* @return The next record after calling {@link #hasNext}.
*/
public T next() {
return recordBuffer.next();
} | 3.68 |
graphhopper_OSMNodeData_addCopyOfNode | /**
* Creates a copy of the coordinates stored for the given node ID
*
* @return the (artificial) OSM node ID created for the copied node and the associated ID
*/
SegmentNode addCopyOfNode(SegmentNode node) {
GHPoint3D point = getCoordinates(node.id);
if (point == null)
throw new IllegalStateException("Cannot copy node : " + node.osmNodeId + ", because it is missing");
final long newOsmId = nextArtificialOSMNodeId++;
if (idsByOsmNodeIds.put(newOsmId, INTERMEDIATE_NODE) != EMPTY_NODE)
throw new IllegalStateException("Artificial osm node id already exists: " + newOsmId);
long id = addPillarNode(newOsmId, point.getLat(), point.getLon(), point.getEle());
return new SegmentNode(newOsmId, id, node.tags);
} | 3.68 |
hadoop_BooleanWritable_equals | /**
*/
@Override
public boolean equals(Object o) {
if (!(o instanceof BooleanWritable)) {
return false;
}
BooleanWritable other = (BooleanWritable) o;
return this.value == other.value;
} | 3.68 |
hbase_EntityLock_requestLock | /**
* Sends rpc to the master to request lock. The lock request is queued with other lock requests.
* Call {@link #await()} to wait on lock. Always call {@link #unlock()} after calling the below,
* even after error.
*/
public void requestLock() throws IOException {
if (procId == null) {
try {
procId = stub.requestLock(null, lockRequest).getProcId();
} catch (Exception e) {
throw ProtobufUtil.handleRemoteException(e);
}
worker.start();
} else {
LOG.info("Lock already queued : " + toString());
}
} | 3.68 |
hadoop_MawoConfiguration_getClusterManagerURL | /**
* Get cluster manager URL.
* @return value of ycloud.url
*/
public String getClusterManagerURL() {
return configsMap.get(CLUSTER_MANAGER_URL);
} | 3.68 |
hudi_HoodieDataTableUtils_getBaseAndLogFilePathsFromFileSystem | /**
* @return All hoodie files of the table from the file system.
* @throws IOException upon errors.
*/
static List<Path> getBaseAndLogFilePathsFromFileSystem(HoodieTableMetadata tableMetadata, String basePath) throws IOException {
List<String> allPartitionPaths = tableMetadata.getAllPartitionPaths()
.stream().map(partitionPath ->
FSUtils.getPartitionPath(basePath, partitionPath).toString())
.collect(Collectors.toList());
return tableMetadata.getAllFilesInPartitions(allPartitionPaths).values().stream()
.map(fileStatuses ->
Arrays.stream(fileStatuses).map(fileStatus -> fileStatus.getPath()).collect(Collectors.toList()))
.flatMap(list -> list.stream())
.collect(Collectors.toList());
} | 3.68 |
flink_VertexInputInfoStore_get | /**
* Get a {@link JobVertexInputInfo}.
*
* @param jobVertexId the job vertex id
* @param resultId the intermediate result id
* @return the {@link JobVertexInputInfo} identified by the job vertex id and intermediate
* result id
*/
public JobVertexInputInfo get(JobVertexID jobVertexId, IntermediateDataSetID resultId) {
checkNotNull(jobVertexId);
checkNotNull(resultId);
return checkNotNull(jobVertexInputInfos.get(jobVertexId).get(resultId));
} | 3.68 |
hibernate-validator_AbstractMethodOverrideCheck_getOverriddenMethod | /**
* Find a method that is overridden by the one passed to this function.
*
* @param currentMethod the method for which we want to find the overridden methods
* @param typeElement the class or interface analyzed
* @return the overridden method if there is one, and {@code null} otherwise
*/
private ExecutableElement getOverriddenMethod(ExecutableElement currentMethod, TypeElement typeElement) {
if ( typeElement == null ) {
return null;
}
TypeElement enclosingTypeElement = getEnclosingTypeElement( currentMethod );
for ( Element element : elementUtils.getAllMembers( typeElement ) ) {
if ( !element.getKind().equals( ElementKind.METHOD ) ) {
continue;
}
if ( elementUtils.overrides( currentMethod, (ExecutableElement) element, enclosingTypeElement ) ) {
return (ExecutableElement) element;
}
}
return null;
} | 3.68 |
flink_RowDataUtil_isRetractMsg | /**
* Returns true if the message is either {@link RowKind#DELETE} or {@link
* RowKind#UPDATE_BEFORE}, which refers to a retract operation of aggregation.
*/
public static boolean isRetractMsg(RowData row) {
RowKind kind = row.getRowKind();
return kind == RowKind.UPDATE_BEFORE || kind == RowKind.DELETE;
} | 3.68 |
flink_HadoopRecoverableFsDataOutputStream_waitUntilLeaseIsRevoked | /**
* Called when resuming execution after a failure and waits until the lease of the file we are
* resuming is free.
*
* <p>The lease of the file we are resuming writing/committing to may still belong to the
* process that failed previously and whose state we are recovering.
*
* @param path The path to the file we want to resume writing to.
*/
private static boolean waitUntilLeaseIsRevoked(final FileSystem fs, final Path path)
throws IOException {
Preconditions.checkState(fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs = (DistributedFileSystem) fs;
dfs.recoverLease(path);
final Deadline deadline = Deadline.now().plus(Duration.ofMillis(LEASE_TIMEOUT));
boolean isClosed = dfs.isFileClosed(path);
while (!isClosed && deadline.hasTimeLeft()) {
try {
Thread.sleep(500L);
} catch (InterruptedException e1) {
throw new IOException("Recovering the lease failed: ", e1);
}
isClosed = dfs.isFileClosed(path);
}
return isClosed;
} | 3.68 |
framework_AbsoluteLayout_getTopUnits | /**
* Gets the unit for the 'top' attribute.
*
* @return See {@link Sizeable} UNIT_SYMBOLS for a description of the
* available units.
*/
public Unit getTopUnits() {
return topUnits;
} | 3.68 |
framework_VAbstractOrderedLayout_addOrMoveSlot | /**
* Add or move a slot to another index.
* <p>
* For internal use only. May be removed or replaced in the future.
* <p>
* You should note that the index does not refer to the DOM index if
* spacings are used. If spacings are used then the index will be adjusted
* to include the spacings when inserted.
* <p>
* For instance when using spacing the index converts to DOM index in the
* following way:
*
* <pre>
* index : 0 -> DOM index: 0
* index : 1 -> DOM index: 1
* index : 2 -> DOM index: 3
* index : 3 -> DOM index: 5
* index : 4 -> DOM index: 7
* </pre>
*
* When using this method never account for spacings.
* <p>
* The caller should remove all spacings before calling this method and
* re-add them (if necessary) after this method. This can be done before and
* after all slots have been added/moved.
* </p>
*
* @since 7.1.4
*
* @param slot
* The slot to move or add
* @param index
* The index where the slot should be placed.
* @param adjustSpacing
* true to recalculate spacings for the whole layout after the
* operation
*/
public void addOrMoveSlot(Slot slot, int index, boolean adjustSpacing) {
Profiler.enter(
"VAOL.onConnectorHierarchyChange addOrMoveSlot find index");
if (slot.getParent() == this) {
int currentIndex = getWidgetIndex(slot);
if (index == currentIndex) {
Profiler.leave(
"VAOL.onConnectorHierarchyChange addOrMoveSlot find index");
return;
}
}
Profiler.leave(
"VAOL.onConnectorHierarchyChange addOrMoveSlot find index");
Profiler.enter("VAOL.onConnectorHierarchyChange addOrMoveSlot insert");
insert(slot, index);
Profiler.leave("VAOL.onConnectorHierarchyChange addOrMoveSlot insert");
if (adjustSpacing) {
Profiler.enter(
"VAOL.onConnectorHierarchyChange addOrMoveSlot setSpacing");
setSpacing(spacing);
Profiler.leave(
"VAOL.onConnectorHierarchyChange addOrMoveSlot setSpacing");
}
} | 3.68 |
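The index-to-DOM-index table in the javadoc follows from one spacing element preceding every slot except the first; a tiny arithmetic sketch of that mapping (the helper name is made up):

public final class SpacingIndexMath {
    // Slot 0 has no preceding spacing; each later slot is preceded by exactly one spacing element.
    static int toDomIndex(int slotIndex) {
        return slotIndex == 0 ? 0 : 2 * slotIndex - 1;
    }

    public static void main(String[] args) {
        for (int i = 0; i <= 4; i++) {
            System.out.println("index " + i + " -> DOM index " + toDomIndex(i)); // 0, 1, 3, 5, 7
        }
    }
}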
dubbo_WrappedChannelHandler_getSharedExecutorService | /**
     * Get the shared executor for the current Server or Client.
     *
     * @return the shared executor service bound to this channel's URL
*/
public ExecutorService getSharedExecutorService() {
        // Application may be destroyed before the channel is disconnected; avoid creating a new application model.
// see https://github.com/apache/dubbo/issues/9127
if (url.getApplicationModel() == null || url.getApplicationModel().isDestroyed()) {
return GlobalResourcesRepository.getGlobalExecutorService();
}
// note: url.getOrDefaultApplicationModel() may create new application model
ApplicationModel applicationModel = url.getOrDefaultApplicationModel();
ExecutorRepository executorRepository = ExecutorRepository.getInstance(applicationModel);
ExecutorService executor = executorRepository.getExecutor(url);
if (executor == null) {
executor = executorRepository.createExecutorIfAbsent(url);
}
return executor;
} | 3.68 |
flink_FlinkBushyJoinReorderRule_findBestOrder | /**
     * Find the best join order using a bushy join reorder strategy. We first reorder all the
     * inner-join input factors in the multiJoin. Then we add all outer join factors on top of the
     * reordered join tree generated by the first step. Finally, any factors whose join condition is
     * simply true (cross joins) are added to the top of the tree.
*/
private static RelNode findBestOrder(RelBuilder relBuilder, LoptMultiJoin multiJoin) {
// Reorder all the inner join type input factors in the multiJoin.
List<Map<Set<Integer>, JoinPlan>> foundPlansForInnerJoin =
reorderInnerJoin(relBuilder, multiJoin);
Map<Set<Integer>, JoinPlan> lastLevelOfInnerJoin =
foundPlansForInnerJoin.get(foundPlansForInnerJoin.size() - 1);
JoinPlan bestPlanForInnerJoin = getBestPlan(lastLevelOfInnerJoin);
JoinPlan containOuterJoinPlan;
        // Add all outer join factors in the multiJoin (including left/right/full) on top of the
        // tree if an outer join condition exists in the multiJoin.
if (multiJoin.getMultiJoinRel().isFullOuterJoin() || outerJoinConditionExists(multiJoin)) {
containOuterJoinPlan = addOuterJoinToTop(bestPlanForInnerJoin, multiJoin, relBuilder);
} else {
containOuterJoinPlan = bestPlanForInnerJoin;
}
JoinPlan finalPlan;
// Add all cross join factors whose join condition is true to the top.
if (containOuterJoinPlan.factorIds.size() != multiJoin.getNumJoinFactors()) {
finalPlan = addCrossJoinToTop(containOuterJoinPlan, multiJoin, relBuilder);
} else {
finalPlan = containOuterJoinPlan;
}
final List<String> fieldNames = multiJoin.getMultiJoinRel().getRowType().getFieldNames();
return createTopProject(relBuilder, multiJoin, finalPlan, fieldNames);
} | 3.68 |
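The rule itself works on Calcite's LoptMultiJoin, which is hard to exercise in isolation; as a rough, self-contained illustration of the level-wise (bushy) enumeration behind the first step, the sketch below keeps the cheapest plan per factor set, using made-up cardinalities and a fixed assumed selectivity. The outer-join and cross-join handling of the later steps is omitted.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class BushyJoinReorderSketch {
    public static void main(String[] args) {
        double[] card = {1_000, 50, 200_000, 10}; // made-up factor cardinalities
        double selectivity = 0.1;                 // assumed selectivity of every join
        int n = card.length;
        int full = (1 << n) - 1;
        double[] rows = new double[full + 1];
        double[] cost = new double[full + 1];
        String[] tree = new String[full + 1];
        Arrays.fill(cost, Double.POSITIVE_INFINITY);
        for (int i = 0; i < n; i++) {
            rows[1 << i] = card[i];
            cost[1 << i] = 0;
            tree[1 << i] = "F" + i;
        }
        // Visit factor sets in increasing size; split each set into two disjoint halves
        // (both halves may themselves be join trees, which is what makes the search bushy).
        List<Integer> sets = new ArrayList<>();
        for (int s = 1; s <= full; s++) {
            sets.add(s);
        }
        sets.sort(Comparator.comparingInt(Integer::bitCount));
        for (int set : sets) {
            if (Integer.bitCount(set) < 2) {
                continue;
            }
            for (int left = (set - 1) & set; left > 0; left = (left - 1) & set) {
                int right = set & ~left;
                double outRows = rows[left] * rows[right] * selectivity;
                double c = cost[left] + cost[right] + outRows;
                if (c < cost[set]) {
                    cost[set] = c;
                    rows[set] = outRows;
                    tree[set] = "(" + tree[left] + " JOIN " + tree[right] + ")";
                }
            }
        }
        System.out.println("best order: " + tree[full] + ", estimated cost " + cost[full]);
    }
}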
hbase_SplitLogManagerCoordination_getServerName | /** Returns server name */
public ServerName getServerName() {
return master.getServerName();
} | 3.68 |
flink_RpcEndpoint_callAsync | /**
* Execute the callable in the main thread of the underlying RPC service, returning a future for
* the result of the callable. If the callable is not completed within the given timeout, then
* the future will be failed with a {@link TimeoutException}.
*
     * @param callable Callable to be executed in the main thread of the underlying RPC service
* @param timeout Timeout for the callable to be completed
* @param <V> Return type of the callable
* @return Future for the result of the callable.
*/
protected <V> CompletableFuture<V> callAsync(Callable<V> callable, Duration timeout) {
return rpcServer.callAsync(callable, timeout);
} | 3.68 |
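A hedged usage sketch: a hypothetical endpoint that queries main-thread-confined state with a ten-second timeout. The class and method names are made up and may need adjusting to the concrete Flink version.

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import org.apache.flink.runtime.rpc.RpcEndpoint;
import org.apache.flink.runtime.rpc.RpcService;

public class StatusEndpoint extends RpcEndpoint {

    protected StatusEndpoint(RpcService rpcService) {
        super(rpcService);
    }

    public CompletableFuture<Integer> getQueueSizeAsync() {
        // The callable runs in the endpoint's main thread, so it can read
        // main-thread-confined state without extra synchronization.
        return callAsync(this::currentQueueSize, Duration.ofSeconds(10));
    }

    private int currentQueueSize() {
        return 42; // placeholder for real main-thread state
    }
}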
pulsar_ResourceUnitRanking_calculateBrokerCapacity | /**
     * Calculate how many bundles could be handled with the specified resources.
*/
private static long calculateBrokerCapacity(ResourceQuota defaultQuota, double usableCPU, double usableMem,
double usableBandwidthOut, double usableBandwidthIn) {
// estimate capacity with usable CPU
double cpuCapacity = (usableCPU / cpuUsageByMsgRate)
/ (defaultQuota.getMsgRateIn() + defaultQuota.getMsgRateOut());
// estimate capacity with usable memory
double memCapacity = usableMem / defaultQuota.getMemory();
// estimate capacity with usable outbound bandwidth
double bandwidthOutCapacity = usableBandwidthOut / defaultQuota.getBandwidthOut();
// estimate capacity with usable inbound bandwidth
double bandwidthInCapacity = usableBandwidthIn / defaultQuota.getBandwidthIn();
// the ServiceUnit capacity is determined by the minimum capacity of resources
double capacity = Math.min(cpuCapacity,
Math.min(memCapacity, Math.min(bandwidthOutCapacity, bandwidthInCapacity)));
return (long) Math.max(capacity, 0);
} | 3.68 |
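A worked numeric illustration of the min-of-resources formula above; every number, including cpuUsageByMsgRate and the quota values, is made up rather than a Pulsar default.

public class BrokerCapacityExample {
    public static void main(String[] args) {
        double cpuUsageByMsgRate = 0.05;  // assumed % of CPU consumed per msg/s
        double quotaMsgRateIn = 50, quotaMsgRateOut = 100;
        double quotaMemoryMb = 75, quotaBandwidthOutMb = 5, quotaBandwidthInMb = 2.5;
        double usableCpu = 40, usableMemMb = 6_000, usableBwOutMb = 400, usableBwInMb = 300;

        double cpuCapacity = (usableCpu / cpuUsageByMsgRate) / (quotaMsgRateIn + quotaMsgRateOut); // 5.33
        double memCapacity = usableMemMb / quotaMemoryMb;                                          // 80
        double bwOutCapacity = usableBwOutMb / quotaBandwidthOutMb;                                // 80
        double bwInCapacity = usableBwInMb / quotaBandwidthInMb;                                   // 120

        // The broker hosts only as many bundles as its scarcest resource allows: CPU-bound here.
        long capacity = (long) Math.max(0,
                Math.min(cpuCapacity, Math.min(memCapacity, Math.min(bwOutCapacity, bwInCapacity))));
        System.out.println(capacity); // 5
    }
}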
hadoop_AbstractS3ACommitter_setupTask | /**
   * Task setup. Fails if the UUID was generated locally and the same
   * committer wasn't used for job setup.
* {@inheritDoc}
* @throws PathCommitException if the task UUID options are unsatisfied.
*/
@Override
public void setupTask(TaskAttemptContext context) throws IOException {
TaskAttemptID attemptID = context.getTaskAttemptID();
// update the context so that task IO in the same thread has
// the relevant values.
new AuditContextUpdater(context)
.updateCurrentAuditContext();
try (DurationInfo d = new DurationInfo(LOG, "Setup Task %s",
attemptID)) {
// reject attempts to set up the task where the output won't be
// picked up
if (!jobSetup
&& getUUIDSource() == JobUUIDSource.GeneratedLocally) {
// on anything other than a test run, the context must not have been
// generated locally.
throw new PathCommitException(getOutputPath().toString(),
"Task attempt " + attemptID
+ " " + E_SELF_GENERATED_JOB_UUID);
}
Path taskAttemptPath = getTaskAttemptPath(context);
FileSystem fs = taskAttemptPath.getFileSystem(getConf());
      // delete the task attempt path if somehow it was already there
fs.delete(taskAttemptPath, true);
// create an empty directory
fs.mkdirs(taskAttemptPath);
}
} | 3.68 |
hudi_TableChangesHelper_applyAddChange2Fields | /**
   * Apply add operations and column position change operations.
*
* @param fields origin column fields.
* @param adds column fields to be added.
   * @param pchanges a wrapper class holding all the position change operations.
* @return column fields after adjusting the position.
*/
public static List<Types.Field> applyAddChange2Fields(List<Types.Field> fields, ArrayList<Types.Field> adds, ArrayList<TableChange.ColumnPositionChange> pchanges) {
if (adds == null && pchanges == null) {
return fields;
}
LinkedList<Types.Field> result = new LinkedList<>(fields);
// apply add columns
if (adds != null && !adds.isEmpty()) {
result.addAll(adds);
}
// apply position change
if (pchanges != null && !pchanges.isEmpty()) {
for (TableChange.ColumnPositionChange pchange : pchanges) {
Types.Field srcField = result.stream().filter(f -> f.fieldId() == pchange.getSrcId()).findFirst().get();
Types.Field dsrField = result.stream().filter(f -> f.fieldId() == pchange.getDsrId()).findFirst().orElse(null);
// we remove srcField first
result.remove(srcField);
switch (pchange.type()) {
case AFTER:
// add srcField after dsrField
result.add(result.indexOf(dsrField) + 1, srcField);
break;
case BEFORE:
// add srcField before dsrField
result.add(result.indexOf(dsrField), srcField);
break;
case FIRST:
result.addFirst(srcField);
break;
default:
// should not reach here
}
}
}
return result;
} | 3.68 |
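A self-contained illustration of the AFTER/BEFORE/FIRST repositioning logic above, using plain strings instead of Hudi's Types.Field so it runs without the Hudi type system:

import java.util.LinkedList;
import java.util.List;

public class ColumnPositionExample {
    enum Position { FIRST, BEFORE, AFTER }

    static void move(LinkedList<String> cols, String src, String dst, Position pos) {
        cols.remove(src);                   // detach the source column first, as above
        switch (pos) {
            case AFTER -> cols.add(cols.indexOf(dst) + 1, src);
            case BEFORE -> cols.add(cols.indexOf(dst), src);
            case FIRST -> cols.addFirst(src);
        }
    }

    public static void main(String[] args) {
        LinkedList<String> cols = new LinkedList<>(List.of("id", "name", "ts"));
        cols.add("price");                  // an "add column" lands at the end first
        move(cols, "price", "name", Position.AFTER);
        System.out.println(cols);           // [id, name, price, ts]
    }
}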
hadoop_CacheDirectiveStats_getFilesNeeded | /**
* @return The number of files needed.
*/
public long getFilesNeeded() {
return filesNeeded;
} | 3.68 |
hbase_StoreFileReader_passesKeyRangeFilter | /**
   * Checks whether the given scan's rowkey range overlaps with the current storefile's key range.
* @param scan the scan specification. Used to determine the rowkey range.
* @return true if there is overlap, false otherwise
*/
public boolean passesKeyRangeFilter(Scan scan) {
Optional<Cell> firstKeyKV = this.getFirstKey();
Optional<Cell> lastKeyKV = this.getLastKey();
if (!firstKeyKV.isPresent() || !lastKeyKV.isPresent()) {
// the file is empty
return false;
}
if (
Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW)
&& Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)
) {
return true;
}
byte[] smallestScanRow = scan.isReversed() ? scan.getStopRow() : scan.getStartRow();
byte[] largestScanRow = scan.isReversed() ? scan.getStartRow() : scan.getStopRow();
boolean nonOverLapping =
(getComparator().compareRows(firstKeyKV.get(), largestScanRow, 0, largestScanRow.length) > 0
&& !Bytes.equals(scan.isReversed() ? scan.getStartRow() : scan.getStopRow(),
HConstants.EMPTY_END_ROW))
|| getComparator().compareRows(lastKeyKV.get(), smallestScanRow, 0, smallestScanRow.length)
< 0;
return !nonOverLapping;
} | 3.68 |
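A simplified sketch of the same overlap test using lexicographic String keys, where an empty string plays the role of HConstants.EMPTY_START_ROW / EMPTY_END_ROW; the reversed-scan handling of the real method is dropped.

public class KeyRangeOverlapExample {

    static boolean overlaps(String fileFirstKey, String fileLastKey, String scanStart, String scanStop) {
        if (scanStart.isEmpty() && scanStop.isEmpty()) {
            return true;                                   // an unbounded scan touches every file
        }
        boolean fileStartsAfterScan = !scanStop.isEmpty() && fileFirstKey.compareTo(scanStop) > 0;
        boolean fileEndsBeforeScan = fileLastKey.compareTo(scanStart) < 0;
        return !(fileStartsAfterScan || fileEndsBeforeScan);
    }

    public static void main(String[] args) {
        System.out.println(overlaps("bbb", "ddd", "ccc", "eee")); // true, the ranges intersect
        System.out.println(overlaps("bbb", "ddd", "eee", "fff")); // false, the file ends before the scan
    }
}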
hbase_HRegion_reckonDeltasByStore | /**
   * Reckon the Cells to apply to the WAL and memstore, and to return to the Client, for the passed
   * column family/Store. Does a Get of the current value and then adds the passed-in deltas for
   * this Store, returning the result.
* @param mutation The encompassing Mutation object
* @param deltas Changes to apply to this Store; either increment amount or data to append
   * @param results List in which we accumulate all the Cells that are to be returned to the
   *                client; if null, the client doesn't want results returned.
* @return Resulting Cells after <code>deltas</code> have been applied to current values. Side
* effect is our filling out of the <code>results</code> List.
*/
private List<Cell> reckonDeltasByStore(HStore store, Mutation mutation, long now,
List<Cell> deltas, List<Cell> results) throws IOException {
assert mutation instanceof Increment || mutation instanceof Append;
byte[] columnFamily = store.getColumnFamilyDescriptor().getName();
List<Pair<Cell, Cell>> cellPairs = new ArrayList<>(deltas.size());
// Sort the cells so that they match the order that they appear in the Get results.
// Otherwise, we won't be able to find the existing values if the cells are not specified
// in order by the client since cells are in an array list.
deltas.sort(store.getComparator());
// Get previous values for all columns in this family.
Get get = new Get(mutation.getRow());
for (Cell cell : deltas) {
get.addColumn(columnFamily, CellUtil.cloneQualifier(cell));
}
TimeRange tr;
if (mutation instanceof Increment) {
tr = ((Increment) mutation).getTimeRange();
} else {
tr = ((Append) mutation).getTimeRange();
}
if (tr != null) {
get.setTimeRange(tr.getMin(), tr.getMax());
}
try (RegionScanner scanner = region.getScanner(new Scan(get))) {
// NOTE: Please don't use HRegion.get() instead,
// because it will copy cells to heap. See HBASE-26036
List<Cell> currentValues = new ArrayList<>();
scanner.next(currentValues);
// Iterate the input columns and update existing values if they were found, otherwise
// add new column initialized to the delta amount
int currentValuesIndex = 0;
for (int i = 0; i < deltas.size(); i++) {
Cell delta = deltas.get(i);
Cell currentValue = null;
if (
currentValuesIndex < currentValues.size()
&& CellUtil.matchingQualifier(currentValues.get(currentValuesIndex), delta)
) {
currentValue = currentValues.get(currentValuesIndex);
if (i < (deltas.size() - 1) && !CellUtil.matchingQualifier(delta, deltas.get(i + 1))) {
currentValuesIndex++;
}
}
        // Switch on whether this is an increment or an append, building the new Cell to apply.
Cell newCell;
if (mutation instanceof Increment) {
long deltaAmount = getLongValue(delta);
final long newValue =
currentValue == null ? deltaAmount : getLongValue(currentValue) + deltaAmount;
newCell = reckonDelta(delta, currentValue, columnFamily, now, mutation,
(oldCell) -> Bytes.toBytes(newValue));
} else {
newCell = reckonDelta(delta, currentValue, columnFamily, now, mutation,
(oldCell) -> ByteBuffer
.wrap(new byte[delta.getValueLength() + oldCell.getValueLength()])
.put(oldCell.getValueArray(), oldCell.getValueOffset(), oldCell.getValueLength())
.put(delta.getValueArray(), delta.getValueOffset(), delta.getValueLength())
.array());
}
if (region.maxCellSize > 0) {
int newCellSize = PrivateCellUtil.estimatedSerializedSizeOf(newCell);
if (newCellSize > region.maxCellSize) {
String msg = "Cell with size " + newCellSize + " exceeds limit of "
+ region.maxCellSize + " bytes in region " + this;
LOG.debug(msg);
throw new DoNotRetryIOException(msg);
}
}
cellPairs.add(new Pair<>(currentValue, newCell));
        // Add to results to get returned to the Client. If null, the client does not want results.
if (results != null) {
results.add(newCell);
}
}
      // Give coprocessors a chance to update the new cells before applying to the WAL or memstore
if (region.coprocessorHost != null) {
// Here the operation must be increment or append.
cellPairs = mutation instanceof Increment
? region.coprocessorHost.postIncrementBeforeWAL(mutation, cellPairs)
: region.coprocessorHost.postAppendBeforeWAL(mutation, cellPairs);
}
}
return cellPairs.stream().map(Pair::getSecond).collect(Collectors.toList());
} | 3.68 |
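A client-side sketch of the two mutation types this method reconciles; the table, family and qualifier names are made up, and the configuration is left at defaults.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeltaMutationsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("metrics"))) {
            // Increment: the server adds the delta to the current long value of the cell.
            Increment inc = new Increment(Bytes.toBytes("row1"));
            inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
            Result incResult = table.increment(inc);
            // Append: the server concatenates the new bytes onto the current cell value.
            Append app = new Append(Bytes.toBytes("row1"));
            app.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("tags"), Bytes.toBytes(",fresh"));
            Result appResult = table.append(app);
            System.out.println(incResult + " / " + appResult);
        }
    }
}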
framework_MSSQLGenerator_generateSelectQuery | /*
* (non-Javadoc)
*
* @see com.vaadin.addon.sqlcontainer.query.generator.DefaultSQLGenerator#
     * generateSelectQuery(java.lang.String, java.util.List, java.util.List,
     * int, int, java.lang.String)
*/
@Override
public StatementHelper generateSelectQuery(String tableName,
List<Filter> filters, List<OrderBy> orderBys, int offset,
int pagelength, String toSelect) {
if (tableName == null || tableName.trim().equals("")) {
throw new IllegalArgumentException("Table name must be given.");
}
/* Adjust offset and page length parameters to match "row numbers" */
offset = pagelength > 1 ? ++offset : offset;
pagelength = pagelength > 1 ? --pagelength : pagelength;
toSelect = toSelect == null ? "*" : toSelect;
StatementHelper sh = getStatementHelper();
StringBuffer query = new StringBuffer();
/* Row count request is handled here */
if ("COUNT(*)".equalsIgnoreCase(toSelect)) {
query.append(String.format(
"SELECT COUNT(*) AS %s FROM (SELECT * FROM %s",
QueryBuilder.quote("rowcount"), tableName));
if (filters != null && !filters.isEmpty()) {
query.append(
QueryBuilder.getWhereStringForFilters(filters, sh));
}
query.append(") AS t");
sh.setQueryString(query.toString());
return sh;
}
/* SELECT without row number constraints */
if (offset == 0 && pagelength == 0) {
query.append("SELECT ").append(toSelect).append(" FROM ")
.append(tableName);
if (filters != null) {
query.append(
QueryBuilder.getWhereStringForFilters(filters, sh));
}
if (orderBys != null) {
for (OrderBy o : orderBys) {
generateOrderBy(query, o, orderBys.indexOf(o) == 0);
}
}
sh.setQueryString(query.toString());
return sh;
}
/* Remaining SELECT cases are handled here */
query.append("SELECT * FROM (SELECT row_number() OVER (");
if (orderBys != null) {
for (OrderBy o : orderBys) {
generateOrderBy(query, o, orderBys.indexOf(o) == 0);
}
}
query.append(") AS rownum, " + toSelect + " FROM ").append(tableName);
if (filters != null) {
query.append(QueryBuilder.getWhereStringForFilters(filters, sh));
}
query.append(") AS a WHERE a.rownum BETWEEN ").append(offset)
.append(" AND ").append(offset + pagelength);
sh.setQueryString(query.toString());
return sh;
} | 3.68 |
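A hedged illustration of the paginated statement shape the generator produces for offset 10 and page length 5 with a single ascending ORDER BY; the table and column names are made up, and any WHERE placeholders for filters are omitted.

public class MssqlPagingExample {
    public static void main(String[] args) {
        int offset = 10, pagelength = 5;
        // The generator shifts to 1-based row numbers, so the window covers rows 11..15.
        int first = offset + 1;
        int last = first + pagelength - 1;
        String sql = "SELECT * FROM ("
                + "SELECT row_number() OVER (ORDER BY \"name\" ASC) AS rownum, * FROM \"people\""
                + ") AS a WHERE a.rownum BETWEEN " + first + " AND " + last;
        System.out.println(sql);
    }
}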