name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, range 3.26-3.68) |
---|---|---|
dubbo_ServiceBean_publishExportEvent | /**
* @since 2.6.5
*/
private void publishExportEvent() {
ServiceBeanExportedEvent exportEvent = new ServiceBeanExportedEvent(this);
applicationEventPublisher.publishEvent(exportEvent);
} | 3.68 |
hadoop_Anonymizer_run | /**
* Runs the actual anonymization tool.
*/
public int run() throws Exception {
try {
anonymizeTrace();
} catch (IOException ioe) {
System.err.println("Error running the trace anonymizer!");
ioe.printStackTrace();
System.out.println("\n\nAnonymization unsuccessful!");
return -1;
}
try {
anonymizeTopology();
} catch (IOException ioe) {
System.err.println("Error running the cluster topology anonymizer!");
ioe.printStackTrace();
System.out.println("\n\nAnonymization unsuccessful!");
return -1;
}
statePool.persist();
System.out.println("Anonymization completed successfully!");
return 0;
} | 3.68 |
morf_SqlDialect_getSqlForCriterionValue | /**
* Convert an Object criterion value (i.e. the right hand side) to valid SQL
* based on its type.
*
* @param value the object to convert to a string
* @return a string representation of the object
*/
protected String getSqlForCriterionValue(Object value) {
if (value instanceof String) {
return getSqlFrom((String) value);
}
if (value instanceof Boolean) {
return getSqlFrom((Boolean) value);
}
if (value instanceof LocalDate) {
return getSqlFrom((LocalDate) value);
}
if (value instanceof Criterion) {
return getSqlFrom((Criterion) value);
}
if (value instanceof AliasedField) {
return getSqlFrom((AliasedField) value);
}
return value.toString();
} | 3.68 |
hibernate-validator_ResourceLoaderHelper_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private static <T> T run(PrivilegedAction<T> action) {
return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
} | 3.68 |
streampipes_AbstractConfigurablePipelineElementBuilder_requiredSlideToggle | /**
* Assigns a new required slide toggle for a true/false selection
*
* @param label The {@link org.apache.streampipes.sdk.helpers.Label}
* that describes why this parameter is needed in a user-friendly manner.
* @param defaultValue The toggle's default value
* @return this
*/
public K requiredSlideToggle(Label label, boolean defaultValue) {
SlideToggleStaticProperty slideToggle = new SlideToggleStaticProperty(
label.getInternalId(),
label.getLabel(),
label.getDescription(),
defaultValue);
slideToggle.setSelected(defaultValue);
this.staticProperties.add(slideToggle);
return me();
} | 3.68 |
framework_AbstractDateField_setDateFormat | /**
* Sets formatting used by some component implementations. See
* {@link SimpleDateFormat} for format details.
*
* By default it is encouraged to use the default formatting defined by the Locale,
* but due to some JVM bugs it is sometimes necessary to use this method to
* override the formatting. See Vaadin issue #2200.
*
* @param dateFormat
* the dateFormat to set, can be {@code null}
*
* @see com.vaadin.ui.AbstractComponent#setLocale(Locale)
*/
public void setDateFormat(String dateFormat) {
getState().format = dateFormat;
} | 3.68 |
flink_InPlaceMutableHashTable_overwriteRecordAt | /**
* Overwrites a record at the specified position. The record is read from a DataInputView
* (this will be the staging area). WARNING: The record must not be larger than the original
* record.
*
* @param pointer Points to the position to overwrite.
* @param input The DataInputView to read the record from
* @param size The size of the record
* @throws IOException
*/
public void overwriteRecordAt(long pointer, DataInputView input, int size)
throws IOException {
setWritePosition(pointer);
outView.write(input, size);
} | 3.68 |
morf_ArchiveDataSetWriter_close | /**
* @see java.util.zip.ZipOutputStream#close()
*/
@Override
public void close() throws IOException {
// Suppress the close
} | 3.68 |
flink_CompressedSerializedValue_deserializeValue | /**
* Decompress and deserialize the data to get the original object.
*
* @param loader the classloader to deserialize
* @return the deserialized object
* @throws IOException exception during decompression and deserialization
* @throws ClassNotFoundException if class is not found in the classloader
*/
@Override
public T deserializeValue(ClassLoader loader) throws IOException, ClassNotFoundException {
Preconditions.checkNotNull(loader, "No classloader has been passed");
return InstantiationUtil.decompressAndDeserializeObject(getByteArray(), loader);
} | 3.68 |
hudi_JavaExecutionStrategy_readRecordsForGroupWithLogs | /**
* Read records from baseFiles and apply updates.
*/
private List<HoodieRecord<T>> readRecordsForGroupWithLogs(List<ClusteringOperation> clusteringOps,
String instantTime) {
HoodieWriteConfig config = getWriteConfig();
HoodieTable table = getHoodieTable();
List<HoodieRecord<T>> records = new ArrayList<>();
clusteringOps.forEach(clusteringOp -> {
long maxMemoryPerCompaction = IOUtils.getMaxMemoryPerCompaction(new JavaTaskContextSupplier(), config);
LOG.info("MaxMemoryPerCompaction run as part of clustering => " + maxMemoryPerCompaction);
Option<HoodieFileReader> baseFileReader = Option.empty();
HoodieMergedLogRecordScanner scanner = null;
try {
Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(config.getSchema()));
scanner = HoodieMergedLogRecordScanner.newBuilder()
.withFileSystem(table.getMetaClient().getFs())
.withBasePath(table.getMetaClient().getBasePath())
.withLogFilePaths(clusteringOp.getDeltaFilePaths())
.withReaderSchema(readerSchema)
.withLatestInstantTime(instantTime)
.withMaxMemorySizeInBytes(maxMemoryPerCompaction)
.withReadBlocksLazily(config.getCompactionLazyBlockReadEnabled())
.withReverseReader(config.getCompactionReverseLogReadEnabled())
.withBufferSize(config.getMaxDFSStreamBufferSize())
.withSpillableMapBasePath(config.getSpillableMapBasePath())
.withPartition(clusteringOp.getPartitionPath())
.withDiskMapType(config.getCommonConfig().getSpillableDiskMapType())
.withBitCaskDiskMapCompressionEnabled(config.getCommonConfig().isBitCaskDiskMapCompressionEnabled())
.withRecordMerger(config.getRecordMerger())
.build();
baseFileReader = StringUtils.isNullOrEmpty(clusteringOp.getDataFilePath())
? Option.empty()
: Option.of(HoodieFileReaderFactory.getReaderFactory(recordType).getFileReader(table.getHadoopConf(), new Path(clusteringOp.getDataFilePath())));
HoodieTableConfig tableConfig = table.getMetaClient().getTableConfig();
Iterator<HoodieRecord<T>> fileSliceReader = new HoodieFileSliceReader(baseFileReader, scanner, readerSchema, tableConfig.getPreCombineField(), writeConfig.getRecordMerger(),
tableConfig.getProps(),
tableConfig.populateMetaFields() ? Option.empty() : Option.of(Pair.of(tableConfig.getRecordKeyFieldProp(),
tableConfig.getPartitionFieldProp())));
fileSliceReader.forEachRemaining(records::add);
} catch (IOException e) {
throw new HoodieClusteringException("Error reading input data for " + clusteringOp.getDataFilePath()
+ " and " + clusteringOp.getDeltaFilePaths(), e);
} finally {
if (scanner != null) {
scanner.close();
}
if (baseFileReader.isPresent()) {
baseFileReader.get().close();
}
}
});
return records;
} | 3.68 |
flink_ExecutorNotifier_notifyReadyAsync | /**
* Call the given callable periodically and notify the {@link #executorToNotify} to execute the handler.
*
* <p>Note that when this method is invoked multiple times, multiple callables may be
* executed concurrently, and so may the handlers. For example, even assuming both the
* workerExecutor and the executorToNotify are single threaded, the following code may still
* throw a <code>ConcurrentModificationException</code>.
*
* <pre>{@code
* final List<Integer> list = new ArrayList<>();
*
* // The callable adds the integer 1 to the list. While this works at first glance,
* // a ConcurrentModificationException may be thrown because the callable and the
* // handler may modify the list at the same time.
* notifier.notifyReadyAsync(
* () -> list.add(1),
* (ignoredValue, ignoredThrowable) -> list.add(2));
* }</pre>
*
* <p>Instead, the above logic should be implemented as:
*
* <pre>{@code
* // Modify the state in the handler.
* notifier.notifyReadyAsync(() -> 1, (v, ignoredThrowable) -> {
* list.add(v);
* list.add(2);
* });
* }</pre>
*
* @param callable the callable to execute before notifying the executor.
* @param handler the handler that handles the result of the callable.
* @param initialDelayMs the initial delay in ms before invoking the given callable.
* @param periodMs the interval in ms to invoke the callable.
*/
public <T> void notifyReadyAsync(
Callable<T> callable,
BiConsumer<T, Throwable> handler,
long initialDelayMs,
long periodMs) {
workerExecutor.scheduleAtFixedRate(
() -> {
try {
T result = callable.call();
executorToNotify.execute(() -> handler.accept(result, null));
} catch (Throwable t) {
executorToNotify.execute(() -> handler.accept(null, t));
}
},
initialDelayMs,
periodMs,
TimeUnit.MILLISECONDS);
} | 3.68 |
hbase_MasterObserver_preDisableTable | /**
* Called prior to disabling a table. Called as part of disable table RPC call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
*/
default void preDisableTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName) throws IOException {
} | 3.68 |
pulsar_ManagedCursorImpl_recover | /**
* Performs the initial recovery, reading the mark-deleted position from the ledger and then calling initialize to
* have a new opened ledger.
*/
void recover(final VoidCallback callback) {
// Read the meta-data ledgerId from the store
log.info("[{}] Recovering from bookkeeper ledger cursor: {}", ledger.getName(), name);
ledger.getStore().asyncGetCursorInfo(ledger.getName(), name, new MetaStoreCallback<ManagedCursorInfo>() {
@Override
public void operationComplete(ManagedCursorInfo info, Stat stat) {
updateCursorLedgerStat(info, stat);
lastActive = info.getLastActive() != 0 ? info.getLastActive() : lastActive;
if (log.isDebugEnabled()) {
log.debug("[{}] [{}] Recover cursor last active to [{}]", ledger.getName(), name, lastActive);
}
Map<String, String> recoveredCursorProperties = Collections.emptyMap();
if (info.getCursorPropertiesCount() > 0) {
// Recover properties map
recoveredCursorProperties = new HashMap<>();
for (int i = 0; i < info.getCursorPropertiesCount(); i++) {
StringProperty property = info.getCursorProperties(i);
recoveredCursorProperties.put(property.getName(), property.getValue());
}
}
cursorProperties = recoveredCursorProperties;
if (info.getCursorsLedgerId() == -1L) {
// There is no cursor ledger to read the last position from. It means the cursor has been properly
// closed and the last mark-delete position is stored in the ManagedCursorInfo itself.
PositionImpl recoveredPosition = new PositionImpl(info.getMarkDeleteLedgerId(),
info.getMarkDeleteEntryId());
if (info.getIndividualDeletedMessagesCount() > 0) {
recoverIndividualDeletedMessages(info.getIndividualDeletedMessagesList());
}
Map<String, Long> recoveredProperties = Collections.emptyMap();
if (info.getPropertiesCount() > 0) {
// Recover properties map
recoveredProperties = new HashMap<>();
for (int i = 0; i < info.getPropertiesCount(); i++) {
LongProperty property = info.getProperties(i);
recoveredProperties.put(property.getName(), property.getValue());
}
}
recoveredCursor(recoveredPosition, recoveredProperties, recoveredCursorProperties, null);
callback.operationComplete();
} else {
// Need to proceed and read the last entry in the specified ledger to find out the last position
log.info("[{}] Cursor {} meta-data recover from ledger {}", ledger.getName(), name,
info.getCursorsLedgerId());
recoverFromLedger(info, callback);
}
}
@Override
public void operationFailed(MetaStoreException e) {
callback.operationFailed(e);
}
});
} | 3.68 |
hadoop_ResourceRequest_setAllocationRequestId | /**
* Set the optional <em>ID</em> corresponding to this allocation request. This
* ID is an identifier for different {@code ResourceRequest}s from the <b>same
* application</b>. The allocated {@code Container}(s) received as part of the
* {@code AllocateResponse} response will have the ID corresponding to the
* original {@code ResourceRequest} for which the RM made the allocation.
* <p>
* The scheduler may return multiple {@code AllocateResponse}s corresponding
* to the same ID as and when scheduler allocates {@code Container}(s).
* <b>Applications</b> can continue to completely ignore the returned ID in
* the response and use the allocation for any of their outstanding requests.
* <p>
* If one wishes to replace an entire {@code ResourceRequest} corresponding to
* a specific ID, they can simply cancel the corresponding {@code
* ResourceRequest} and submit a new one afresh.
* <p>
* If the ID is not set, scheduler will continue to work as previously and all
* allocated {@code Container}(s) will have the default ID, -1.
*
* @param allocationRequestID the <em>ID</em> corresponding to this allocation
* request.
*/
@Public
@Evolving
public void setAllocationRequestId(long allocationRequestID) {
throw new UnsupportedOperationException();
} | 3.68 |
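A minimal usage sketch (not from the source) of how an application master might tag a request with an allocation-request ID and match an allocated container back to it; the priority, resource size, locality (`"*"`), and the ID value 42 are arbitrary illustrative assumptions.

```java
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class AllocationIdExample {

    static ResourceRequest requestWithId(long allocationRequestId) {
        // Ask for one container anywhere ("*") with 1 GB / 1 vcore; values are illustrative.
        ResourceRequest request = ResourceRequest.newInstance(
                Priority.newInstance(0), ResourceRequest.ANY,
                Resource.newInstance(1024, 1), 1);
        // Tag the request so allocated containers can be matched back to it later.
        request.setAllocationRequestId(allocationRequestId);
        return request;
    }

    static void matchAllocation(Container allocated) {
        // Containers returned in the AllocateResponse carry the same ID.
        long id = allocated.getAllocationRequestId();
        System.out.println("Container " + allocated.getId() + " satisfies request " + id);
    }
}
```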
hbase_RegionLocations_removeElementsWithNullLocation | /**
* Set the element to null if its getServerName method returns null. Returns null if all the
* elements are removed.
*/
public RegionLocations removeElementsWithNullLocation() {
HRegionLocation[] newLocations = new HRegionLocation[locations.length];
boolean hasNonNullElement = false;
for (int i = 0; i < locations.length; i++) {
if (locations[i] != null && locations[i].getServerName() != null) {
hasNonNullElement = true;
newLocations[i] = locations[i];
}
}
return hasNonNullElement ? new RegionLocations(newLocations) : null;
} | 3.68 |
framework_LegacyLocatorStrategy_getElementsByPathStartingAt | /**
* {@inheritDoc}
*/
@Override
public List<Element> getElementsByPathStartingAt(String path,
Element root) {
// This type of search is not supported in LegacyLocator
List<Element> array = new ArrayList<>();
Element e = getElementByPathStartingAt(path, root);
if (e != null) {
array.add(e);
}
return array;
} | 3.68 |
hadoop_WorkerId_readFields | /** {@inheritDoc} */
@Override
public final void readFields(final DataInput dataInput) throws IOException {
workerId.readFields(dataInput);
hostname.readFields(dataInput);
ipAdd.readFields(dataInput);
} | 3.68 |
hudi_GcsObjectMetadataFetcher_getGcsObjectMetadata | /**
* @param cloudObjectMetadataDF a Dataset that contains metadata of GCS objects. Assumed to be a persisted form
* of a Cloud Storage Pubsub Notification event.
* @param checkIfExists Check if each file exists, before returning its full path
* @return A {@link List} of {@link CloudObjectMetadata} containing GCS info.
*/
public List<CloudObjectMetadata> getGcsObjectMetadata(JavaSparkContext jsc, Dataset<Row> cloudObjectMetadataDF, boolean checkIfExists) {
SerializableConfiguration serializableHadoopConf = new SerializableConfiguration(jsc.hadoopConfiguration());
return cloudObjectMetadataDF
.select("bucket", "name", "size")
.distinct()
.mapPartitions(getCloudObjectMetadataPerPartition(GCS_PREFIX, serializableHadoopConf, checkIfExists), Encoders.kryo(CloudObjectMetadata.class))
.collectAsList();
} | 3.68 |
hudi_JavaExecutionStrategy_readRecordsForGroup | /**
* Get a list of all records for the group. This includes all records from the file slices
* (applying updates from log files, if any).
*/
private List<HoodieRecord<T>> readRecordsForGroup(HoodieClusteringGroup clusteringGroup, String instantTime) {
List<ClusteringOperation> clusteringOps = clusteringGroup.getSlices().stream().map(ClusteringOperation::create).collect(Collectors.toList());
boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);
if (hasLogFiles) {
// if there are log files, we read all records into memory for a file group and apply updates.
return readRecordsForGroupWithLogs(clusteringOps, instantTime);
} else {
// We want to optimize reading records for the case where there are no log files.
return readRecordsForGroupBaseFiles(clusteringOps);
}
} | 3.68 |
graphhopper_VectorTile_removeLayers | /**
* <code>repeated .vector_tile.Tile.Layer layers = 3;</code>
*/
public Builder removeLayers(int index) {
if (layersBuilder_ == null) {
ensureLayersIsMutable();
layers_.remove(index);
onChanged();
} else {
layersBuilder_.remove(index);
}
return this;
} | 3.68 |
flink_PermanentBlobCache_releaseJob | /**
* Unregisters use of job-related BLOBs and allows them to be released.
*
* @param jobId ID of the job this blob belongs to
* @see #registerJob(JobID)
*/
@Override
public void releaseJob(JobID jobId) {
checkNotNull(jobId);
synchronized (jobRefCounters) {
RefCount ref = jobRefCounters.get(jobId);
if (ref == null || ref.references == 0) {
log.warn(
"improper use of releaseJob() without a matching number of registerJob() calls for jobId "
+ jobId);
return;
}
--ref.references;
if (ref.references == 0) {
ref.keepUntil = System.currentTimeMillis() + cleanupInterval;
}
}
} | 3.68 |
framework_CollapseEvent_getCollapsedItem | /**
* Get the collapsed item that triggered this event.
*
* @return the collapsed item
*/
public T getCollapsedItem() {
return collapsedItem;
} | 3.68 |
framework_VTabsheet_setStyleNames | /**
* Sets the style names for this tab according to the given parameters.
*
* @param selected
* {@code true} if the tab is selected, {@code false}
* otherwise
* @param first
* {@code true} if the tab is the first one from the left,
* {@code false} otherwise
* @param keyboardFocus
* {@code true} if the tab should display keyboard navigation
* focus styles, {@code false} otherwise -- the focus style
* name is used by the compatibility themes like
* {@code reindeer} ({@code valo} relies on {@code :focus}
* pseudo-class)
*/
public void setStyleNames(boolean selected, boolean first,
boolean keyboardFocus) {
setStyleName(td, TD_FIRST_CLASSNAME, first);
setStyleName(td, TD_SELECTED_CLASSNAME, selected);
setStyleName(td, TD_SELECTED_FIRST_CLASSNAME, selected && first);
setStyleName(div, DIV_SELECTED_CLASSNAME, selected);
setStyleName(td, TD_FOCUS_CLASSNAME, keyboardFocus);
setStyleName(td, TD_FOCUS_FIRST_CLASSNAME, keyboardFocus && first);
setStyleName(div, DIV_FOCUS_CLASSNAME, keyboardFocus);
} | 3.68 |
framework_MenuTooltip_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 13914;
} | 3.68 |
flink_OptimizerNode_mergeLists | /**
* The node IDs are assigned in graph-traversal order (pre-order), hence, each list is sorted by
* ID in ascending order and all consecutive lists start with IDs in ascending order.
*
* @param markJoinedBranchesAsPipelineBreaking True, if the joined branches should be marked as pipeline breaking.
*/
protected final boolean mergeLists(
List<UnclosedBranchDescriptor> child1open,
List<UnclosedBranchDescriptor> child2open,
List<UnclosedBranchDescriptor> result,
boolean markJoinedBranchesAsPipelineBreaking) {
// remove branches which have already been closed
removeClosedBranches(child1open);
removeClosedBranches(child2open);
result.clear();
// check how many open branches we have. the cases:
// 1) if both are null or empty, the result is null
// 2) if one side is null (or empty), the result is the other side.
// 3) both are set, then we need to merge.
if (child1open == null || child1open.isEmpty()) {
if (child2open != null && !child2open.isEmpty()) {
result.addAll(child2open);
}
return false;
}
if (child2open == null || child2open.isEmpty()) {
result.addAll(child1open);
return false;
}
int index1 = child1open.size() - 1;
int index2 = child2open.size() - 1;
boolean didCloseABranch = false;
// as both lists (child1open and child2open) are sorted in ascending ID order
// we can do a merge-join-like loop which preserves the order in the result list
// and eliminates duplicates
while (index1 >= 0 || index2 >= 0) {
int id1 = -1;
int id2 = index2 >= 0 ? child2open.get(index2).getBranchingNode().getId() : -1;
while (index1 >= 0 && (id1 = child1open.get(index1).getBranchingNode().getId()) > id2) {
result.add(child1open.get(index1));
index1--;
}
while (index2 >= 0 && (id2 = child2open.get(index2).getBranchingNode().getId()) > id1) {
result.add(child2open.get(index2));
index2--;
}
// match: they share a common branching child
if (id1 == id2) {
didCloseABranch = true;
// if this is the latest common child, remember it
OptimizerNode currBanchingNode = child1open.get(index1).getBranchingNode();
long vector1 = child1open.get(index1).getJoinedPathsVector();
long vector2 = child2open.get(index2).getJoinedPathsVector();
// check if this is the same descriptor, (meaning that it contains the same paths)
// if it is the same, add it only once, otherwise process the join of the paths
if (vector1 == vector2) {
result.add(child1open.get(index1));
} else {
// we merge (re-join) a branch
// mark the branch as a point where we break the pipeline
if (markJoinedBranchesAsPipelineBreaking) {
currBanchingNode.markAllOutgoingConnectionsAsPipelineBreaking();
}
if (this.hereJoinedBranches == null) {
this.hereJoinedBranches = new ArrayList<OptimizerNode>(2);
}
this.hereJoinedBranches.add(currBanchingNode);
// see, if this node closes the branch
long joinedInputs = vector1 | vector2;
// this is 2^size - 1, which is all bits set at positions 0..size-1
long allInputs = (0x1L << currBanchingNode.getOutgoingConnections().size()) - 1;
if (joinedInputs == allInputs) {
// closed - we can remove it from the stack
addClosedBranch(currBanchingNode);
} else {
// not quite closed
result.add(new UnclosedBranchDescriptor(currBanchingNode, joinedInputs));
}
}
index1--;
index2--;
}
}
// merged. now we need to reverse the list, because we added the elements in reverse order
Collections.reverse(result);
return didCloseABranch;
} | 3.68 |
AreaShop_GeneralRegion_getDoubleSetting | /**
* Get a double setting for this region, defined as follows
* - If the region has the setting in its own file (/regions/regionName.yml), use that
* - If the region has groups, use the setting defined by the most important group, if any
* - Otherwise fallback to the default.yml file setting
* @param path The path to get the setting of
* @return The value of the setting
*/
public double getDoubleSetting(String path) {
if(config.isSet(path)) {
return config.getDouble(path);
}
double result = 0;
int priority = Integer.MIN_VALUE;
boolean found = false;
for(RegionGroup group : plugin.getFileManager().getGroups()) {
if(group.isMember(this) && group.getSettings().isSet(path) && group.getPriority() > priority) {
result = group.getSettings().getDouble(path);
priority = group.getPriority();
found = true;
}
}
if(found) {
return result;
}
if(this.getFileManager().getRegionSettings().isSet(path)) {
return this.getFileManager().getRegionSettings().getDouble(path);
} else {
return this.getFileManager().getFallbackRegionSettings().getDouble(path);
}
} | 3.68 |
querydsl_MongodbExpressions_near | /**
* Finds the closest points relative to the given location and orders the results with decreasing proximity
*
* @param expr location
* @param latVal latitude
* @param longVal longitude
* @return predicate
*/
public static BooleanExpression near(Expression<Double[]> expr, double latVal, double longVal) {
return Expressions.booleanOperation(MongodbOps.NEAR, expr, ConstantImpl.create(new Double[]{latVal, longVal}));
} | 3.68 |
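A rough sketch of how the predicate above might be used; the `location` path is a hypothetical stand-in for a generated Q-class field holding [latitude, longitude], and the import location of `MongodbExpressions` is assumed.

```java
import com.querydsl.core.types.dsl.ArrayPath;
import com.querydsl.core.types.dsl.BooleanExpression;
import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.mongodb.MongodbExpressions;

public class NearExample {
    public static void main(String[] args) {
        // Hypothetical path for a "location" field stored as [latitude, longitude];
        // in a real project this would come from a generated Q-class.
        ArrayPath<Double[], Double> location =
                Expressions.arrayPath(Double[].class, "location");

        // Order results by proximity to latitude 48.2, longitude 16.37.
        BooleanExpression nearVienna = MongodbExpressions.near(location, 48.2, 16.37);
        System.out.println(nearVienna);
    }
}
```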
framework_ComboBoxElement_openNextPage | /**
* Opens next popup page.
*
* @return true if the next page was opened, false if there is no next page
*/
public boolean openNextPage() {
try {
clickElement(getSuggestionPopup().findElement(byNextPage));
return true;
} catch (WebDriverException e) {
// PhantomJS driver can throw WDE instead of the more specific
// NoSuchElementException
return false;
}
} | 3.68 |
dubbo_TriDecoder_processHeader | /**
* Processes the gRPC compression header, which is composed of the compression flag and the outer
* frame length.
*/
private void processHeader() {
int type = accumulate.readUnsignedByte();
if ((type & RESERVED_MASK) != 0) {
throw new RpcException("gRPC frame header malformed: reserved bits not zero");
}
compressedFlag = (type & COMPRESSED_FLAG_MASK) != 0;
requiredLength = accumulate.readInt();
// Continue reading the frame body.
state = GrpcDecodeState.PAYLOAD;
} | 3.68 |
hadoop_ClientCache_getClient | /**
* Construct & cache an IPC client with the user-provided SocketFactory
* if no cached client exists. Default response type is ObjectWritable.
*
* @param conf Configuration
* @param factory SocketFactory for client socket
* @return an IPC client
*/
public synchronized Client getClient(Configuration conf, SocketFactory factory) {
return this.getClient(conf, factory, ObjectWritable.class);
} | 3.68 |
rocketmq-connect_AbstractLocalSchemaRegistryClient_checkSubjectExists | /**
* Check whether the subject exists.
*
* @param namespace the schema namespace
* @param subject the subject to check
* @return true if the subject exists, false otherwise
*/
public Boolean checkSubjectExists(String namespace, String subject) {
try {
schemaRegistryClient.getSchemaBySubject(cluster, namespace, subject);
return Boolean.TRUE;
} catch (RestClientException | IOException e) {
if (e instanceof RestClientException) {
return Boolean.FALSE;
} else {
throw new RuntimeException(e);
}
}
} | 3.68 |
flink_CoGroupOperator_withPartitioner | /**
* Sets a custom partitioner for the CoGroup operation. The partitioner will be
* called on the join keys to determine the partition a key should be assigned to.
* The partitioner is evaluated on both inputs in the same way.
*
* <p>NOTE: A custom partitioner can only be used with single-field CoGroup keys,
* not with composite CoGroup keys.
*
* @param partitioner The custom partitioner to be used.
* @return This CoGroup operator, to allow for function chaining.
*/
public CoGroupOperatorWithoutFunction withPartitioner(Partitioner<?> partitioner) {
if (partitioner != null) {
keys1.validateCustomPartitioner(partitioner, null);
keys2.validateCustomPartitioner(partitioner, null);
}
this.customPartitioner = input1.clean(partitioner);
return this;
} | 3.68 |
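A hedged sketch of the DataSet co-group chain this method belongs to, using a modulo partitioner on a single-field key; the data sets, key positions, and output format are invented for illustration.

```java
import org.apache.flink.api.common.functions.CoGroupFunction;
import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

public class CoGroupPartitionerExample {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Tuple2<Integer, String>> names =
                env.fromElements(Tuple2.of(1, "alice"), Tuple2.of(2, "bob"));
        DataSet<Tuple2<Integer, Long>> scores =
                env.fromElements(Tuple2.of(1, 10L), Tuple2.of(2, 20L));

        names.coGroup(scores)
                // single-field keys, as required when a custom partitioner is used
                .where(0)
                .equalTo(0)
                .withPartitioner(new Partitioner<Integer>() {
                    @Override
                    public int partition(Integer key, int numPartitions) {
                        // route keys to partitions with a simple modulo scheme
                        return Math.abs(key) % numPartitions;
                    }
                })
                .with(new CoGroupFunction<Tuple2<Integer, String>, Tuple2<Integer, Long>, String>() {
                    @Override
                    public void coGroup(Iterable<Tuple2<Integer, String>> left,
                                        Iterable<Tuple2<Integer, Long>> right,
                                        Collector<String> out) {
                        // emit the name for every key that appears on the left side
                        for (Tuple2<Integer, String> l : left) {
                            out.collect(l.f0 + ":" + l.f1);
                        }
                    }
                })
                .print();
    }
}
```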
flink_JobGraph_getUserArtifacts | /**
* Gets the list of assigned user jar paths.
*
* @return The list of assigned user jar paths
*/
public Map<String, DistributedCache.DistributedCacheEntry> getUserArtifacts() {
return userArtifacts;
} | 3.68 |
flink_EnvironmentSettings_getConfiguration | /** Get the underlying {@link Configuration}. */
public Configuration getConfiguration() {
return configuration;
} | 3.68 |
flink_OpenApiSpecGenerator_overrideIdSchemas | /**
* Various ID classes are effectively internal classes that aren't sufficiently annotated to
* work with automatic schema extraction. This method overrides the schema of these to a string
* regex pattern.
*
* <p>Resulting spec diff:
*
* <pre>
* JobID:
* - type: object
* - properties:
* - upperPart:
* - type: integer
* - format: int64
* - lowerPart:
* - type: integer
* - format: int64
* - bytes:
* - type: array
* - items:
* - type: string
* - format: byte
* + pattern: "[0-9a-f]{32}"
* + type: string
* </pre>
*/
private static void overrideIdSchemas(final OpenAPI openApi) {
final Schema idSchema = new Schema().type("string").pattern("[0-9a-f]{32}");
openApi.getComponents()
.addSchemas(JobID.class.getSimpleName(), idSchema)
.addSchemas(JobVertexID.class.getSimpleName(), idSchema)
.addSchemas(IntermediateDataSetID.class.getSimpleName(), idSchema)
.addSchemas(TriggerId.class.getSimpleName(), idSchema)
.addSchemas(ResourceID.class.getSimpleName(), idSchema);
} | 3.68 |
flink_RocksDBNativeMetricMonitor_registerColumnFamily | /**
* Register gauges to pull native metrics for the column family.
*
* @param columnFamilyName group name for the new gauges
* @param handle native handle to the column family
*/
void registerColumnFamily(String columnFamilyName, ColumnFamilyHandle handle) {
boolean columnFamilyAsVariable = options.isColumnFamilyAsVariable();
MetricGroup group =
columnFamilyAsVariable
? metricGroup.addGroup(COLUMN_FAMILY_KEY, columnFamilyName)
: metricGroup.addGroup(columnFamilyName);
for (String property : options.getProperties()) {
RocksDBNativePropertyMetricView gauge =
new RocksDBNativePropertyMetricView(handle, property);
group.gauge(property, gauge);
}
} | 3.68 |
querydsl_PolygonExpression_interiorRingN | /**
* Returns the Nth interior ring for this Polygon as a LineString.
*
* @param idx one based index
* @return interior ring at index
*/
public LineStringExpression<LineString> interiorRingN(int idx) {
return GeometryExpressions.lineStringOperation(SpatialOps.INTERIOR_RINGN, mixin, ConstantImpl.create(idx));
} | 3.68 |
pulsar_SchemaUtils_jsonifySchemaInfo | /**
* Jsonify the schema info.
*
* @param schemaInfo the schema info
* @return the jsonified schema info
*/
public static String jsonifySchemaInfo(SchemaInfo schemaInfo) {
GsonBuilder gsonBuilder = new GsonBuilder()
.setPrettyPrinting()
.registerTypeHierarchyAdapter(byte[].class, new ByteArrayToStringAdapter(schemaInfo))
.registerTypeHierarchyAdapter(Map.class, SCHEMA_PROPERTIES_SERIALIZER);
return gsonBuilder.create().toJson(schemaInfo);
} | 3.68 |
framework_RowVisibilityChangeEvent_getFirstVisibleRow | /**
* Gets the index of the first row that is at least partially visible.
*
* @return the index of the first visible row
*/
public int getFirstVisibleRow() {
return visibleRows.getStart();
} | 3.68 |
flink_MapValue_containsKey | /*
* (non-Javadoc)
* @see java.util.Map#containsKey(java.lang.Object)
*/
@Override
public boolean containsKey(final Object key) {
return this.map.containsKey(key);
} | 3.68 |
morf_SqlDialect_getSqlForCountDistinct | /**
* Converts the count function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForCountDistinct(Function function) {
return "COUNT(DISTINCT " + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
flink_DistributedRandomSampler_sample | /**
* Combine the first phase and second phase in sequence, implemented for test purposes only.
*
* @param input Source data.
* @return Sample result in sequence.
*/
@Override
public Iterator<T> sample(Iterator<T> input) {
return sampleInCoordinator(sampleInPartition(input));
} | 3.68 |
morf_AbstractSqlDialectTest_testAlterIntegerColumn | /**
* Test altering an integer column.
*/
@Test
public void testAlterIntegerColumn() {
testAlterTableColumn(TEST_TABLE, AlterationType.ALTER, getColumn(TEST_TABLE, INT_FIELD), column(INT_FIELD, DataType.INTEGER), expectedAlterTableAlterIntegerColumnStatement());
} | 3.68 |
hadoop_BalanceJob_getId | /**
* Get the uid of the job.
*/
public String getId() {
return this.id;
} | 3.68 |
flink_FutureUtils_checkStateAndGet | /**
* Checks that the future has completed normally and returns the result.
*
* @return the result of the completable future.
* @throws IllegalStateException Thrown if the future has not completed or has completed
*     exceptionally.
*/
public static <T> T checkStateAndGet(CompletableFuture<T> future) {
checkCompletedNormally(future);
return getWithoutException(future);
} | 3.68 |
dubbo_NettyHttpHandler_executeFilters | /**
* Execute the REST filters in order, stopping once the filter context is complete.
*
* @param restFilterContext
* @param restFilters
* @throws Exception
*/
public void executeFilters(RestFilterContext restFilterContext, List<RestFilter> restFilters) throws Exception {
for (RestFilter restFilter : restFilters) {
restFilter.filter(restFilterContext);
if (restFilterContext.complete()) {
break;
}
}
} | 3.68 |
framework_MouseEventDetails_getName | /**
* Returns a human readable text representing the button.
*
* @return the name of the button
*/
public String getName() {
return name;
} | 3.68 |
hudi_ClusteringPlanStrategy_getPlanVersion | /**
* Returns the plan version, to support future changes to the plan.
*/
protected int getPlanVersion() {
return CLUSTERING_PLAN_VERSION_1;
} | 3.68 |
streampipes_StatementHandler_generatePreparedStatement | /**
* Initializes the variables {@link StatementHandler#eventParameterMap} and {@link StatementHandler#preparedStatement}
* according to the parameter event.
*
* @param event The event which is getting analyzed
* @throws SpRuntimeException When the tablename is not allowed
* @throws SQLException When the prepareStatement cannot be evaluated
*/
public void generatePreparedStatement(DbDescription dbDescription, TableDescription tableDescription,
Connection connection, final Map<String, Object> event)
throws SQLException, SpRuntimeException {
// input: event
// wanted: INSERT INTO test4321 ( randomString, randomValue ) VALUES ( ?,? );
eventParameterMap.clear();
StringBuilder statement1 = new StringBuilder("INSERT INTO ");
StringBuilder statement2 = new StringBuilder("VALUES ( ");
SQLStatementUtils.checkRegEx(tableDescription.getName(), "Tablename", dbDescription);
statement1.append(tableDescription.getName()).append(" ( ");
// Starts index at 1, since the parameterIndex in the PreparedStatement starts at 1 as well
extendPreparedStatement(dbDescription, event, statement1, statement2, 1, "", "");
statement1.append(" ) ");
statement2.append(" );");
String finalStatement = statement1.append(statement2).toString();
this.preparedStatement = connection.prepareStatement(finalStatement);
} | 3.68 |
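The following is a simplified, standalone sketch of the same idea (building an INSERT with `?` placeholders from an event map and binding values by 1-based index); it is not the StreamPipes implementation, and it assumes the table and column names have already been validated, as the checkRegEx call does above.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Map;
import java.util.stream.Collectors;

public class InsertStatementSketch {

    /**
     * Builds e.g. "INSERT INTO test4321 ( randomString, randomValue ) VALUES ( ?,? );"
     * from the event's key set and binds the values by 1-based parameter index.
     * NOTE: table and column names must be validated before being concatenated into SQL.
     */
    static PreparedStatement prepareInsert(Connection connection, String table,
                                           Map<String, Object> event) throws SQLException {
        String columns = String.join(", ", event.keySet());
        String placeholders = event.keySet().stream()
                .map(key -> "?")
                .collect(Collectors.joining(","));
        String sql = "INSERT INTO " + table + " ( " + columns + " ) VALUES ( " + placeholders + " );";
        PreparedStatement statement = connection.prepareStatement(sql);
        int parameterIndex = 1; // JDBC parameter indices start at 1
        for (Object value : event.values()) {
            statement.setObject(parameterIndex++, value);
        }
        return statement;
    }
}
```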
framework_VTree_selectAllChildrenUntil | /**
* Selects all children until a stop child is reached
*
* @param root
* The root node to start from
* @param stopNode
* The node to finish with
* @param includeRootNode
* Should the root node be selected
* @param includeStopNode
* Should the stop node be selected
*
* @return Returns false if the stop child was found, else true if all
* children were selected
*/
private boolean selectAllChildrenUntil(TreeNode root, TreeNode stopNode,
boolean includeRootNode, boolean includeStopNode) {
if (includeRootNode) {
root.setSelected(true);
selectedIds.add(root.key);
}
if (root.getState() && root != stopNode) {
for (TreeNode child : root.getChildren()) {
if (!child.isLeaf() && child.getState() && child != stopNode) {
if (!selectAllChildrenUntil(child, stopNode, true,
includeStopNode)) {
return false;
}
} else if (child == stopNode) {
if (includeStopNode) {
child.setSelected(true);
selectedIds.add(child.key);
}
return false;
} else {
child.setSelected(true);
selectedIds.add(child.key);
}
}
}
selectionHasChanged = true;
return true;
} | 3.68 |
AreaShop_Utils_isNumeric | /**
* Check if an input is numeric.
* @param input The input to check
* @return true if the input is numeric, otherwise false
*/
@SuppressWarnings("ResultOfMethodCallIgnored")
public static boolean isNumeric(String input) {
try {
Integer.parseInt(input);
return true;
} catch(NumberFormatException ignored) {
return false;
}
} | 3.68 |
pulsar_ResourceUnitRanking_getEstimatedLoadPercentageString | /**
* Get the load percentage as a String, with detailed resource usages.
*/
public String getEstimatedLoadPercentageString() {
return String.format(
"msgrate: %.0f, load: %.1f%% - cpu: %.1f%%, mem: %.1f%%, directMemory: %.1f%%, "
+ "bandwidthIn: %.1f%%, bandwidthOut: %.1f%%",
this.estimatedMessageRate,
this.estimatedLoadPercentage, this.estimatedLoadPercentageCPU, this.estimatedLoadPercentageMemory,
this.estimatedLoadPercentageDirectMemory, this.estimatedLoadPercentageBandwidthIn,
this.estimatedLoadPercentageBandwidthOut);
} | 3.68 |
framework_Escalator_getFooter | /**
* Returns the row container for the footer in this Escalator.
*
* @return the footer. Never <code>null</code>
*/
public RowContainer getFooter() {
return footer;
} | 3.68 |
querydsl_PathBuilder_getDate | /**
* Create a new Date path
*
* @param <A>
* @param property property name
* @param type property type
* @return property path
*/
@SuppressWarnings("unchecked")
public <A extends Comparable<?>> DatePath<A> getDate(String property, Class<A> type) {
Class<? extends A> vtype = validate(property, type);
return super.createDate(property, (Class<? super A>) vtype);
} | 3.68 |
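A small usage sketch; the `Person` bean and its `birthDate` property are hypothetical, used only to show how the returned DatePath participates in further expressions.

```java
import com.querydsl.core.types.dsl.BooleanExpression;
import com.querydsl.core.types.dsl.DatePath;
import com.querydsl.core.types.dsl.PathBuilder;

import java.time.LocalDate;

public class PathBuilderDateExample {

    // Hypothetical entity class used only for illustration.
    static class Person {
        LocalDate birthDate;
    }

    public static void main(String[] args) {
        PathBuilder<Person> person = new PathBuilder<>(Person.class, "person");
        // Build a typed date path for the "birthDate" property.
        DatePath<LocalDate> birthDate = person.getDate("birthDate", LocalDate.class);
        // The path can then be combined into further predicates.
        BooleanExpression bornThisCentury = birthDate.after(LocalDate.of(2000, 1, 1));
        System.out.println(bornThisCentury);
    }
}
```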
flink_AsyncDataStream_unorderedWait | /**
* Adds an AsyncWaitOperator. The output stream records may be emitted in a different order than the input records.
*
* @param in Input {@link DataStream}
* @param func {@link AsyncFunction}
* @param timeout for the asynchronous operation to complete
* @param timeUnit of the given timeout
* @param <IN> Type of input record
* @param <OUT> Type of output record
* @return A new {@link SingleOutputStreamOperator}.
*/
public static <IN, OUT> SingleOutputStreamOperator<OUT> unorderedWait(
DataStream<IN> in, AsyncFunction<IN, OUT> func, long timeout, TimeUnit timeUnit) {
return addOperator(
in,
func,
timeUnit.toMillis(timeout),
DEFAULT_QUEUE_CAPACITY,
OutputMode.UNORDERED,
NO_RETRY_STRATEGY);
} | 3.68 |
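A hedged end-to-end sketch of plugging an AsyncFunction into unorderedWait; the lookup function is a stand-in that completes immediately, where a real one would query an external service.

```java
import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.async.AsyncFunction;
import org.apache.flink.streaming.api.functions.async.ResultFuture;

import java.util.Collections;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public class AsyncEnrichmentExample {

    /** Pretends to look up a value asynchronously; a real function would query an external service. */
    static class AsyncLookup implements AsyncFunction<Integer, String> {
        @Override
        public void asyncInvoke(Integer input, ResultFuture<String> resultFuture) {
            CompletableFuture.supplyAsync(() -> "value-" + input)
                    .thenAccept(v -> resultFuture.complete(Collections.singleton(v)));
        }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Integer> ids = env.fromElements(1, 2, 3);
        // Results may arrive in any order; use orderedWait if ordering matters.
        DataStream<String> enriched =
                AsyncDataStream.unorderedWait(ids, new AsyncLookup(), 5, TimeUnit.SECONDS);
        enriched.print();
        env.execute("async-io-sketch");
    }
}
```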
flink_TieredStorageConfiguration_getTotalExclusiveBufferNum | /**
* Get the total exclusive buffer number.
*
* @return the total exclusive buffer number.
*/
public int getTotalExclusiveBufferNum() {
return accumulatorExclusiveBuffers
+ memoryTierExclusiveBuffers
+ diskTierExclusiveBuffers
+ (remoteStorageBasePath == null ? 0 : remoteTierExclusiveBuffers);
} | 3.68 |
zxing_MatrixToImageWriter_writeToStream | /**
* As {@link #writeToStream(BitMatrix, String, OutputStream)}, but allows customization of the output.
*
* @param matrix {@link BitMatrix} to write
* @param format image format
* @param stream {@link OutputStream} to write image to
* @param config output configuration
* @throws IOException if writes to the stream fail
*/
public static void writeToStream(BitMatrix matrix, String format, OutputStream stream, MatrixToImageConfig config)
throws IOException {
BufferedImage image = toBufferedImage(matrix, config);
if (!ImageIO.write(image, format, stream)) {
throw new IOException("Could not write an image of format " + format);
}
} | 3.68 |
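A short usage sketch, assuming a QR code produced with MultiFormatWriter and an ARGB color configuration; the content string, size, colors, and output file are arbitrary.

```java
import com.google.zxing.BarcodeFormat;
import com.google.zxing.MultiFormatWriter;
import com.google.zxing.WriterException;
import com.google.zxing.client.j2se.MatrixToImageConfig;
import com.google.zxing.client.j2se.MatrixToImageWriter;
import com.google.zxing.common.BitMatrix;

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class QrToStreamExample {
    public static void main(String[] args) throws WriterException, IOException {
        // Encode some text as a 300x300 QR code.
        BitMatrix matrix = new MultiFormatWriter()
                .encode("https://example.org", BarcodeFormat.QR_CODE, 300, 300);
        // Dark blue modules on a white background instead of the default black/white.
        MatrixToImageConfig config = new MatrixToImageConfig(0xFF000080, 0xFFFFFFFF);
        try (OutputStream out = new FileOutputStream("qr.png")) {
            MatrixToImageWriter.writeToStream(matrix, "PNG", out, config);
        }
    }
}
```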
hbase_HRegionServer_cleanup | /**
* Cleanup after a Throwable caught while invoking a method. Converts <code>t</code> to an IOE if it
* isn't one already.
* @param t Throwable
* @param msg Message to log in error. Can be null.
* @return Throwable converted to an IOE; methods can only let out IOEs.
*/
private Throwable cleanup(final Throwable t, final String msg) {
// Don't log as error if NSRE; NSRE is 'normal' operation.
if (t instanceof NotServingRegionException) {
LOG.debug("NotServingRegionException; " + t.getMessage());
return t;
}
Throwable e = t instanceof RemoteException ? ((RemoteException) t).unwrapRemoteException() : t;
if (msg == null) {
LOG.error("", e);
} else {
LOG.error(msg, e);
}
if (!rpcServices.checkOOME(t)) {
checkFileSystem();
}
return t;
} | 3.68 |
hbase_HMaster_createActiveMasterManager | /**
* Protected so that custom implementations in tests can override the default ActiveMasterManager
* implementation.
*/
protected ActiveMasterManager createActiveMasterManager(ZKWatcher zk, ServerName sn,
org.apache.hadoop.hbase.Server server) throws InterruptedIOException {
return new ActiveMasterManager(zk, sn, server);
} | 3.68 |
dubbo_DynamicConfiguration_getConfig | /**
* Get the configuration mapped to the given key and the given group with {@link #getDefaultTimeout() the default
* timeout}
*
* @param key the key to represent a configuration
* @param group the group where the key belongs to
* @return target configuration mapped to the given key and the given group
*/
default String getConfig(String key, String group) {
return getConfig(key, group, getDefaultTimeout());
} | 3.68 |
flink_YarnClusterDescriptor_getYarnJobClusterEntrypoint | /**
* The class to start the application master with. This class runs the main method in case of
* the job cluster.
*/
protected String getYarnJobClusterEntrypoint() {
return YarnJobClusterEntrypoint.class.getName();
} | 3.68 |
framework_AbstractInMemoryContainer_fireItemsRemoved | /**
* Notify item set change listeners that items have been removed from the
* container.
*
* @param firstPosition
* position of the first visible removed item in the view prior
* to removal
* @param firstItemId
* id of the first visible removed item, of type {@link Object}
* to satisfy {@link Container#removeItem(Object)} API
* @param numberOfItems
* the number of removed visible items
*
*/
protected void fireItemsRemoved(int firstPosition, Object firstItemId,
int numberOfItems) {
BaseItemRemoveEvent removeEvent = new BaseItemRemoveEvent(this,
firstItemId, firstPosition, numberOfItems);
fireItemSetChange(removeEvent);
} | 3.68 |
flink_Either_isLeft | /** @return true if this is a Left value, false if this is a Right value */
public final boolean isLeft() {
return getClass() == Left.class;
} | 3.68 |
morf_UpdateStatement_set | /**
* Specifies the fields to set.
*
* @param destinationFields the fields to update in the database table
* @return a statement with the changes applied.
*/
public UpdateStatement set(AliasedField... destinationFields) {
if (AliasedField.immutableDslEnabled()) {
return shallowCopy().set(destinationFields).build();
} else {
this.fields.addAll(Arrays.asList(destinationFields));
return this;
}
} | 3.68 |
hbase_SnapshotDescriptionUtils_getCompletedSnapshotDir | /**
* Get the directory for a completed snapshot. This directory is a sub-directory of snapshot root
* directory and all the data files for a snapshot are kept under this directory.
* @param snapshotName name of the snapshot being taken
* @param rootDir hbase root directory
* @return the final directory for the completed snapshot
*/
public static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir) {
return getSpecifiedSnapshotDir(getSnapshotsDir(rootDir), snapshotName);
} | 3.68 |
hbase_Tag_matchingValue | /**
* Matches the value part of given tags
* @param t1 Tag to match the value
* @param t2 Tag to match the value
* @return True if values of both tags are same.
*/
public static boolean matchingValue(Tag t1, Tag t2) {
if (t1.hasArray() && t2.hasArray()) {
return Bytes.equals(t1.getValueArray(), t1.getValueOffset(), t1.getValueLength(),
t2.getValueArray(), t2.getValueOffset(), t2.getValueLength());
}
if (t1.hasArray()) {
return ByteBufferUtils.equals(t2.getValueByteBuffer(), t2.getValueOffset(),
t2.getValueLength(), t1.getValueArray(), t1.getValueOffset(), t1.getValueLength());
}
if (t2.hasArray()) {
return ByteBufferUtils.equals(t1.getValueByteBuffer(), t1.getValueOffset(),
t1.getValueLength(), t2.getValueArray(), t2.getValueOffset(), t2.getValueLength());
}
return ByteBufferUtils.equals(t1.getValueByteBuffer(), t1.getValueOffset(), t1.getValueLength(),
t2.getValueByteBuffer(), t2.getValueOffset(), t2.getValueLength());
} | 3.68 |
hbase_Pair_getFirst | /**
* Return the first element stored in the pair.
*/
public T1 getFirst() {
return first;
} | 3.68 |
framework_FilesystemContainer_setRecursive | /**
* Sets the container recursive property. Set this to false to limit the
* files directly under the root file.
* <p>
* Note : This is meaningful only if the root really is a directory.
* </p>
*
* @param recursive
* the New value for recursive property.
*/
public void setRecursive(boolean recursive) {
this.recursive = recursive;
} | 3.68 |
framework_AbstractInMemoryContainer_internalAddItemAfter | /**
* Add an item after a given (visible) item, and perform filtering. An event
* is fired if the filtered view changes.
*
* The new item is added at the beginning if previousItemId is null.
*
* @param previousItemId
* item id of a visible item after which to add the new item, or
* null to add at the beginning
* @param newItemId id of the new item to add
* @param item
* new item to add
* @param filter
* true to perform filtering and send event after adding the
* item, false to skip these operations for batch inserts - if
* false, caller needs to make sure these operations are
* performed at the end of the batch
* @return item added or null if no item was added
*/
protected ITEMCLASS internalAddItemAfter(ITEMIDTYPE previousItemId,
ITEMIDTYPE newItemId, ITEMCLASS item, boolean filter) {
// only add if the previous item is visible
ITEMCLASS newItem = null;
if (previousItemId == null) {
newItem = internalAddAt(0, newItemId, item);
} else if (containsId(previousItemId)) {
newItem = internalAddAt(getAllItemIds().indexOf(previousItemId) + 1,
newItemId, item);
}
if (newItem != null && filter) {
// TODO filter only this item, use fireItemAdded()
filterAll();
if (!isFiltered()) {
// TODO hack: does not detect change in filterAll() in this case
fireItemAdded(indexOfId(newItemId), newItemId, item);
}
}
return newItem;
} | 3.68 |
hudi_FlatteningTransformer_apply | /**
* Configs supported.
*/
@Override
public Dataset<Row> apply(JavaSparkContext jsc, SparkSession sparkSession, Dataset<Row> rowDataset,
TypedProperties properties) {
try {
// tmp table name doesn't like dashes
String tmpTable = TMP_TABLE.concat(UUID.randomUUID().toString().replace("-", "_"));
LOG.info("Registering tmp table : " + tmpTable);
rowDataset.createOrReplaceTempView(tmpTable);
Dataset<Row> transformed = sparkSession.sql("select " + flattenSchema(rowDataset.schema(), null) + " from " + tmpTable);
sparkSession.catalog().dropTempView(tmpTable);
return transformed;
} catch (Exception e) {
throw new HoodieTransformExecutionException("Failed to apply flattening transformer", e);
}
} | 3.68 |
flink_PythonDriver_constructPythonCommands | /**
* Constructs the Python commands which will be executed in python process.
*
* @param pythonDriverOptions parsed Python command options
*/
static List<String> constructPythonCommands(final PythonDriverOptions pythonDriverOptions) {
final List<String> commands = new ArrayList<>();
// disable output buffer
commands.add("-u");
if (pythonDriverOptions.getEntryPointScript().isPresent()) {
commands.add(pythonDriverOptions.getEntryPointScript().get());
} else {
commands.add("-m");
commands.add(pythonDriverOptions.getEntryPointModule());
}
commands.addAll(pythonDriverOptions.getProgramArgs());
return commands;
} | 3.68 |
hudi_StreamSync_getClusteringInstantOpt | /**
* Schedule clustering.
* Called from {@link HoodieStreamer} when async clustering is enabled.
*
* @return Requested clustering instant.
*/
public Option<String> getClusteringInstantOpt() {
if (writeClient != null) {
return writeClient.scheduleClustering(Option.empty());
} else {
return Option.empty();
}
} | 3.68 |
dubbo_MetadataServiceDelegation_serviceName | /**
* Gets the current Dubbo Service name
*
* @return non-null
*/
@Override
public String serviceName() {
return ApplicationModel.ofNullable(applicationModel).getApplicationName();
} | 3.68 |
flink_Time_toMilliseconds | /**
* Converts the time interval to milliseconds.
*
* @return The time interval in milliseconds.
*/
public long toMilliseconds() {
return unit.toMillis(size);
} | 3.68 |
streampipes_RuntimeResolvableRequestHandler_handleRuntimeResponse | // for backwards compatibility
public RuntimeOptionsResponse handleRuntimeResponse(ResolvesContainerProvidedOptions resolvesOptions,
RuntimeOptionsRequest req) throws SpConfigurationException {
List<Option> availableOptions =
resolvesOptions.resolveOptions(req.getRequestId(),
makeExtractor(req));
SelectionStaticProperty sp = getConfiguredProperty(req);
sp.setOptions(availableOptions);
return new RuntimeOptionsResponse(req, sp);
} | 3.68 |
hbase_RegionScannerImpl_isFilterDone | /** Returns true if a filter rules that the scanner is over (done). */
@Override
public synchronized boolean isFilterDone() throws IOException {
return isFilterDoneInternal();
} | 3.68 |
flink_FlinkAggregateExpandDistinctAggregatesRule_createSelectDistinct | /**
* Given an {@link org.apache.calcite.rel.core.Aggregate} and the ordinals of the arguments to a
* particular call to an aggregate function, creates a 'select distinct' relational expression
* which projects the group columns and those arguments but nothing else.
*
* <p>For example, given
*
* <blockquote>
*
* <pre>select f0, count(distinct f1), count(distinct f2)
* from t group by f0</pre>
*
* </blockquote>
*
* <p>and the argument list
*
* <blockquote>
*
* {2}
*
* </blockquote>
*
* <p>returns
*
* <blockquote>
*
* <pre>select distinct f0, f2 from t</pre>
*
* </blockquote>
*
* <p>The <code>sourceOf</code> map is populated with the source of each column; in this case
* sourceOf.get(0) = 0, and sourceOf.get(1) = 2.
*
* @param relBuilder Relational expression builder
* @param aggregate Aggregate relational expression
* @param argList Ordinals of columns to make distinct
* @param filterArg Ordinal of column to filter on, or -1
* @param sourceOf Out parameter, is populated with a map of where each output field came from
* @return Aggregate relational expression which projects the required columns
*/
private RelBuilder createSelectDistinct(
RelBuilder relBuilder,
Aggregate aggregate,
List<Integer> argList,
int filterArg,
Map<Integer, Integer> sourceOf) {
relBuilder.push(aggregate.getInput());
final List<Pair<RexNode, String>> projects = new ArrayList<>();
final List<RelDataTypeField> childFields = relBuilder.peek().getRowType().getFieldList();
for (int i : aggregate.getGroupSet()) {
sourceOf.put(i, projects.size());
projects.add(RexInputRef.of2(i, childFields));
}
if (filterArg >= 0) {
sourceOf.put(filterArg, projects.size());
projects.add(RexInputRef.of2(filterArg, childFields));
}
for (Integer arg : argList) {
if (filterArg >= 0) {
// Implement
// agg(DISTINCT arg) FILTER $f
// by generating
// SELECT DISTINCT ... CASE WHEN $f THEN arg ELSE NULL END AS arg
// and then applying
// agg(arg)
// as usual.
//
// It works except for (rare) agg functions that need to see null
// values.
final RexBuilder rexBuilder = aggregate.getCluster().getRexBuilder();
final RexInputRef filterRef = RexInputRef.of(filterArg, childFields);
final Pair<RexNode, String> argRef = RexInputRef.of2(arg, childFields);
RexNode condition =
rexBuilder.makeCall(
SqlStdOperatorTable.CASE,
filterRef,
argRef.left,
rexBuilder.makeNullLiteral(argRef.left.getType()));
sourceOf.put(arg, projects.size());
projects.add(Pair.of(condition, "i$" + argRef.right));
continue;
}
if (sourceOf.get(arg) != null) {
continue;
}
sourceOf.put(arg, projects.size());
projects.add(RexInputRef.of2(arg, childFields));
}
relBuilder.project(Pair.left(projects), Pair.right(projects));
// Get the distinct values of the GROUP BY fields and the arguments
// to the agg functions.
relBuilder.push(
aggregate.copy(
aggregate.getTraitSet(),
relBuilder.build(),
ImmutableBitSet.range(projects.size()),
null,
com.google.common.collect.ImmutableList.<AggregateCall>of()));
return relBuilder;
} | 3.68 |
AreaShop_SignsFeature_update | /**
* Update all signs connected to this region.
* @return true if all signs are updated correctly, false if one or more updates failed
*/
public boolean update() {
boolean result = true;
for(RegionSign sign : signs.values()) {
result &= sign.update();
}
return result;
} | 3.68 |
framework_DesignContext_getContext | /**
* Returns the new component context.
*
* @return the context
*
* @since 8.5
*/
public DesignContext getContext() {
return context;
} | 3.68 |
hbase_Constraints_enabled | /**
* Check to see if the given constraint is enabled.
* @param desc {@link TableDescriptor} to check.
* @param clazz {@link Constraint} to check for
* @return <tt>true</tt> if the {@link Constraint} is present and enabled. <tt>false</tt>
* otherwise.
* @throws IOException If the constraint has been improperly stored in the table
*/
public static boolean enabled(TableDescriptor desc, Class<? extends Constraint> clazz)
throws IOException {
// get the kv
Pair<String, String> entry = getKeyValueForClass(desc, clazz);
// its not enabled so just return false. In fact, its not even present!
if (entry == null) {
return false;
}
// get the info about the constraint
Configuration conf = readConfiguration(entry.getSecond());
return conf.getBoolean(ENABLED_KEY, false);
} | 3.68 |
flink_InternalOperatorIOMetricGroup_reuseInputMetricsForTask | /** Causes the containing task to use this operator's input record counter. */
public void reuseInputMetricsForTask() {
TaskIOMetricGroup taskIO = parentMetricGroup.getTaskIOMetricGroup();
taskIO.reuseRecordsInputCounter(this.numRecordsIn);
} | 3.68 |
pulsar_BrokerInterceptor_producerClosed | /**
* Called by the broker when a producer is closed.
*
* @param cnx client Connection
* @param producer Producer object
* @param metadata A map of metadata
*/
default void producerClosed(ServerCnx cnx,
Producer producer,
Map<String, String> metadata) {
} | 3.68 |
hbase_Segment_last | // *** Methods for SegmentsScanner
public Cell last() {
return getCellSet().last();
} | 3.68 |
flink_IOUtils_closeStream | /**
* Closes the stream ignoring {@link IOException}. Must only be called in cleaning up from
* exception handlers.
*
* @param stream the stream to close
*/
public static void closeStream(final java.io.Closeable stream) {
cleanup(null, stream);
} | 3.68 |
dubbo_DubboCertManager_signWithRsa | /**
* Generate key pair with RSA
*
* @return key pair
*/
protected static KeyPair signWithRsa() {
KeyPair keyPair = null;
try {
KeyPairGenerator kpGenerator = KeyPairGenerator.getInstance("RSA");
kpGenerator.initialize(4096);
java.security.KeyPair keypair = kpGenerator.generateKeyPair();
PublicKey publicKey = keypair.getPublic();
PrivateKey privateKey = keypair.getPrivate();
ContentSigner signer = new JcaContentSignerBuilder("SHA256WithRSA").build(keypair.getPrivate());
keyPair = new KeyPair(publicKey, privateKey, signer);
} catch (NoSuchAlgorithmException | OperatorCreationException e) {
logger.error(
CONFIG_SSL_CERT_GENERATE_FAILED,
"",
"",
"Generate Key with SHA256WithRSA algorithm failed. Please check if your system support.",
e);
}
return keyPair;
} | 3.68 |
hbase_SimpleRegionNormalizer_getAverageRegionSizeMb | /**
* Also make sure tableRegions contains regions of the same table
* @param tableRegions regions of table to normalize
* @param tableDescriptor the TableDescriptor
* @return average region size in MB, depending on the configured normalizer targets
* @see TableDescriptor#getNormalizerTargetRegionCount()
*/
private double getAverageRegionSizeMb(final List<RegionInfo> tableRegions,
final TableDescriptor tableDescriptor) {
if (isEmpty(tableRegions)) {
throw new IllegalStateException(
"Cannot calculate average size of a table without any regions.");
}
TableName table = tableDescriptor.getTableName();
double avgRegionSize;
int targetRegionCount = tableDescriptor.getNormalizerTargetRegionCount();
long targetRegionSize = tableDescriptor.getNormalizerTargetRegionSize();
LOG.debug("Table {} configured with target region count {}, target region size {} MB", table,
targetRegionCount, targetRegionSize);
if (targetRegionSize > 0) {
avgRegionSize = targetRegionSize;
} else {
final int regionCount = tableRegions.size();
final long totalSizeMb = tableRegions.stream().mapToLong(this::getRegionSizeMB).sum();
if (targetRegionCount > 0) {
avgRegionSize = totalSizeMb / (double) targetRegionCount;
} else {
avgRegionSize = totalSizeMb / (double) regionCount;
}
LOG.debug("Table {}, total aggregated regions size: {} MB and average region size {} MB",
table, totalSizeMb, String.format("%.3f", avgRegionSize));
}
return avgRegionSize;
} | 3.68 |
hbase_Call_toShortString | /**
* Builds a simplified {@link #toString()} that includes just the id and method name.
*/
public String toShortString() {
return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("id", id)
.append("methodName", md.getName()).toString();
} | 3.68 |
querydsl_AbstractHibernateQuery_setLockMode | /**
* Set the lock mode for the given path.
* @return the current object
*/
@SuppressWarnings("unchecked")
public Q setLockMode(Path<?> path, LockMode lockMode) {
lockModes.put(path, lockMode);
return (Q) this;
} | 3.68 |
hbase_ServerManager_createDestinationServersList | /**
* Calls {@link #createDestinationServersList} without server to exclude.
*/
public List<ServerName> createDestinationServersList() {
return createDestinationServersList(null);
} | 3.68 |
framework_ComboBox_isScrollToSelectedItem | /**
* Returns true if the select should find the page with the selected item
* when opening the popup (single select combo box only).
*
* @see #setScrollToSelectedItem(boolean)
*
* @return true if the page with the selected item will be shown when
* opening the popup
*/
public boolean isScrollToSelectedItem() {
return scrollToSelectedItem;
} | 3.68 |
framework_AbstractListing_setItemCaptionGenerator | /**
* Sets the item caption generator that is used to produce the strings shown
* in the combo box for each item. By default,
* {@link String#valueOf(Object)} is used.
*
* @param itemCaptionGenerator
* the item caption provider to use, not null
*/
protected void setItemCaptionGenerator(
ItemCaptionGenerator<T> itemCaptionGenerator) {
Objects.requireNonNull(itemCaptionGenerator,
"Item caption generators must not be null");
this.itemCaptionGenerator = itemCaptionGenerator;
getDataCommunicator().reset();
} | 3.68 |
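The method above is the protected helper in AbstractListing; concrete components such as ComboBox expose it publicly. A rough Vaadin 8 sketch with a hypothetical Person bean:

```java
import com.vaadin.ui.ComboBox;

import java.util.Arrays;

public class CaptionGeneratorExample {

    // Hypothetical bean used only for illustration.
    static class Person {
        private final String name;
        Person(String name) { this.name = name; }
        String getName() { return name; }
    }

    static ComboBox<Person> buildCombo() {
        ComboBox<Person> combo = new ComboBox<>("Assignee");
        combo.setItems(Arrays.asList(new Person("Ada"), new Person("Linus")));
        // Show each item's name instead of Person.toString().
        combo.setItemCaptionGenerator(Person::getName);
        return combo;
    }
}
```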
dubbo_Utf8Utils_isNotTrailingByte | /**
* Returns whether the byte is not a valid continuation of the form '10XXXXXX'.
*/
private static boolean isNotTrailingByte(byte b) {
return b > (byte) 0xBF;
} | 3.68 |
hbase_StorageClusterVersionModel_setVersion | /**
* @param version the storage cluster version
*/
public void setVersion(String version) {
this.version = version;
} | 3.68 |
hadoop_YarnServerSecurityUtils_parseCredentials | /**
* Parses the container launch context and returns a Credential instance that
* contains all the tokens from the launch context.
*
* @param launchContext ContainerLaunchContext.
* @return the credential instance
* @throws IOException if there are I/O errors.
*/
public static Credentials parseCredentials(
ContainerLaunchContext launchContext) throws IOException {
Credentials credentials = new Credentials();
ByteBuffer tokens = launchContext.getTokens();
if (tokens != null) {
DataInputByteBuffer buf = new DataInputByteBuffer();
tokens.rewind();
buf.reset(tokens);
credentials.readTokenStorageStream(buf);
if (LOG.isDebugEnabled()) {
for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
LOG.debug("{}={}", tk.getService(), tk);
}
}
}
return credentials;
} | 3.68 |
hadoop_AllocateResponse_setUpdateErrors | /**
* Set the list of container update errors to inform the
* Application Master about the container updates that could not be
* satisfied due to error.
* @param updateErrors list of <code>UpdateContainerError</code> for
* containers updates requests that were in error
*/
@Public
@Unstable
public void setUpdateErrors(List<UpdateContainerError> updateErrors) {
} | 3.68 |
hbase_MetricsConnection_getNumActionsPerServerHist | /** numActionsPerServerHist metric */
public Histogram getNumActionsPerServerHist() {
return numActionsPerServerHist;
} | 3.68 |
hbase_NamespacesInstanceResource_post | /**
* Build a response for a POST request that creates a namespace with the specified properties.
* @param model properties used for create.
* @param uriInfo (JAX-RS context variable) request URL
* @return response code.
*/
@POST
@Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response post(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) {
return processUpdate(model, false, uriInfo);
} | 3.68 |
hbase_HMaster_getClientIdAuditPrefix | /** Returns Client info for use as prefix on an audit log string; who did an action */
@Override
public String getClientIdAuditPrefix() {
return "Client=" + RpcServer.getRequestUserName().orElse(null) + "/"
+ RpcServer.getRemoteAddress().orElse(null);
} | 3.68 |
flink_MemoryLogger_getMemoryPoolStatsAsString | /**
* Gets the memory pool statistics from the JVM.
*
* @param poolBeans The collection of memory pool beans.
* @return A string denoting the names and sizes of the memory pools.
*/
public static String getMemoryPoolStatsAsString(List<MemoryPoolMXBean> poolBeans) {
StringBuilder bld = new StringBuilder("Off-heap pool stats: ");
int count = 0;
for (MemoryPoolMXBean bean : poolBeans) {
if (bean.getType() == MemoryType.NON_HEAP) {
if (count > 0) {
bld.append(", ");
}
count++;
MemoryUsage usage = bean.getUsage();
long used = usage.getUsed() >> 20;
long committed = usage.getCommitted() >> 20;
long max = usage.getMax() >> 20;
bld.append('[').append(bean.getName()).append(": ");
bld.append(used).append('/').append(committed).append('/').append(max);
bld.append(" MB (used/committed/max)]");
}
}
return bld.toString();
} | 3.68 |
pulsar_PrometheusMetricStreams_flushAllToStream | /**
* Flush all the stored metrics to the supplied stream.
* @param stream the stream to write to.
*/
void flushAllToStream(SimpleTextOutputStream stream) {
metricStreamMap.values().forEach(s -> stream.write(s.getBuffer()));
} | 3.68 |