name | code_snippet | score |
---|---|---|
framework_DDEventHandleStrategy_handleDragEnter | /**
* Handles drag enter on new element.
*
* @param mediator
* VDragAndDropManager data accessor
* @param target
* target element over which DnD event has happened
*/
protected void handleDragEnter(Element target, DDManagerMediator mediator) {
VDragAndDropManager manager = mediator.getManager();
if (manager.getCurrentDropHandler() != null) {
mediator.getDragEvent().setElementOver(target);
manager.getCurrentDropHandler().dragEnter(mediator.getDragEvent());
}
} | 3.68 |
flink_StreamingFileSink_build | /** Creates the actual sink. */
public StreamingFileSink<IN> build() {
return new StreamingFileSink<>(this, bucketCheckInterval);
} | 3.68 |
framework_AbstractComponentTest_populateSettingsMenu | /**
* Override to add items to the "settings" menu.
*
* NOTE: call the super class implementation first to preserve the current order. If you
* override this in a class and another class overrides it as well, you might break tests
* because the wrong items will be selected.
*
* @param settingsMenu the "settings" menu to add items to
*/
protected void populateSettingsMenu(MenuItem settingsMenu) {
MenuItem showEventLog = settingsMenu.addItem("Show event log",
new MenuBar.Command() {
@Override
public void menuSelected(MenuItem selectedItem) {
boolean selected = !isSelected(selectedItem);
setLogVisible(selected);
setSelected(selectedItem, selected);
}
});
setSelected(showEventLog, true);
settingsMenu.addItem("Clear log", new MenuBar.Command() {
@Override
public void menuSelected(MenuItem selectedItem) {
log.clear();
}
});
MenuItem layoutSize = settingsMenu.addItem("Parent layout size", null);
MenuItem layoutWidth = layoutSize.addItem("Width", null);
MenuItem layoutHeight = layoutSize.addItem("Height", null);
for (final String name : sizeOptions.keySet()) {
layoutWidth.addItem(name, new MenuBar.Command() {
@Override
public void menuSelected(MenuItem selectedItem) {
getTestComponents().get(0).getParent()
.setWidth(sizeOptions.get(name));
log("Parent layout width set to " + name);
}
});
layoutHeight.addItem(name, new MenuBar.Command() {
@Override
public void menuSelected(MenuItem selectedItem) {
getTestComponents().get(0).getParent()
.setHeight(sizeOptions.get(name));
log("Parent layout height set to " + name);
}
});
}
} | 3.68 |
hbase_MetaFixer_fixOverlaps | /**
* Fix overlaps noted in the CatalogJanitor (CJ) consistency report.
*/
List<Long> fixOverlaps(CatalogJanitorReport report) throws IOException {
List<Long> pidList = new ArrayList<>();
for (Set<RegionInfo> regions : calculateMerges(maxMergeCount, report.getOverlaps())) {
RegionInfo[] regionsArray = regions.toArray(new RegionInfo[] {});
try {
pidList.add(this.masterServices.mergeRegions(regionsArray, true, HConstants.NO_NONCE,
HConstants.NO_NONCE));
} catch (MergeRegionException mre) {
LOG.warn("Failed overlap fix of {}", regionsArray, mre);
}
}
return pidList;
} | 3.68 |
hadoop_Validate_checkState | /**
* Check state.
* @param expression expression which must hold.
* @param format format string
* @param args arguments for the error string
* @throws IllegalStateException if the state is not valid.
*/
public static void checkState(boolean expression,
String format,
Object... args) {
if (!expression) {
throw new IllegalStateException(String.format(format, args));
}
} | 3.68 |
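A minimal usage sketch for the helper above; the variable and message are invented for illustration:
// Hypothetical call site: fails fast with a formatted message when the state is invalid.
int pendingTasks = 0;
Validate.checkState(pendingTasks > 0,
    "expected at least one pending task but found %d", pendingTasks);
// throws IllegalStateException: "expected at least one pending task but found 0"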
hbase_BulkLoadHFilesTool_inferBoundaries | /**
* Infers region boundaries for a new table.
* <p/>
* Parameter: <br/>
* bdryMap is a map from keys to an integer belonging to {+1, -1}
* <ul>
* <li>If a key is a start key of a file, then it maps to +1</li>
* <li>If a key is an end key of a file, then it maps to -1</li>
* </ul>
* <p>
* Algo:<br/>
* <ol>
* <li>Poll on the keys in order:
* <ol type="a">
* <li>Keep adding the mapped values to these keys (runningSum)</li>
* <li>Each time runningSum reaches 0, add the start Key from when the runningSum had started to a
* boundary list.</li>
* </ol>
* </li>
* <li>Return the boundary list.</li>
* </ol>
*/
public static byte[][] inferBoundaries(SortedMap<byte[], Integer> bdryMap) {
List<byte[]> keysArray = new ArrayList<>();
int runningValue = 0;
byte[] currStartKey = null;
boolean firstBoundary = true;
for (Map.Entry<byte[], Integer> item : bdryMap.entrySet()) {
if (runningValue == 0) {
currStartKey = item.getKey();
}
runningValue += item.getValue();
if (runningValue == 0) {
if (!firstBoundary) {
keysArray.add(currStartKey);
}
firstBoundary = false;
}
}
return keysArray.toArray(new byte[0][]);
} | 3.68 |
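A small worked sketch of the boundary inference above, assuming java.util.TreeMap and HBase's Bytes utility (Bytes.BYTES_COMPARATOR, Bytes.toBytes); the two HFile key ranges are invented for illustration:
// Two HFiles covering [a, c) and [d, f): start keys map to +1, end keys to -1.
SortedMap<byte[], Integer> bdryMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
bdryMap.put(Bytes.toBytes("a"), 1);
bdryMap.put(Bytes.toBytes("c"), -1);
bdryMap.put(Bytes.toBytes("d"), 1);
bdryMap.put(Bytes.toBytes("f"), -1);
byte[][] boundaries = BulkLoadHFilesTool.inferBoundaries(bdryMap);
// The running sum returns to 0 at "c" (first boundary, skipped) and again at "f",
// so only "d" (the start key of the second group) is emitted as a split point.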
dubbo_AbstractServerCall_responseErr | /**
* Responds with an error for failures in stream creation, unsupported configuration, or triple protocol errors.
*
* @param status response status
*/
protected void responseErr(TriRpcStatus status) {
if (closed) {
return;
}
closed = true;
stream.complete(status, null, false, CommonConstants.TRI_EXCEPTION_CODE_NOT_EXISTS);
LOGGER.error(
PROTOCOL_FAILED_REQUEST,
"",
"",
"Triple request error: service=" + serviceName + " method" + methodName,
status.asException());
} | 3.68 |
querydsl_StringExpression_isNotEmpty | /**
* Create a {@code !this.isEmpty()} expression
*
* <p>Return true if this String is not empty</p>
*
* @return !this.isEmpty()
* @see java.lang.String#isEmpty()
*/
public BooleanExpression isNotEmpty() {
return isEmpty().not();
} | 3.68 |
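A hedged usage sketch; QPerson is a hypothetical Querydsl-generated query type with a string path named "name":
// person.name.isNotEmpty() builds the boolean expression !person.name.isEmpty()
QPerson person = QPerson.person;          // hypothetical generated type
BooleanExpression hasName = person.name.isNotEmpty();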
hbase_LruCachedBlockQueue_poll | /** Returns The next element in this queue, or {@code null} if the queue is empty. */
public LruCachedBlock poll() {
return queue.poll();
} | 3.68 |
hbase_LongComparator_parseFrom | /**
* Parses a serialized representation of {@link LongComparator}
* @param pbBytes A pb serialized {@link LongComparator} instance
* @return An instance of {@link LongComparator} made from <code>bytes</code>
* @throws DeserializationException if an error occurred
* @see #toByteArray
*/
public static LongComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.LongComparator proto;
try {
proto = ComparatorProtos.LongComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
return new LongComparator(Bytes.toLong(proto.getComparable().getValue().toByteArray()));
} | 3.68 |
pulsar_ManagedLedgerImpl_isValidPosition | /**
* Validate whether a specified position is valid for the current managed ledger.
*
* @param position
* the position to validate
* @return true if the position is valid, false otherwise
*/
public boolean isValidPosition(PositionImpl position) {
PositionImpl lac = lastConfirmedEntry;
if (log.isDebugEnabled()) {
log.debug("IsValid position: {} -- last: {}", position, lac);
}
if (!ledgers.containsKey(position.getLedgerId())){
return false;
} else if (position.getEntryId() < 0) {
return false;
} else if (currentLedger != null && position.getLedgerId() == currentLedger.getId()) {
// If current ledger is empty, the largest read position can be "{current_ledger: 0}".
// Otherwise, the read position can be set to "{LAC + 1}" when subscribing at LATEST.
return (position.getLedgerId() == lac.getLedgerId() && position.getEntryId() <= lac.getEntryId() + 1)
|| position.getEntryId() == 0;
} else if (position.getLedgerId() == lac.getLedgerId()) {
// The ledger which maintains the LAC was closed, and there is an empty current ledger.
// If entry id is larger than LAC, it should be "{current_ledger: 0}".
return position.getEntryId() <= lac.getEntryId();
} else {
// Look in the ledgers map
LedgerInfo ls = ledgers.get(position.getLedgerId());
if (ls == null) {
if (position.getLedgerId() < lac.getLedgerId()) {
// Pointing to a non-existing ledger that is older than the current ledger is invalid
return false;
} else {
// Pointing to a non-existing ledger is only legitimate if the ledger was empty
return position.getEntryId() == 0;
}
}
return position.getEntryId() < ls.getEntries();
}
} | 3.68 |
hadoop_SaslParticipant_createServerSaslParticipant | /**
* Creates a SaslParticipant wrapping a SaslServer.
*
* @param saslProps properties of SASL negotiation
* @param callbackHandler for handling all SASL callbacks
* @return SaslParticipant wrapping SaslServer
* @throws SaslException for any error
*/
public static SaslParticipant createServerSaslParticipant(
Map<String, String> saslProps, CallbackHandler callbackHandler)
throws SaslException {
initializeSaslServerFactory();
return new SaslParticipant(saslServerFactory.createSaslServer(MECHANISM,
PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
} | 3.68 |
MagicPlugin_BaseSpell_getPlayerFacing | /**
* Get the direction the player is facing as a BlockFace.
*
* @return a BlockFace representing the direction the player is facing
*/
public BlockFace getPlayerFacing()
{
return getFacing(getLocation());
} | 3.68 |
morf_DataValueLookup_getLong | /**
* Gets the value as a long. Will attempt conversion where possible
* and throw a suitable conversion exception if the conversion fails.
* May return {@code null} if the value is not set or is explicitly set
* to {@code null}.
*
* @param name The column name.
* @return The value.
*/
public default Long getLong(String name) {
String value = getValue(name);
return value == null ? null : Long.valueOf(value);
} | 3.68 |
hbase_RegionMover_build | /**
* This method builds the appropriate RegionMover object, which can then be used to load/unload
* regions using the load and unload methods.
* @return RegionMover object
*/
public RegionMover build() throws IOException {
return new RegionMover(this);
} | 3.68 |
graphhopper_MMapDataAccess_load | /**
* Load memory mapped files into physical memory.
*/
public void load(int percentage) {
if (percentage < 0 || percentage > 100)
throw new IllegalArgumentException("Percentage for MMapDataAccess.load for " + getName() + " must be in [0,100] but was " + percentage);
int max = Math.round(segments.size() * percentage / 100f);
for (int i = 0; i < max; i++) {
segments.get(i).load();
}
} | 3.68 |
hbase_RegionServerSnapshotManager_cancelTasks | /**
* This attempts to cancel all pending and in-progress tasks (without issuing interruptions).
*/
void cancelTasks() throws InterruptedException {
Collection<Future<Void>> tasks = futures;
LOG.debug("cancelling " + tasks.size() + " tasks for snapshot " + name);
for (Future<Void> f : tasks) {
// TODO Ideally we'd interrupt hbase threads when we cancel. However it seems that there
// are places in the HBase code where row/region locks are taken and not released in a
// finally block. Thus we cancel without interrupting. Cancellations will be slower to
// complete but we won't suffer from unreleased locks due to poor code discipline.
f.cancel(false);
}
// evict remaining tasks and futures from taskPool.
futures.clear();
while (taskPool.poll() != null) {
}
stop();
} | 3.68 |
streampipes_DataExplorerUtils_sanitizeAndRegisterAtDataLake | /**
* Sanitizes the event schema and stores the DataLakeMeasure in CouchDB
*
* @param client StreamPipes client to store measure
* @param measure DataLakeMeasurement
*/
public static DataLakeMeasure sanitizeAndRegisterAtDataLake(IStreamPipesClient client,
DataLakeMeasure measure) throws SpRuntimeException {
sanitizeDataLakeMeasure(measure);
registerAtDataLake(client, measure);
return measure;
} | 3.68 |
framework_VLayoutSlot_getWrapperElement | /**
* Returns the wrapper element for the contents of this slot.
*
* @return the wrapper element
*/
@SuppressWarnings("deprecation")
public com.google.gwt.user.client.Element getWrapperElement() {
return DOM.asOld(wrapper);
} | 3.68 |
hbase_AccessControlUtil_buildRevokeRequest | /**
* Create a request to revoke user table permissions.
* @param username the short user name whose permissions to be revoked
* @param tableName optional table name the permissions apply
* @param family optional column family
* @param qualifier optional qualifier
* @param actions the permissions to be revoked
* @return A {@link AccessControlProtos} RevokeRequest
*/
public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username,
TableName tableName, byte[] family, byte[] qualifier,
AccessControlProtos.Permission.Action... actions) {
AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder();
AccessControlProtos.TablePermission.Builder permissionBuilder =
AccessControlProtos.TablePermission.newBuilder();
for (AccessControlProtos.Permission.Action a : actions) {
permissionBuilder.addAction(a);
}
if (tableName != null) {
permissionBuilder.setTableName(ProtobufUtil.toProtoTableName(tableName));
}
if (family != null) {
permissionBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family));
}
if (qualifier != null) {
permissionBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(qualifier));
}
ret.setType(AccessControlProtos.Permission.Type.Table).setTablePermission(permissionBuilder);
return AccessControlProtos.RevokeRequest.newBuilder()
.setUserPermission(AccessControlProtos.UserPermission.newBuilder()
.setUser(ByteString.copyFromUtf8(username)).setPermission(ret))
.build();
} | 3.68 |
framework_BrowserInfo_getBrowserMajorVersion | /**
* Returns the browser major version e.g., 3 for Firefox 3.5, 4 for Chrome
* 4, 8 for Internet Explorer 8.
* <p>
* Note that Internet Explorer 8 and newer will return the document mode so
* IE8 rendering as IE7 will return 7.
* </p>
*
* @return The major version of the browser.
*/
public int getBrowserMajorVersion() {
return browserDetails.getBrowserMajorVersion();
} | 3.68 |
dubbo_DubboDefaultPropertiesEnvironmentPostProcessor_addOrReplace | /**
* Copy from BusEnvironmentPostProcessor#addOrReplace(MutablePropertySources, Map)
*
* @param propertySources {@link MutablePropertySources}
* @param map Default Dubbo Properties
*/
private void addOrReplace(MutablePropertySources propertySources, Map<String, Object> map) {
MapPropertySource target = null;
if (propertySources.contains(PROPERTY_SOURCE_NAME)) {
PropertySource<?> source = propertySources.get(PROPERTY_SOURCE_NAME);
if (source instanceof MapPropertySource) {
target = (MapPropertySource) source;
for (Map.Entry<String, Object> entry : map.entrySet()) {
String key = entry.getKey();
if (!target.containsProperty(key)) {
target.getSource().put(key, entry.getValue());
}
}
}
}
if (target == null) {
target = new MapPropertySource(PROPERTY_SOURCE_NAME, map);
}
if (!propertySources.contains(PROPERTY_SOURCE_NAME)) {
propertySources.addLast(target);
}
} | 3.68 |
hbase_MasterObserver_postMoveServersAndTables | /**
* Called after servers and tables are moved to the target region server group
* @param ctx the environment to interact with the framework and master
* @param servers set of servers to move
* @param tables set of tables to move
* @param targetGroup name of the target group
*/
default void postMoveServersAndTables(final ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
} | 3.68 |
hadoop_TimelineHBaseSchemaConstants_getUsernameSplits | /**
* @return a defensive copy of the predefined splits, where a username is used as a prefix.
*/
public static byte[][] getUsernameSplits() {
byte[][] kloon = USERNAME_SPLITS.clone();
// Deep copy.
for (int row = 0; row < USERNAME_SPLITS.length; row++) {
kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
}
return kloon;
} | 3.68 |
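A tiny sketch of why the per-row copy above is needed: clone() on a two-dimensional array only copies the outer array, so the inner byte[] rows would still be shared (the arrays below are invented for illustration):
byte[][] original = { {1, 2}, {3, 4} };
byte[][] shallow = original.clone();   // new outer array, but the same inner byte[] rows
shallow[0][0] = 9;                     // also visible as original[0][0] == 9
// Copying each row (as getUsernameSplits() does) prevents callers from mutating the shared splits.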
framework_Table_addColumnResizeListener | /**
* Adds a column resize listener to the Table. A column resize listener is
* called when a user resizes a column's width.
*
* @param listener
* The listener to attach to the Table
*/
public void addColumnResizeListener(ColumnResizeListener listener) {
addListener(TableConstants.COLUMN_RESIZE_EVENT_ID,
ColumnResizeEvent.class, listener,
ColumnResizeEvent.COLUMN_RESIZE_METHOD);
} | 3.68 |
hadoop_RandomResolver_getFirstNamespace | /**
* Get a random name space from the path.
*
* @param path Path ignored by this policy.
* @param loc Federated location with multiple destinations.
* @return Random name space.
*/
public String getFirstNamespace(final String path, final PathLocation loc) {
final Set<String> namespaces = (loc == null) ? null : loc.getNamespaces();
if (CollectionUtils.isEmpty(namespaces)) {
LOG.error("Cannot get namespaces for {}", loc);
return null;
}
final int index = ThreadLocalRandom.current().nextInt(namespaces.size());
return Iterables.get(namespaces, index);
} | 3.68 |
hbase_SaslAuthMethod_getSaslMechanism | /**
* Returns the SASL mechanism used by this authentication method.
*/
public String getSaslMechanism() {
return saslMech;
} | 3.68 |
hbase_VersionInfoUtil_getCurrentClientVersionInfo | /** Returns the versionInfo extracted from the current RpcCallContext */
public static HBaseProtos.VersionInfo getCurrentClientVersionInfo() {
return RpcServer.getCurrentCall().map(RpcCallContext::getClientVersionInfo)
.orElse(NonCallVersion.get());
} | 3.68 |
hadoop_HsSingleCounterPage_counterTableInit | /**
* @return The end of a javascript map that is the jquery datatable
* configuration for the jobs table. the Jobs table is assumed to be
* rendered by the class returned from {@link #content()}
*/
private String counterTableInit() {
return tableInit().
append(", aoColumnDefs:[").
append("{'sType':'title-numeric', 'aTargets': [ 1 ] }").
append("]} | 3.68 |
pulsar_TopicsBase_processPublishMessageResults | // Process results for all message publishing attempts
private void processPublishMessageResults(List<ProducerAck> produceMessageResults,
List<CompletableFuture<PositionImpl>> publishResults) {
// process publish message result
for (int index = 0; index < publishResults.size(); index++) {
try {
PositionImpl position = publishResults.get(index).get();
MessageId messageId = new MessageIdImpl(position.getLedgerId(), position.getEntryId(),
Integer.parseInt(produceMessageResults.get(index).getMessageId()));
produceMessageResults.get(index).setMessageId(messageId.toString());
} catch (Exception e) {
if (log.isDebugEnabled()) {
log.debug("Fail publish [{}] message with rest produce message request for topic {}",
index, topicName);
}
if (e instanceof BrokerServiceException.TopicNotFoundException) {
// Topic ownership might have changed, force to look up again.
pulsar().getBrokerService().getOwningTopics().remove(topicName.getPartitionedTopicName());
}
extractException(e, produceMessageResults.get(index));
}
}
} | 3.68 |
hadoop_ValueAggregatorBaseDescriptor_configure | /**
* Configures this descriptor from the job configuration, reading the maximum number of unique values to aggregate.
*
* @param job a job configuration object
*/
public void configure(JobConf job) {
super.configure(job);
maxNumItems = job.getLong("aggregate.max.num.unique.values",
Long.MAX_VALUE);
} | 3.68 |
AreaShop_SignsFeature_getSignsByChunk | /**
* Get the map with signs by chunk.
* @return Map with signs by chunk: {@code chunkString -> List<RegionSign>}
*/
public static Map<String, List<RegionSign>> getSignsByChunk() {
return signsByChunk;
} | 3.68 |
pulsar_ConsumerConfiguration_setConsumerEventListener | /**
* Sets a {@link ConsumerEventListener} for the consumer.
*
* <p>
* The consumer group listener is used for receiving consumer state changes in a consumer group for failover
* subscriptions. The application can then react to the consumer state changes.
*
* <p>
* This change is experimental. It is subject to changes coming in release 2.0.
*
* @param listener
* the consumer group listener object
* @return consumer configuration
* @since 2.0
*/
public ConsumerConfiguration setConsumerEventListener(ConsumerEventListener listener) {
Objects.requireNonNull(listener);
conf.setConsumerEventListener(listener);
return this;
} | 3.68 |
flink_CompactingHashTable_insertOrReplaceRecord | /**
* Replaces record in hash table if record already present or append record if not. May trigger
* expensive compaction.
*
* @param record record to insert or replace
* @throws IOException
*/
public void insertOrReplaceRecord(T record) throws IOException {
if (this.closed) {
return;
}
final int searchHashCode = MathUtils.jenkinsHash(this.buildSideComparator.hash(record));
final int posHashCode = searchHashCode % this.numBuckets;
// get the bucket for the given hash code
final MemorySegment originalBucket =
this.buckets[posHashCode >> this.bucketsPerSegmentBits];
final int originalBucketOffset =
(posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
MemorySegment bucket = originalBucket;
int bucketInSegmentOffset = originalBucketOffset;
// get the basic characteristics of the bucket
final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
final InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
final MemorySegment[] overflowSegments = partition.overflowSegments;
this.buildSideComparator.setReference(record);
int countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
int numInSegment = 0;
int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
// loop over all segments that are involved in the bucket (original bucket plus overflow
// buckets)
while (true) {
while (numInSegment < countInSegment) {
final int thisCode = bucket.getInt(posInSegment);
posInSegment += HASH_CODE_LEN;
// check if the hash code matches
if (thisCode == searchHashCode) {
// get the pointer to the pair
final int pointerOffset =
bucketInSegmentOffset
+ BUCKET_POINTER_START_OFFSET
+ (numInSegment * POINTER_LEN);
final long pointer = bucket.getLong(pointerOffset);
// deserialize the key to check whether it is really equal, or whether we had
// only a hash collision
T valueAtPosition = partition.readRecordAt(pointer);
if (this.buildSideComparator.equalToReference(valueAtPosition)) {
long newPointer = insertRecordIntoPartition(record, partition, true);
bucket.putLong(pointerOffset, newPointer);
return;
}
}
numInSegment++;
}
// this segment is done. check if there is another chained bucket
long newForwardPointer = bucket.getLong(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
if (newForwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
// nothing found. append and insert
long pointer = insertRecordIntoPartition(record, partition, false);
if (countInSegment < NUM_ENTRIES_PER_BUCKET) {
// we are good in our current bucket, put the values
bucket.putInt(
bucketInSegmentOffset
+ BUCKET_HEADER_LENGTH
+ (countInSegment * HASH_CODE_LEN),
searchHashCode); // hash code
bucket.putLong(
bucketInSegmentOffset
+ BUCKET_POINTER_START_OFFSET
+ (countInSegment * POINTER_LEN),
pointer); // pointer
bucket.putInt(
bucketInSegmentOffset + HEADER_COUNT_OFFSET,
countInSegment + 1); // update count
} else {
insertBucketEntryFromStart(
originalBucket,
originalBucketOffset,
searchHashCode,
pointer,
partitionNumber);
}
return;
}
final int overflowSegNum = (int) (newForwardPointer >>> 32);
bucket = overflowSegments[overflowSegNum];
bucketInSegmentOffset = (int) newForwardPointer;
countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
numInSegment = 0;
}
} | 3.68 |
hadoop_Paths_getMultipartUploadCommitsDirectory | /**
* Build a qualified temporary path for the multipart upload commit
* information in the supplied filesystem
* (which is expected to be the cluster FS).
* Currently {@code $tempDir/$user/$uuid/staging-uploads} where
* {@code tempDir} is from
* {@link #tempDirForStaging(FileSystem, Configuration)}.
* @param fs target FS
* @param conf configuration
* @param uuid uuid of job
* @return a path which can be used for temporary work
* @throws IOException on an IO failure.
*/
@VisibleForTesting
static Path getMultipartUploadCommitsDirectory(FileSystem fs,
Configuration conf, String uuid) throws IOException {
return path(
tempDirForStaging(fs, conf),
UserGroupInformation.getCurrentUser().getShortUserName(),
uuid,
STAGING_UPLOADS);
} | 3.68 |
hbase_AbstractFSWAL_getInflightWALCloseCount | /** Returns number of WALs currently in the process of closing. */
public int getInflightWALCloseCount() {
return inflightWALClosures.size();
} | 3.68 |
hudi_HoodieTableFactory_setupHiveOptions | /**
* Sets up the hive options from the table definition.
*/
private static void setupHiveOptions(Configuration conf, ObjectIdentifier tablePath) {
if (!conf.contains(FlinkOptions.HIVE_SYNC_DB)) {
conf.setString(FlinkOptions.HIVE_SYNC_DB, tablePath.getDatabaseName());
}
if (!conf.contains(FlinkOptions.HIVE_SYNC_TABLE)) {
conf.setString(FlinkOptions.HIVE_SYNC_TABLE, tablePath.getObjectName());
}
} | 3.68 |
flink_NFACompiler_isCurrentGroupPatternFirstOfLoop | /**
* Checks if the current group pattern is the head of the TIMES/LOOPING quantifier or not a
* TIMES/LOOPING quantifier pattern.
*/
private boolean isCurrentGroupPatternFirstOfLoop() {
if (firstOfLoopMap.containsKey(currentGroupPattern)) {
return firstOfLoopMap.get(currentGroupPattern);
} else {
return true;
}
} | 3.68 |
graphhopper_ArrayUtil_constant | /**
* Creates an IntArrayList of a given size where each element is set to the given value
*/
public static IntArrayList constant(int size, int value) {
IntArrayList result = new IntArrayList(size);
Arrays.fill(result.buffer, value);
result.elementsCount = size;
return result;
} | 3.68 |
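A short usage sketch; IntArrayList is assumed to be the HPPC list type used by GraphHopper, and the "no parent yet" marker is an invented use case:
IntArrayList parents = ArrayUtil.constant(5, -1);
// parents.size() == 5 and every element is -1, e.g. a "no parent yet" marker in a traversal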
morf_H2MetaDataProvider_isPrimaryKeyIndex | /**
* H2 reports its primary key indexes as PRIMARY_KEY_49 or similar.
*
* @see org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider#isPrimaryKeyIndex(RealName)
*/
@Override
protected boolean isPrimaryKeyIndex(RealName indexName) {
return indexName.getDbName().startsWith("PRIMARY_KEY");
} | 3.68 |
framework_DragAndDropService_isAttached | /*
* (non-Javadoc)
*
* @see com.vaadin.server.ClientConnector#isAttached()
*/
@Override
public boolean isAttached() {
return true;
} | 3.68 |
hbase_FavoredNodeAssignmentHelper_updateMetaWithFavoredNodesInfo | /**
* Update meta table with favored nodes info
*/
public static void updateMetaWithFavoredNodesInfo(
Map<RegionInfo, List<ServerName>> regionToFavoredNodes, Configuration conf) throws IOException {
// Write the region assignments to the meta table.
// TODO: See above overrides take a Connection rather than a Configuration only the
// Connection is a short circuit connection. That is not going to good in all cases, when
// master and meta are not colocated. Fix when this favored nodes feature is actually used
// someday.
try (Connection conn = ConnectionFactory.createConnection(conf)) {
updateMetaWithFavoredNodesInfo(regionToFavoredNodes, conn);
}
} | 3.68 |
flink_BinaryHashPartition_getPartitionNumber | /**
* Gets the partition number of this partition.
*
* @return This partition's number.
*/
int getPartitionNumber() {
return this.partitionNumber;
} | 3.68 |
framework_VaadinPortletService_getCurrentPortletRequest | /**
* Gets the currently processed portlet request. The current portlet request
* is automatically defined when the request is started. The current portlet
* request cannot be used in e.g. background threads because of the way
* server implementations reuse request instances.
*
* @return the current portlet request instance if available, otherwise
* <code>null</code>
*
*/
public static PortletRequest getCurrentPortletRequest() {
VaadinPortletRequest currentRequest = getCurrentRequest();
if (currentRequest != null) {
return currentRequest.getPortletRequest();
} else {
return null;
}
} | 3.68 |
hbase_StorageClusterStatusModel_getRequests | /**
* @return the total number of requests per second handled by the cluster in the last reporting
* interval
*/
@XmlAttribute
public long getRequests() {
return requests;
} | 3.68 |
hbase_ServerManager_findDeadServersAndProcess | /**
* Find out the region servers crashed between the crash of the previous master instance and the
* current master instance and schedule SCP for them.
* <p/>
* Since the {@code RegionServerTracker} has already helped us to construct the online servers set
* by scanning zookeeper, now we can compare the online servers with {@code liveServersFromWALDir}
* to find out whether there are servers which are already dead.
* <p/>
* Must be called inside the initialization method of {@code RegionServerTracker} to avoid
* concurrency issue.
* @param deadServersFromPE the region servers which already have a SCP associated.
* @param liveServersFromWALDir the live region servers from wal directory.
*/
void findDeadServersAndProcess(Set<ServerName> deadServersFromPE,
Set<ServerName> liveServersFromWALDir) {
deadServersFromPE.forEach(deadservers::putIfAbsent);
liveServersFromWALDir.stream().filter(sn -> !onlineServers.containsKey(sn))
.forEach(this::expireServer);
} | 3.68 |
hadoop_FederationMembershipStateStoreInputValidator_checkSubClusterId | /**
* Validate if the SubCluster Id is present or not.
*
* @param subClusterId the identifier of the subcluster to be verified
* @throws FederationStateStoreInvalidInputException if the SubCluster Id is
* invalid
*/
protected static void checkSubClusterId(SubClusterId subClusterId)
throws FederationStateStoreInvalidInputException {
// check if cluster id is present
if (subClusterId == null) {
String message = "Missing SubCluster Id information."
+ " Please try again by specifying Subcluster Id information.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
// check if cluster id is valid
if (subClusterId.getId().isEmpty()) {
String message = "Invalid SubCluster Id information."
+ " Please try again by specifying valid Subcluster Id.";
LOG.warn(message);
throw new FederationStateStoreInvalidInputException(message);
}
} | 3.68 |
hadoop_ActiveAuditManagerS3A_deactivate | /**
* Switch to the unbounded span and then deactivate this span.
* No-op for invalid spans,
* so as to prevent the unbounded span from being closed
* and everything getting very confused.
*/
@Override
public void deactivate() {
// span is inactive; ignore
if (!isActive()) {
return;
}
// skipped for invalid spans,
// so as to prevent the unbounded span from being closed
// and everything getting very confused.
if (isValid) {
// deactivate the span
span.deactivate();
}
// remove the span from the reference map,
// sporadically triggering a prune operation.
removeActiveSpanFromMap();
} | 3.68 |
pulsar_InetAddressUtils_isIPv6StdAddress | /**
* Checks whether the parameter is a valid standard (non-compressed) IPv6 address.
*
* @param input the address string to check for validity
* @return true if the input parameter is a valid standard (non-compressed) IPv6 address
*/
public static boolean isIPv6StdAddress(final String input) {
return IPV6_STD_PATTERN.matcher(input).matches();
} | 3.68 |
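A usage sketch for the check above; since it only accepts the standard (non-compressed) form, a "::"-compressed address is expected to be rejected:
boolean full = InetAddressUtils.isIPv6StdAddress("2001:0db8:85a3:0000:0000:8a2e:0370:7334"); // true
boolean compressed = InetAddressUtils.isIPv6StdAddress("2001:db8::1");                       // false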
rocketmq-connect_RestHandler_listConnectors | /**
* list all connectors
*
* @param context
*/
private void listConnectors(Context context) {
try {
Map<String, Map<String, Object>> out = new HashMap<>();
for (String connector : connectController.connectors()) {
Map<String, Object> connectorExpansions = new HashMap<>();
connectorExpansions.put("status", connectController.connectorStatus(connector));
connectorExpansions.put("info", connectController.connectorInfo(connector));
out.put(connector, connectorExpansions);
}
context.json(new HttpResponse<>(context.status(), out));
} catch (Exception ex) {
log.error("List all connectors failed. ", ex);
context.json(new ErrorMessage(HttpStatus.INTERNAL_SERVER_ERROR_500, ex.getMessage()));
}
} | 3.68 |
hmily_UndoHook_register | /**
* Registers an undo hook function.
*
* @param function the function
*/
public void register(final Function<HmilyParticipantUndo, Boolean> function) {
consumers.add(function);
} | 3.68 |
hbase_ColumnTracker_doneWithColumn | /**
* This method is used to inform the column tracker that we are done with this column. We may get
* this information from external filters or timestamp range and we then need to indicate this
* information to tracker. It is currently implemented for ExplicitColumnTracker.
*/
default void doneWithColumn(Cell cell) {
} | 3.68 |
morf_RenameTable_apply | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#apply(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema apply(Schema schema) {
return applyChange(schema, oldTableName, newTableName);
} | 3.68 |
hbase_MobUtils_isRefOnlyScan | /**
* Indicates whether it's a reference only scan. The information is set in the attribute
* "hbase.mob.scan.ref.only" of scan. If it's a ref only scan, only the cells with ref tag are
* returned.
* @param scan The current scan.
* @return True if it's a ref only scan.
*/
public static boolean isRefOnlyScan(Scan scan) {
byte[] refOnly = scan.getAttribute(MobConstants.MOB_SCAN_REF_ONLY);
try {
return refOnly != null && Bytes.toBoolean(refOnly);
} catch (IllegalArgumentException e) {
return false;
}
} | 3.68 |
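A sketch of how the attribute checked above would typically be set on the scan side, using the standard HBase client Scan.setAttribute and Bytes.toBytes(boolean) calls:
Scan scan = new Scan();
scan.setAttribute(MobConstants.MOB_SCAN_REF_ONLY, Bytes.toBytes(true));
boolean refOnly = MobUtils.isRefOnlyScan(scan); // true: only cells with a ref tag are returned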
AreaShop_GithubUpdateCheck_getRepository | /**
* Get the repository that this update checker is checking.
* @return Used repository
*/
public String getRepository() {
return repository;
} | 3.68 |
hbase_HRegion_doMiniBatchMutate | /**
* Called to do a piece of the batch that came in to {@link #batchMutate(Mutation[])}. In here we
* also handle replay of edits on region recovery. Also gets the change in size brought about by
* applying {@code batchOp}.
*/
private void doMiniBatchMutate(BatchOperation<?> batchOp) throws IOException {
boolean success = false;
WALEdit walEdit = null;
WriteEntry writeEntry = null;
boolean locked = false;
// We try to set up a batch in the range [batchOp.nextIndexToProcess,lastIndexExclusive)
MiniBatchOperationInProgress<Mutation> miniBatchOp = null;
/** Keep track of the locks we hold so we can release them in finally clause */
List<RowLock> acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.size());
// Check for thread interrupt status in case we have been signaled from
// #interruptRegionOperation.
checkInterrupt();
try {
// STEP 1. Try to acquire as many locks as we can and build mini-batch of operations with
// locked rows
miniBatchOp = batchOp.lockRowsAndBuildMiniBatch(acquiredRowLocks);
// We've now grabbed as many mutations off the list as we can
// Ensure we acquire at least one.
if (miniBatchOp.getReadyToWriteCount() <= 0) {
// Nothing to put/delete/increment/append -- an exception in the above such as
// NoSuchColumnFamily?
return;
}
// Check for thread interrupt status in case we have been signaled from
// #interruptRegionOperation. Do it before we take the lock and disable interrupts for
// the WAL append.
checkInterrupt();
lock(this.updatesLock.readLock(), miniBatchOp.getReadyToWriteCount());
locked = true;
// From this point until memstore update this operation should not be interrupted.
disableInterrupts();
// STEP 2. Update mini batch of all operations in progress with LATEST_TIMESTAMP timestamp
// We should record the timestamp only after we have acquired the rowLock,
// otherwise, newer puts/deletes/increment/append are not guaranteed to have a newer
// timestamp
long now = EnvironmentEdgeManager.currentTime();
batchOp.prepareMiniBatchOperations(miniBatchOp, now, acquiredRowLocks);
// STEP 3. Build WAL edit
List<Pair<NonceKey, WALEdit>> walEdits = batchOp.buildWALEdits(miniBatchOp);
// STEP 4. Append the WALEdits to WAL and sync.
for (Iterator<Pair<NonceKey, WALEdit>> it = walEdits.iterator(); it.hasNext();) {
Pair<NonceKey, WALEdit> nonceKeyWALEditPair = it.next();
walEdit = nonceKeyWALEditPair.getSecond();
NonceKey nonceKey = nonceKeyWALEditPair.getFirst();
if (walEdit != null && !walEdit.isEmpty()) {
writeEntry = doWALAppend(walEdit, batchOp, miniBatchOp, now, nonceKey);
}
// Complete mvcc for all but last writeEntry (for replay case)
if (it.hasNext() && writeEntry != null) {
mvcc.complete(writeEntry);
writeEntry = null;
}
}
// STEP 5. Write back to memStore
// NOTE: writeEntry can be null here
writeEntry = batchOp.writeMiniBatchOperationsToMemStore(miniBatchOp, writeEntry, now);
// STEP 6. Complete MiniBatchOperations: If required calls postBatchMutate() CP hook and
// complete mvcc for last writeEntry
batchOp.completeMiniBatchOperations(miniBatchOp, writeEntry);
writeEntry = null;
success = true;
} finally {
// Call complete rather than completeAndWait because we probably had error if walKey != null
if (writeEntry != null) mvcc.complete(writeEntry);
if (locked) {
this.updatesLock.readLock().unlock();
}
releaseRowLocks(acquiredRowLocks);
enableInterrupts();
final int finalLastIndexExclusive =
miniBatchOp != null ? miniBatchOp.getLastIndexExclusive() : batchOp.size();
final boolean finalSuccess = success;
batchOp.visitBatchOperations(true, finalLastIndexExclusive, (int i) -> {
Mutation mutation = batchOp.getMutation(i);
if (mutation instanceof Increment || mutation instanceof Append) {
if (finalSuccess) {
batchOp.retCodeDetails[i] =
new OperationStatus(OperationStatusCode.SUCCESS, batchOp.results[i]);
} else {
batchOp.retCodeDetails[i] = OperationStatus.FAILURE;
}
} else {
batchOp.retCodeDetails[i] =
finalSuccess ? OperationStatus.SUCCESS : OperationStatus.FAILURE;
}
return true;
});
batchOp.doPostOpCleanupForMiniBatch(miniBatchOp, walEdit, finalSuccess);
batchOp.nextIndexToProcess = finalLastIndexExclusive;
}
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_generateMethodThrows | /**
* generate method throws
*/
private String generateMethodThrows(Method method) {
Class<?>[] ets = method.getExceptionTypes();
if (ets.length > 0) {
String list = Arrays.stream(ets).map(Class::getCanonicalName).collect(Collectors.joining(", "));
return String.format(CODE_METHOD_THROWS, list);
} else {
return "";
}
} | 3.68 |
morf_BaseDataSetReader_addTableName | /**
* Add a table, along with its local filename
*
* @param tableName The table name, in its correct case
* @param fileName The file name that holds it
*/
protected void addTableName(String tableName, String fileName) {
tableNameToFileNameMap.put(tableName.toUpperCase(), fileName);
tableNames.add(tableName);
} | 3.68 |
zxing_RSSExpandedReader_removePartialRows | // Remove all the rows that contain only the specified pairs
private static void removePartialRows(Collection<ExpandedPair> pairs, Collection<ExpandedRow> rows) {
for (Iterator<ExpandedRow> iterator = rows.iterator(); iterator.hasNext();) {
ExpandedRow r = iterator.next();
if (r.getPairs().size() != pairs.size()) {
boolean allFound = true;
for (ExpandedPair p : r.getPairs()) {
if (!pairs.contains(p)) {
allFound = false;
break;
}
}
if (allFound) {
// 'pairs' contains all the pairs from the row 'r'
iterator.remove();
}
}
}
} | 3.68 |
pulsar_TopicName_isV2 | /**
* Returns true if this a V2 topic name prop/ns/topic-name.
* @return true if V2
*/
public boolean isV2() {
return cluster == null;
} | 3.68 |
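A hedged sketch; TopicName.get(...) is assumed to be the parsing entry point for fully qualified topic names:
TopicName topic = TopicName.get("persistent://my-tenant/my-namespace/my-topic");
boolean v2 = topic.isV2(); // true: no cluster component in the prop/ns/topic-name form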
pulsar_TransactionImpl_registerProducedTopic | // register the topics that will be modified by this transaction
public CompletableFuture<Void> registerProducedTopic(String topic) {
CompletableFuture<Void> completableFuture = new CompletableFuture<>();
if (checkIfOpen(completableFuture)) {
synchronized (TransactionImpl.this) {
// we need to issue the request to TC to register the produced topic
return registerPartitionMap.compute(topic, (key, future) -> {
if (future != null) {
return future.thenCompose(ignored -> CompletableFuture.completedFuture(null));
} else {
return tcClient.addPublishPartitionToTxnAsync(
txnId, Lists.newArrayList(topic))
.thenCompose(ignored -> CompletableFuture.completedFuture(null));
}
});
}
}
return completableFuture;
} | 3.68 |
hbase_BaseLoadBalancer_retainAssignment | /**
* Generates a bulk assignment startup plan, attempting to reuse the existing assignment
* information from META, but adjusting for the specified list of available/online servers
* available for assignment.
* <p>
* Takes a map of all regions to their existing assignment from META. Also takes a list of online
* servers for regions to be assigned to. Attempts to retain all assignment, so in some instances
* initial assignment will not be completely balanced.
* <p>
* Any leftover regions without an existing server to be assigned to will be assigned randomly to
* available servers.
* @param regions regions and existing assignment from meta
* @param servers available servers
* @return map of servers and regions to be assigned to them, or emptyMap if no assignment is
* possible (ie. no servers)
*/
@Override
@NonNull
public Map<ServerName, List<RegionInfo>> retainAssignment(Map<RegionInfo, ServerName> regions,
List<ServerName> servers) throws HBaseIOException {
// Update metrics
metricsBalancer.incrMiscInvocations();
int numServers = servers == null ? 0 : servers.size();
if (numServers == 0) {
LOG.warn("Wanted to do retain assignment but no servers to assign to");
return Collections.singletonMap(BOGUS_SERVER_NAME, new ArrayList<>(regions.keySet()));
}
if (numServers == 1) { // Only one server, nothing fancy we can do here
return Collections.singletonMap(servers.get(0), new ArrayList<>(regions.keySet()));
}
// Group all the old assignments by their hostname.
// We can't group directly by ServerName since the servers all have
// new start-codes.
// Group the servers by their hostname. It's possible we have multiple
// servers on the same host on different ports.
Map<ServerName, List<RegionInfo>> assignments = new HashMap<>();
ArrayListMultimap<String, ServerName> serversByHostname = ArrayListMultimap.create();
for (ServerName server : servers) {
assignments.put(server, new ArrayList<>());
serversByHostname.put(server.getHostnameLowerCase(), server);
}
// Collection of the hostnames that used to have regions
// assigned, but for which we no longer have any RS running
// after the cluster restart.
Set<String> oldHostsNoLongerPresent = Sets.newTreeSet();
// If the old servers aren't present, lets assign those regions later.
List<RegionInfo> randomAssignRegions = Lists.newArrayList();
int numRandomAssignments = 0;
int numRetainedAssigments = 0;
for (Map.Entry<RegionInfo, ServerName> entry : regions.entrySet()) {
RegionInfo region = entry.getKey();
ServerName oldServerName = entry.getValue();
List<ServerName> localServers = new ArrayList<>();
if (oldServerName != null) {
localServers = serversByHostname.get(oldServerName.getHostnameLowerCase());
}
if (localServers.isEmpty()) {
// No servers on the new cluster match up with this hostname, assign randomly, later.
randomAssignRegions.add(region);
if (oldServerName != null) {
oldHostsNoLongerPresent.add(oldServerName.getHostnameLowerCase());
}
} else if (localServers.size() == 1) {
// the usual case - one new server on same host
ServerName target = localServers.get(0);
assignments.get(target).add(region);
numRetainedAssigments++;
} else {
// multiple new servers in the cluster on this same host
if (localServers.contains(oldServerName)) {
assignments.get(oldServerName).add(region);
numRetainedAssigments++;
} else {
ServerName target = null;
for (ServerName tmp : localServers) {
if (tmp.getPort() == oldServerName.getPort()) {
target = tmp;
assignments.get(tmp).add(region);
numRetainedAssigments++;
break;
}
}
if (target == null) {
randomAssignRegions.add(region);
}
}
}
}
// If servers from prior assignment aren't present, then lets do randomAssignment on regions.
if (randomAssignRegions.size() > 0) {
BalancerClusterState cluster = createCluster(servers, regions.keySet());
for (Map.Entry<ServerName, List<RegionInfo>> entry : assignments.entrySet()) {
ServerName sn = entry.getKey();
for (RegionInfo region : entry.getValue()) {
cluster.doAssignRegion(region, sn);
}
}
for (RegionInfo region : randomAssignRegions) {
ServerName target = randomAssignment(cluster, region, servers);
assignments.get(target).add(region);
numRandomAssignments++;
}
}
String randomAssignMsg = "";
if (numRandomAssignments > 0) {
randomAssignMsg = numRandomAssignments + " regions were assigned "
+ "to random hosts, since the old hosts for these regions are no "
+ "longer present in the cluster. These hosts were:\n "
+ Joiner.on("\n ").join(oldHostsNoLongerPresent);
}
LOG.info("Reassigned " + regions.size() + " regions. " + numRetainedAssigments
+ " retained the pre-restart assignment. " + randomAssignMsg);
return Collections.unmodifiableMap(assignments);
} | 3.68 |
hbase_HFileWriterImpl_append | /**
* Add key/value to the file. Keys must be added in an order that agrees with the Comparator passed on
* construction. The cell to add cannot be empty nor null.
*/
@Override
public void append(final Cell cell) throws IOException {
// checkKey uses comparator to check we are writing in order.
boolean dupKey = checkKey(cell);
if (!dupKey) {
checkBlockBoundary();
}
if (!blockWriter.isWriting()) {
newBlock();
}
blockWriter.write(cell);
totalKeyLength += PrivateCellUtil.estimatedSerializedSizeOfKey(cell);
totalValueLength += cell.getValueLength();
if (lenOfBiggestCell < PrivateCellUtil.estimatedSerializedSizeOf(cell)) {
lenOfBiggestCell = PrivateCellUtil.estimatedSerializedSizeOf(cell);
keyOfBiggestCell = PrivateCellUtil.getCellKeySerializedAsKeyValueKey(cell);
}
// Are we the first key in this block?
if (firstCellInBlock == null) {
// If cell is big, block will be closed and this firstCellInBlock reference will only last
// a short while.
firstCellInBlock = cell;
}
// TODO: What if cell is 10MB and we write infrequently? We hold on to cell here indefinitely?
lastCell = cell;
entryCount++;
this.maxMemstoreTS = Math.max(this.maxMemstoreTS, cell.getSequenceId());
int tagsLength = cell.getTagsLength();
if (tagsLength > this.maxTagsLength) {
this.maxTagsLength = tagsLength;
}
} | 3.68 |
streampipes_AdapterMasterManagement_deleteAdapter | /**
* First the adapter is stopped and removed, then the corresponding data stream is deleted
*
* @param elementId The elementId of the adapter instance
* @throws AdapterException when adapter can not be stopped
*/
public void deleteAdapter(String elementId) throws AdapterException {
// Stop stream adapter
try {
stopStreamAdapter(elementId);
} catch (AdapterException e) {
LOG.info("Could not stop adapter: " + elementId, e);
}
AdapterDescription adapter = adapterInstanceStorage.getAdapter(elementId);
// Delete adapter
adapterResourceManager.delete(elementId);
ExtensionsLogProvider.INSTANCE.remove(elementId);
LOG.info("Successfully deleted adapter: " + elementId);
// Delete data stream
this.dataStreamResourceManager.delete(adapter.getCorrespondingDataStreamElementId());
LOG.info("Successfully deleted data stream: " + adapter.getCorrespondingDataStreamElementId());
} | 3.68 |
flink_SourceCoordinator_deserializeCheckpoint | /**
* Restore the state of this source coordinator from the state bytes.
*
* @param bytes The checkpoint bytes that was returned from {@link #toBytes(long)}
* @throws Exception When the deserialization failed.
*/
private EnumChkT deserializeCheckpoint(byte[] bytes) throws Exception {
try (ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
DataInputStream in = new DataInputViewStreamWrapper(bais)) {
final int coordinatorSerdeVersion = readAndVerifyCoordinatorSerdeVersion(in);
int enumSerializerVersion = in.readInt();
int serializedEnumChkptSize = in.readInt();
byte[] serializedEnumChkpt = readBytes(in, serializedEnumChkptSize);
if (coordinatorSerdeVersion != SourceCoordinatorSerdeUtils.VERSION_0
&& bais.available() > 0) {
throw new IOException("Unexpected trailing bytes in enumerator checkpoint data");
}
return enumCheckpointSerializer.deserialize(enumSerializerVersion, serializedEnumChkpt);
}
} | 3.68 |
hudi_AbstractTableFileSystemView_getLatestFileSliceFilteringUncommittedFiles | /**
* Looks for the latest file slice that is not empty after filtering out the uncommitted files.
*
* <p>Note: Checks from the latest file slice first to improve the efficiency. There is no need to check
* every file slice; the uncommitted files generally only exist in the latest file slice.
*/
private Option<FileSlice> getLatestFileSliceFilteringUncommittedFiles(Stream<FileSlice> fileSlices) {
return Option.fromJavaOptional(fileSlices.flatMap(fileSlice -> filterUncommittedFiles(fileSlice, false)).findFirst());
} | 3.68 |
hibernate-validator_XmlParserHelper_getSchema | /**
* Returns the XML schema identified by the given resource name.
*
* @param schemaResource
* the resource name identifying the schema.
* @return the schema identified by the given resource name or {@code null} if the resource was not found or could
* not be loaded.
*/
public Schema getSchema(String schemaResource) {
Schema schema = schemaCache.get( schemaResource );
if ( schema != null ) {
return schema;
}
schema = loadSchema( schemaResource );
if ( schema != null ) {
Schema previous = schemaCache.putIfAbsent( schemaResource, schema );
return previous != null ? previous : schema;
}
else {
return null;
}
} | 3.68 |
flink_SingleInputPlanNode_getSortOrders | /**
* Gets the sort order for the specified driver comparator.
*
* @param id The id of the driver comparator for which the sort order is requested.
* @return The sort order of the specified driver comparator.
*/
public boolean[] getSortOrders(int id) {
return driverSortOrders[id];
} | 3.68 |
flink_ExecutionConfig_registerPojoType | /**
* Registers the given type with the serialization stack. If the type is eventually serialized
* as a POJO, then the type is registered with the POJO serializer. If the type ends up being
* serialized with Kryo, then it will be registered at Kryo to make sure that only tags are
* written.
*
* @param type The class of the type to register.
*/
public void registerPojoType(Class<?> type) {
if (type == null) {
throw new NullPointerException("Cannot register null type class.");
}
if (!registeredPojoTypes.contains(type)) {
registeredPojoTypes.add(type);
}
} | 3.68 |
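A brief usage sketch; MyEvent is a hypothetical POJO registered up-front so it is handled by the POJO serializer (or only tagged if it falls back to Kryo):
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.getConfig().registerPojoType(MyEvent.class); // MyEvent is a placeholder user type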
framework_VColorPickerGradient_setCursor | /**
* Sets the latest coordinates for pressed-down mouse cursor and updates the
* cross elements.
*
* @param x
* x-coordinate
* @param y
* y-coordinate
*/
public void setCursor(int x, int y) {
cursorX = x;
cursorY = y;
if (x >= 0) {
lowercross.getElement().getStyle().setWidth(x, Unit.PX);
}
if (y >= 0) {
lowercross.getElement().getStyle().setTop(y, Unit.PX);
}
if (y >= 0) {
lowercross.getElement().getStyle().setHeight(height - y, Unit.PX);
} else {
lowercross.getElement().getStyle().setHeight(Math.abs(y), Unit.PX);
}
if (x >= 0) {
highercross.getElement().getStyle().setWidth(width - x, Unit.PX);
}
if (x >= 0) {
highercross.getElement().getStyle().setLeft(x, Unit.PX);
}
if (y >= 0) {
highercross.getElement().getStyle().setHeight(y, Unit.PX);
} else {
highercross.getElement().getStyle().setHeight(height + y, Unit.PX);
}
} | 3.68 |
framework_FreeformQuery_getResults | /**
* Fetches the results for the query. This implementation always fetches the
* entire record set, ignoring the offset and page length parameters. In
* order to support lazy loading of records, you must supply a
* FreeformQueryDelegate that implements the
* FreeformQueryDelegate.getQueryString(int,int) method.
*
* @throws SQLException
*
* @see FreeformQueryDelegate#getQueryString(int, int)
*/
@Override
@SuppressWarnings({ "deprecation", "finally" })
public ResultSet getResults(int offset, int pagelength)
throws SQLException {
ensureTransaction();
String query = queryString;
if (delegate != null) {
/* First try using prepared statement */
if (delegate instanceof FreeformStatementDelegate) {
try {
StatementHelper sh = ((FreeformStatementDelegate) delegate)
.getQueryStatement(offset, pagelength);
PreparedStatement pstmt = getConnection()
.prepareStatement(sh.getQueryString());
sh.setParameterValuesToStatement(pstmt);
return pstmt.executeQuery();
} catch (UnsupportedOperationException e) {
// Statement generation not supported, continue...
}
}
try {
query = delegate.getQueryString(offset, pagelength);
} catch (UnsupportedOperationException e) {
// This is fine, we'll just use the default queryString.
}
}
Statement statement = getConnection().createStatement();
ResultSet rs;
try {
rs = statement.executeQuery(query);
} catch (SQLException e) {
try {
statement.close();
} finally {
// throw the original exception even if closing the statement
// fails
throw e;
}
}
return rs;
} | 3.68 |
hbase_MemStoreLABImpl_copyCellInto | /**
* @see #copyBBECellInto(ByteBufferExtendedCell, int)
*/
private Cell copyCellInto(Cell cell, int maxAlloc) {
int size = Segment.getCellLength(cell);
Preconditions.checkArgument(size >= 0, "negative size");
// Callers should satisfy large allocations directly from JVM since they
// don't cause fragmentation as badly.
if (size > maxAlloc) {
return null;
}
Chunk c = null;
int allocOffset = 0;
while (true) {
// Try to get the chunk
c = getOrMakeChunk();
// we may get null because the some other thread succeeded in getting the lock
// and so the current thread has to try again to make its chunk or grab the chunk
// that the other thread created
// Try to allocate from this chunk
if (c != null) {
allocOffset = c.alloc(size);
if (allocOffset != -1) {
// We succeeded - this is the common case - small alloc
// from a big buffer
break;
}
// not enough space!
// try to retire this chunk
tryRetireChunk(c);
}
}
return copyToChunkCell(cell, c.getData(), allocOffset, size);
} | 3.68 |
flink_ApplicationDispatcherBootstrap_runApplicationAsync | /**
* Runs the user program entrypoint by scheduling a task on the given {@code scheduledExecutor}.
* The returned {@link CompletableFuture} completes when all jobs of the user application
* have succeeded, and completes exceptionally if any of them fails or if job submission fails.
*/
private CompletableFuture<Void> runApplicationAsync(
final DispatcherGateway dispatcherGateway,
final ScheduledExecutor scheduledExecutor,
final boolean enforceSingleJobExecution,
final boolean submitFailedJobOnApplicationError) {
final CompletableFuture<List<JobID>> applicationExecutionFuture = new CompletableFuture<>();
final Set<JobID> tolerateMissingResult = Collections.synchronizedSet(new HashSet<>());
// we need to hand in a future as the return value because we need to get those JobIDs out
// from the scheduled task that executes the user program
applicationExecutionTask =
scheduledExecutor.schedule(
() ->
runApplicationEntryPoint(
applicationExecutionFuture,
tolerateMissingResult,
dispatcherGateway,
scheduledExecutor,
enforceSingleJobExecution,
submitFailedJobOnApplicationError),
0L,
TimeUnit.MILLISECONDS);
return applicationExecutionFuture.thenCompose(
jobIds ->
getApplicationResult(
dispatcherGateway,
jobIds,
tolerateMissingResult,
scheduledExecutor));
} | 3.68 |
zxing_RSSExpandedReader_getRows | // Only used for unit testing
List<ExpandedRow> getRows() {
return this.rows;
} | 3.68 |
hbase_ServerRpcController_checkFailed | /**
* Throws an IOException back out if one is currently stored.
*/
public void checkFailed() throws IOException {
if (failedOnException()) {
throw getFailedOn();
}
} | 3.68 |
hbase_ReplicationLoad_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return this.sourceToString() + System.getProperty("line.separator") + this.sinkToString();
} | 3.68 |
hudi_BaseRollbackHelper_collectRollbackStats | /**
* Collect all file info that needs to be rolled back.
*/
public List<HoodieRollbackStat> collectRollbackStats(HoodieEngineContext context, HoodieInstant instantToRollback,
List<HoodieRollbackRequest> rollbackRequests) {
int parallelism = Math.max(Math.min(rollbackRequests.size(), config.getRollbackParallelism()), 1);
context.setJobStatus(this.getClass().getSimpleName(), "Collect rollback stats for upgrade/downgrade: " + config.getTableName());
// If not for conversion to HoodieRollbackInternalRequests, code fails. Using avro model (HoodieRollbackRequest) within spark.parallelize
// is failing with com.esotericsoftware.kryo.KryoException
// stack trace: https://gist.github.com/nsivabalan/b6359e7d5038484f8043506c8bc9e1c8
// related stack overflow post: https://issues.apache.org/jira/browse/SPARK-3601. Avro deserializes list as GenericData.Array.
List<SerializableHoodieRollbackRequest> serializableRequests = rollbackRequests.stream().map(SerializableHoodieRollbackRequest::new).collect(Collectors.toList());
return context.reduceByKey(maybeDeleteAndCollectStats(context, instantToRollback, serializableRequests, false, parallelism),
RollbackUtils::mergeRollbackStat, parallelism);
} | 3.68 |
flink_JobID_fromHexString | /**
* Parses a JobID from the given string.
*
* @param hexString string representation of a JobID
* @return Parsed JobID
* @throws IllegalArgumentException if the JobID could not be parsed from the given string
*/
public static JobID fromHexString(String hexString) {
try {
return new JobID(StringUtils.hexStringToByte(hexString));
} catch (Exception e) {
throw new IllegalArgumentException(
"Cannot parse JobID from \""
+ hexString
+ "\". The expected format is "
+ "[0-9a-fA-F]{32}, e.g. fd72014d4c864993a2e5a9287b4a9c5d.",
e);
}
} | 3.68 |
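A usage sketch with the example format quoted in the error message above:
JobID jobId = JobID.fromHexString("fd72014d4c864993a2e5a9287b4a9c5d");
// An invalid string such as "not-a-job-id" results in an IllegalArgumentException.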
pulsar_AbstractMultiVersionReader_getSchemaInfoByVersion | /**
* TODO: think about how to make this async.
*/
protected SchemaInfo getSchemaInfoByVersion(byte[] schemaVersion) {
try {
return schemaInfoProvider.getSchemaByVersion(schemaVersion).get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SerializationException(
"Interrupted at fetching schema info for " + SchemaUtils.getStringSchemaVersion(schemaVersion),
e
);
} catch (ExecutionException e) {
throw new SerializationException(
"Failed at fetching schema info for " + SchemaUtils.getStringSchemaVersion(schemaVersion),
e.getCause()
);
}
} | 3.68 |
flink_ReaderInfo_getSubtaskId | /** @return the ID of the subtask that runs the source reader. */
public int getSubtaskId() {
return subtaskId;
} | 3.68 |
hbase_HFileWriterImpl_checkKey | /**
* Checks that the given Cell's key does not violate the key order.
* @param cell Cell whose key to check.
 * @return true if the key is a duplicate
* @throws IOException if the key or the key order is wrong
*/
protected boolean checkKey(final Cell cell) throws IOException {
boolean isDuplicateKey = false;
if (cell == null) {
throw new IOException("Key cannot be null or empty");
}
if (lastCell != null) {
int keyComp = PrivateCellUtil.compareKeyIgnoresMvcc(this.hFileContext.getCellComparator(),
lastCell, cell);
if (keyComp > 0) {
String message = getLexicalErrorMessage(cell);
throw new IOException(message);
} else if (keyComp == 0) {
isDuplicateKey = true;
}
}
return isDuplicateKey;
} | 3.68 |
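A simplified, standalone illustration of the ordering rule enforced above, using plain Strings in place of Cells and natural ordering in place of the CellComparator; unlike the real writer, this sketch also updates the last-seen key itself.

import java.io.IOException;

public class KeyOrderCheckSketch {
    private String lastKey;

    boolean checkKey(String key) throws IOException {
        if (key == null) {
            throw new IOException("Key cannot be null or empty");
        }
        boolean duplicate = false;
        if (lastKey != null) {
            int cmp = lastKey.compareTo(key);
            if (cmp > 0) {
                throw new IOException("Key not lexically larger than previous: " + key);
            } else if (cmp == 0) {
                duplicate = true;
            }
        }
        lastKey = key;
        return duplicate;
    }

    public static void main(String[] args) throws IOException {
        KeyOrderCheckSketch w = new KeyOrderCheckSketch();
        System.out.println(w.checkKey("a")); // false
        System.out.println(w.checkKey("a")); // true  (duplicate)
        System.out.println(w.checkKey("b")); // false
        try {
            w.checkKey("a");                 // out of order -> IOException
        } catch (IOException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}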
flink_Task_getFailureCause | /**
* If the task has failed, this method gets the exception that caused this task to fail.
* Otherwise this method returns null.
*
* @return The exception that caused the task to fail, or null, if the task has not failed.
*/
public Throwable getFailureCause() {
return failureCause;
} | 3.68 |
hbase_RecordFilter_newBuilder | /*
* For FilterBuilder
*/
public static FilterBuilder newBuilder(Field field) {
return new FilterBuilder(field, false);
} | 3.68 |
flink_KvStateLocation_getKvStateServerAddress | /**
* Returns the registered server address for the key group index or <code>null</code> if none is
* registered yet.
*
* @param keyGroupIndex Key group index to get server address for.
* @return the server address for the key group index or <code>null</code> if none is registered
* yet
* @throws IndexOutOfBoundsException If key group index < 0 or >= Number of key groups
*/
public InetSocketAddress getKvStateServerAddress(int keyGroupIndex) {
if (keyGroupIndex < 0 || keyGroupIndex >= numKeyGroups) {
throw new IndexOutOfBoundsException("Key group index");
}
return kvStateAddresses[keyGroupIndex];
} | 3.68 |
hbase_SequenceIdAccounting_getLowestSequenceId | /**
* @param sequenceids Map to search for lowest value.
* @return Lowest value found in <code>sequenceids</code>.
*/
private static long getLowestSequenceId(Map<?, Long> sequenceids) {
long lowest = HConstants.NO_SEQNUM;
for (Map.Entry<?, Long> entry : sequenceids.entrySet()) {
if (entry.getKey().toString().equals("METAFAMILY")) {
continue;
}
Long sid = entry.getValue();
if (lowest == HConstants.NO_SEQNUM || sid.longValue() < lowest) {
lowest = sid.longValue();
}
}
return lowest;
} | 3.68 |
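An illustrative, self-contained re-implementation of the scan above (not the HBase class itself); it assumes HConstants.NO_SEQNUM is -1, which is its value in recent HBase releases.

import java.util.HashMap;
import java.util.Map;

public class LowestSequenceIdDemo {
    private static final long NO_SEQNUM = -1;

    static long lowestSequenceId(Map<?, Long> sequenceIds) {
        long lowest = NO_SEQNUM;
        for (Map.Entry<?, Long> entry : sequenceIds.entrySet()) {
            if (entry.getKey().toString().equals("METAFAMILY")) {
                continue; // the meta family is skipped, exactly as in the snippet above
            }
            long sid = entry.getValue();
            if (lowest == NO_SEQNUM || sid < lowest) {
                lowest = sid;
            }
        }
        return lowest;
    }

    public static void main(String[] args) {
        Map<String, Long> ids = new HashMap<>();
        ids.put("cf1", 42L);
        ids.put("cf2", 17L);
        ids.put("METAFAMILY", 3L); // ignored
        System.out.println(lowestSequenceId(ids)); // prints 17
    }
}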
hadoop_LocatedFileStatus_compareTo | /**
* Compare this FileStatus to another FileStatus
* @param o the FileStatus to be compared.
* @return a negative integer, zero, or a positive integer as this object
* is less than, equal to, or greater than the specified object.
*/
@Override
public int compareTo(FileStatus o) {
return super.compareTo(o);
} | 3.68 |
flink_RestServerEndpoint_checkAndCreateUploadDir | /**
* Checks whether the given directory exists and is writable. If it doesn't exist, this method
* will attempt to create it.
*
* @param uploadDir directory to check
* @param log logger used for logging output
* @throws IOException if the directory does not exist and cannot be created, or if the
* directory isn't writable
*/
private static synchronized void checkAndCreateUploadDir(final Path uploadDir, final Logger log)
throws IOException {
if (Files.exists(uploadDir) && Files.isWritable(uploadDir)) {
log.info("Using directory {} for file uploads.", uploadDir);
} else if (Files.isWritable(Files.createDirectories(uploadDir))) {
log.info("Created directory {} for file uploads.", uploadDir);
} else {
log.warn("Upload directory {} cannot be created or is not writable.", uploadDir);
throw new IOException(
String.format(
"Upload directory %s cannot be created or is not writable.",
uploadDir));
}
} | 3.68 |
hbase_HBaseConfiguration_applyClusterKeyToConf | /**
 * Apply the settings in the given key to the given configuration; this is used to communicate
 * with distant clusters.
 * @param conf configuration object to configure
 * @param key string that contains the 3 required configurations
*/
private static void applyClusterKeyToConf(Configuration conf, String key) throws IOException {
ZKConfig.ZKClusterKey zkClusterKey = ZKConfig.transformClusterKey(key);
conf.set(HConstants.ZOOKEEPER_QUORUM, zkClusterKey.getQuorumString());
conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClusterKey.getClientPort());
conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zkClusterKey.getZnodeParent());
// Without the right registry, the above configs are useless. Also, we don't use setClass()
// here because the ConnectionRegistry* classes are not resolvable from this module.
// This will be broken if ZkConnectionRegistry class gets renamed or moved. Is there a better
// way?
LOG.info("Overriding client registry implementation to {}",
HConstants.ZK_CONNECTION_REGISTRY_CLASS);
conf.set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY,
HConstants.ZK_CONNECTION_REGISTRY_CLASS);
} | 3.68 |
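A hedged usage sketch of the cluster-key format consumed above. It relies on ZKConfig.transformClusterKey, the same helper the snippet calls, which parses keys of the form "quorum:clientPort:znodeParent"; the host names are placeholders and the import path is the one used by recent HBase versions.

import org.apache.hadoop.hbase.zookeeper.ZKConfig;

public class ClusterKeyExample {
    public static void main(String[] args) throws java.io.IOException {
        ZKConfig.ZKClusterKey key =
            ZKConfig.transformClusterKey("zk1.example.com,zk2.example.com:2181:/hbase");
        System.out.println(key.getQuorumString()); // the ZooKeeper quorum part of the key
        System.out.println(key.getClientPort());   // 2181
        System.out.println(key.getZnodeParent());  // /hbase
    }
}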
flink_HsBufferContext_startSpilling | /**
 * Marks the buffer's spilling as started.
*
* @param spilledFuture completable future of this buffer's spilling operation.
* @return false, if spilling of the buffer has been started before or the buffer has been
* released already; true, otherwise.
*/
public boolean startSpilling(CompletableFuture<Void> spilledFuture) {
if (isReleased() || isSpillStarted()) {
return false;
}
spillStarted = true;
this.spilledFuture = spilledFuture;
// increase ref count when buffer is decided to spill.
buffer.retainBuffer();
// decrease ref count when buffer spilling is finished.
spilledFuture.thenRun(buffer::recycleBuffer);
return true;
} | 3.68 |
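A minimal standalone sketch of the retain/release pattern above: take an extra reference while an asynchronous spill is in flight and drop it when the future completes. Buffer here is a stand-in class, not Flink's network buffer.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

public class SpillRefCountSketch {
    static class Buffer {
        final AtomicInteger refs = new AtomicInteger(1);
        void retain() { refs.incrementAndGet(); }
        void recycle() {
            if (refs.decrementAndGet() == 0) {
                System.out.println("buffer recycled");
            }
        }
    }

    public static void main(String[] args) {
        Buffer buffer = new Buffer();
        CompletableFuture<Void> spilled = new CompletableFuture<>();

        buffer.retain();                  // keep the buffer alive while spilling is in flight
        spilled.thenRun(buffer::recycle); // drop the extra reference once spilling finishes

        buffer.recycle();                 // the original owner releases its reference
        spilled.complete(null);           // spill completes -> ref count hits zero -> recycled
    }
}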
hbase_DisableTableProcedure_prepareDisable | /**
 * Action before any real action of disabling table. Set the exception in the procedure instead of
 * throwing it. This approach is to stay backward compatible with 1.0.
* @param env MasterProcedureEnv
*/
private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
boolean canTableBeDisabled = true;
if (tableName.equals(TableName.META_TABLE_NAME)) {
setFailure("master-disable-table",
new ConstraintException("Cannot disable " + this.tableName));
canTableBeDisabled = false;
} else if (!env.getMasterServices().getTableDescriptors().exists(tableName)) {
setFailure("master-disable-table", new TableNotFoundException(tableName));
canTableBeDisabled = false;
} else if (!skipTableStateCheck) {
// There could be multiple client requests trying to disable or enable
// the table at the same time. Ensure only the first request is honored
// After that, no other requests can be accepted until the table reaches
// DISABLED or ENABLED.
//
// Note: in 1.0 release, we called TableStateManager.setTableStateIfInStates() to set
// the state to DISABLING from ENABLED. The implementation was done before table lock
// was implemented. With table lock, there is no need to set the state here (it will
// set the state later on). A quick state check should be enough for us to move forward.
TableStateManager tsm = env.getMasterServices().getTableStateManager();
TableState ts = tsm.getTableState(tableName);
if (!ts.isEnabled()) {
LOG.info("Not ENABLED, state={}, skipping disable; {}", ts.getState(), this);
setFailure("master-disable-table", new TableNotEnabledException(ts.toString()));
canTableBeDisabled = false;
}
}
    // We are done with the check. Future actions in this procedure could be done asynchronously.
releaseSyncLatch();
return canTableBeDisabled;
} | 3.68 |
hmily_HmilyTransactionRecoveryService_confirm | /**
* Confirm.
*
* @param hmilyParticipant the hmily participant
 * @return {@code true} if the confirm action succeeded, {@code false} otherwise
*/
public boolean confirm(final HmilyParticipant hmilyParticipant) {
try {
HmilyReflector.executor(HmilyActionEnum.CONFIRMING, ExecutorTypeEnum.LOCAL, hmilyParticipant);
removeHmilyParticipant(hmilyParticipant.getParticipantId());
return true;
} catch (Exception e) {
LOGGER.error("hmily Recovery executor confirm exception param:{} ", hmilyParticipant.toString(), e);
return false;
}
} | 3.68 |
pulsar_Producer_parseRemoteClusterName | /**
 * Producer name for a replicator is in one of the following formats:
* "replicatorPrefix.localCluster" (old)
* "replicatorPrefix.localCluster-->remoteCluster" (new)
*/
private String parseRemoteClusterName(String producerName, boolean isRemote, String replicatorPrefix) {
if (isRemote) {
String clusterName = producerName.substring(replicatorPrefix.length());
return clusterName.contains(REPL_PRODUCER_NAME_DELIMITER)
? clusterName.split(REPL_PRODUCER_NAME_DELIMITER)[0] : clusterName;
}
return null;
} | 3.68 |
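An illustrative standalone copy of the parsing logic above. The "-->" delimiter and the "pulsar.repl." prefix are assumptions made for the demo (the real values come from Pulsar's configuration), chosen to match the formats described in the Javadoc.

public class ReplProducerNameDemo {
    private static final String DELIMITER = "-->";

    static String parseRemoteClusterName(String producerName, boolean isRemote, String replicatorPrefix) {
        if (isRemote) {
            String clusterName = producerName.substring(replicatorPrefix.length());
            return clusterName.contains(DELIMITER) ? clusterName.split(DELIMITER)[0] : clusterName;
        }
        return null;
    }

    public static void main(String[] args) {
        // Old format: only the local cluster is encoded after the prefix.
        System.out.println(parseRemoteClusterName("pulsar.repl.us-west", true, "pulsar.repl."));           // us-west
        // New format: localCluster-->remoteCluster; the part before the delimiter is returned.
        System.out.println(parseRemoteClusterName("pulsar.repl.us-west-->us-east", true, "pulsar.repl.")); // us-west
    }
}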
pulsar_SchemaData_fromSchemaInfo | /**
* Convert a schema info to a schema data.
*
* @param schemaInfo schema info
 * @return the converted schema data
*/
public static SchemaData fromSchemaInfo(SchemaInfo schemaInfo) {
return SchemaData.builder()
.type(schemaInfo.getType())
.data(schemaInfo.getSchema())
.props(schemaInfo.getProperties())
.build();
} | 3.68 |
morf_FieldReference_nullsFirst | /**
 * Sets the null value handling type to FIRST.
* @return this
*/
public Builder nullsFirst() {
this.nullValueHandling = Optional.of(NullValueHandling.FIRST);
return this;
} | 3.68 |
flink_LogicalTypeChecks_getFieldNames | /** Returns the field names of row and structured types. */
public static List<String> getFieldNames(LogicalType logicalType) {
return logicalType.accept(FIELD_NAMES_EXTRACTOR);
} | 3.68 |
hibernate-validator_Mod11CheckValidator_isCheckDigitValid | /**
* Validate check digit using Mod11 checksum
*
* @param digits The digits over which to calculate the checksum
* @param checkDigit the check digit
*
* @return {@code true} if the mod11 result matches the check digit, {@code false} otherwise
*/
@Override
public boolean isCheckDigitValid(List<Integer> digits, char checkDigit) {
if ( reverseOrder ) {
Collections.reverse( digits );
}
int modResult = ModUtil.calculateModXCheckWithWeights( digits, 11, this.threshold, customWeights );
switch ( modResult ) {
case 10:
return checkDigit == this.treatCheck10As;
case 11:
return checkDigit == this.treatCheck11As;
default:
return Character.isDigit( checkDigit ) && modResult == extractDigit( checkDigit );
}
} | 3.68 |
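A standalone sketch of the classic Mod11 scheme the validator builds on: weights 2, 3, 4, ... applied from the rightmost digit, wrapping at a threshold. This is illustrative only and is not the ModUtil implementation that Hibernate Validator actually calls; it is consistent with the switch above in that results of 10 and 11 are the special cases handled via treatCheck10As and treatCheck11As.

import java.util.List;

public class Mod11Sketch {
    static int mod11(List<Integer> digits, int threshold) {
        int sum = 0;
        int weight = 2;
        for (int i = digits.size() - 1; i >= 0; i--) {
            sum += digits.get(i) * weight;
            weight = weight < threshold ? weight + 1 : 2; // wrap the weight at the threshold
        }
        return 11 - (sum % 11); // 10 and 11 are the special cases handled by the validator above
    }

    public static void main(String[] args) {
        // Digits 1 2 3 4 5 with an unlimited threshold: sum = 5*2 + 4*3 + 3*4 + 2*5 + 1*6 = 50,
        // 50 % 11 = 6, so the expected check value is 11 - 6 = 5.
        System.out.println(mod11(List.of(1, 2, 3, 4, 5), Integer.MAX_VALUE)); // prints 5
    }
}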
flink_SimpleOperatorFactory_of | /** Creates a SimpleOperatorFactory from an existing StreamOperator. */
@SuppressWarnings("unchecked")
public static <OUT> SimpleOperatorFactory<OUT> of(StreamOperator<OUT> operator) {
if (operator == null) {
return null;
} else if (operator instanceof StreamSource
&& ((StreamSource) operator).getUserFunction()
instanceof InputFormatSourceFunction) {
return new SimpleInputFormatOperatorFactory<OUT>((StreamSource) operator);
} else if (operator instanceof UserFunctionProvider
&& (((UserFunctionProvider<Function>) operator).getUserFunction()
instanceof OutputFormatSinkFunction)) {
return new SimpleOutputFormatOperatorFactory<>(
(((OutputFormatSinkFunction<?>)
((UserFunctionProvider<Function>) operator).getUserFunction())
.getFormat()),
operator);
} else if (operator instanceof AbstractUdfStreamOperator) {
return new SimpleUdfStreamOperatorFactory<OUT>((AbstractUdfStreamOperator) operator);
} else {
return new SimpleOperatorFactory<>(operator);
}
} | 3.68 |
pulsar_DefaultMetadataResolver_getWellKnownMetadataUrl | /**
* Gets a well-known metadata URL for the given OAuth issuer URL.
* @see <a href="https://tools.ietf.org/id/draft-ietf-oauth-discovery-08.html#ASConfig">
* OAuth Discovery: Obtaining Authorization Server Metadata</a>
* @param issuerUrl The authorization server's issuer identifier
* @return a URL
*/
public static URL getWellKnownMetadataUrl(URL issuerUrl) {
try {
return URI.create(issuerUrl.toExternalForm() + "/.well-known/openid-configuration").normalize().toURL();
} catch (MalformedURLException e) {
throw new IllegalArgumentException(e);
}
} | 3.68 |
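A standalone sketch of the URL construction above using only java.net, so no Pulsar classes are required; example.com is a placeholder issuer.

import java.net.URI;
import java.net.URL;

public class WellKnownUrlSketch {
    // Same construction as the resolver: append the discovery path and normalize.
    static URL wellKnownMetadataUrl(URL issuerUrl) throws Exception {
        return URI.create(issuerUrl.toExternalForm() + "/.well-known/openid-configuration")
                .normalize().toURL();
    }

    public static void main(String[] args) throws Exception {
        System.out.println(wellKnownMetadataUrl(new URL("https://accounts.example.com")));
        // prints https://accounts.example.com/.well-known/openid-configuration
    }
}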
morf_JdbcUrlElements_getPort | /**
* @return the TCP/IP Port to connect to.
*/
public int getPort() {
return port;
} | 3.68 |
hbase_HBaseTestingUtility_createDirsAndSetProperties | /**
 * This is used before starting HDFS and map-reduce mini-clusters. Run something like the below to
* check for the likes of '/tmp' references -- i.e. references outside of the test data dir -- in
* the conf.
*
* <pre>
* Configuration conf = TEST_UTIL.getConfiguration();
* for (Iterator<Map.Entry<String, String>> i = conf.iterator(); i.hasNext();) {
* Map.Entry<String, String> e = i.next();
* assertFalse(e.getKey() + " " + e.getValue(), e.getValue().contains("/tmp"));
* }
* </pre>
*/
private void createDirsAndSetProperties() throws IOException {
setupClusterTestDir();
conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
createDirAndSetProperty("test.cache.data");
createDirAndSetProperty("hadoop.tmp.dir");
hadoopLogDir = createDirAndSetProperty("hadoop.log.dir");
createDirAndSetProperty("mapreduce.cluster.local.dir");
createDirAndSetProperty("mapreduce.cluster.temp.dir");
enableShortCircuit();
Path root = getDataTestDirOnTestFS("hadoop");
conf.set(MapreduceTestingShim.getMROutputDirProp(),
new Path(root, "mapred-output-dir").toString());
conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
conf.set("mapreduce.jobtracker.staging.root.dir",
new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
conf.set("yarn.app.mapreduce.am.staging-dir",
new Path(root, "mapreduce-am-staging-root-dir").toString());
// Frustrate yarn's and hdfs's attempts at writing /tmp.
// Below is fragile. Make it so we just interpolate any 'tmp' reference.
createDirAndSetProperty("yarn.node-labels.fs-store.root-dir");
createDirAndSetProperty("yarn.node-attribute.fs-store.root-dir");
createDirAndSetProperty("yarn.nodemanager.log-dirs");
createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.active-dir");
createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.done-dir");
createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
createDirAndSetProperty("dfs.journalnode.edits.dir");
createDirAndSetProperty("dfs.datanode.shared.file.descriptor.paths");
createDirAndSetProperty("nfs.dump.dir");
createDirAndSetProperty("java.io.tmpdir");
createDirAndSetProperty("dfs.journalnode.edits.dir");
createDirAndSetProperty("dfs.provided.aliasmap.inmemory.leveldb.dir");
createDirAndSetProperty("fs.s3a.committer.staging.tmp.path");
} | 3.68 |