name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, range 3.26–3.68)
---|---|---|
flink_PbCodegenUtils_getTypeStrFromLogicType | /**
 * Get the Java type string from a {@link LogicalType} fetched directly from the Flink type.
*
* @return The returned code phrase will be used as java type str in codegen sections.
*/
public static String getTypeStrFromLogicType(LogicalType type) {
switch (type.getTypeRoot()) {
case INTEGER:
return "int";
case BIGINT:
return "long";
case FLOAT:
return "float";
case DOUBLE:
return "double";
case BOOLEAN:
return "boolean";
case VARCHAR:
case CHAR:
return "StringData";
case VARBINARY:
case BINARY:
return "byte[]";
case ROW:
return "RowData";
case MAP:
return "MapData";
case ARRAY:
return "ArrayData";
default:
throw new IllegalArgumentException("Unsupported data type in schema: " + type);
}
} | 3.68 |
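For illustration, a minimal sketch of calling this helper with LogicalType values built through the Table API; the PbCodegenUtils import path is assumed and may differ between Flink versions.

```java
import org.apache.flink.formats.protobuf.util.PbCodegenUtils; // assumed package, adjust to your Flink version
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.logical.LogicalType;

public class TypeStrDemo {
    public static void main(String[] args) {
        // LogicalType instances obtained from the Table API type factory
        LogicalType intType = DataTypes.INT().getLogicalType();
        LogicalType stringType = DataTypes.STRING().getLogicalType();

        // Expected results, based on the switch above
        System.out.println(PbCodegenUtils.getTypeStrFromLogicType(intType));    // int
        System.out.println(PbCodegenUtils.getTypeStrFromLogicType(stringType)); // StringData
    }
}
```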
hadoop_OBSWriteOperationHelper_putObject | /**
* PUT an object directly (i.e. not via the transfer manager).
*
* @param putObjectRequest the request
* @return the upload initiated
* @throws IOException on problems
*/
PutObjectResult putObject(final PutObjectRequest putObjectRequest)
throws IOException {
try {
return OBSCommonUtils.putObjectDirect(owner, putObjectRequest);
} catch (ObsException e) {
throw OBSCommonUtils.translateException("put",
putObjectRequest.getObjectKey(), e);
}
} | 3.68 |
flink_FutureCompletingBlockingQueue_peek | /**
* Get the first element from the queue without removing it.
*
* @return the first element in the queue, or Null if the queue is empty.
*/
public T peek() {
lock.lock();
try {
return queue.peek();
} finally {
lock.unlock();
}
} | 3.68 |
flink_AbstractMapTypeInfo_getValueTypeInfo | /**
* Returns the type information for the values in the map.
*
* @return The type information for the values in the map.
*/
public TypeInformation<V> getValueTypeInfo() {
return valueTypeInfo;
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_prettyPrintRequests | /**
* Print a list of Resource Requests into a one line string.
*
* @param response list of ResourceRequest
* @param max number of ResourceRequest to print
* @return the printed one line string
*/
public static String prettyPrintRequests(List<ResourceRequest> response, int max) {
StringBuilder builder = new StringBuilder();
for (ResourceRequest rr : response) {
builder.append("[id:").append(rr.getAllocationRequestId())
.append(" loc:")
.append(rr.getResourceName())
.append(" num:")
.append(rr.getNumContainers())
.append(" pri:")
.append(((rr.getPriority() != null) ? rr.getPriority().getPriority() : -1))
.append("], ");
if (max != -1) {
if (max-- <= 0) {
break;
}
}
}
return builder.toString();
} | 3.68 |
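A side note on the truncation logic above: because the `max` check happens after the append, a non-negative `max` lets `max + 1` entries through, and `-1` disables truncation entirely. A small standalone sketch of the same pattern applied to plain strings:

```java
import java.util.Arrays;
import java.util.List;

public class TruncationDemo {
    // Same post-append truncation pattern as prettyPrintRequests, applied to plain strings
    static String printAtMost(List<String> items, int max) {
        StringBuilder builder = new StringBuilder();
        for (String item : items) {
            builder.append('[').append(item).append("], ");
            if (max != -1) {
                if (max-- <= 0) {
                    break;
                }
            }
        }
        return builder.toString();
    }

    public static void main(String[] args) {
        List<String> items = Arrays.asList("a", "b", "c", "d", "e");
        System.out.println(printAtMost(items, -1)); // all five entries
        System.out.println(printAtMost(items, 2));  // [a], [b], [c],  -- three entries, i.e. max + 1
    }
}
```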
flink_AccumulatorRegistry_getSnapshot | /**
* Creates a snapshot of this accumulator registry.
*
* @return a serialized accumulator map
*/
public AccumulatorSnapshot getSnapshot() {
try {
return new AccumulatorSnapshot(jobID, taskID, userAccumulators);
} catch (Throwable e) {
LOG.warn("Failed to serialize accumulators for task.", e);
return null;
}
} | 3.68 |
rocketmq-connect_AbstractStateManagementService_initialize | /**
* initialize cb config
*
* @param config
*/
@Override
public void initialize(WorkerConfig config, RecordConverter converter) {
// set config
this.converter = converter;
this.converter.configure(new HashMap<>());
this.statusTopic = config.getConnectStatusTopic();
this.dataSynchronizer = initializationDataSynchronizer(config);
new BrokerBasedLog(config,
this.statusTopic,
ConnectUtil.createGroupName(statusManagePrefix, config.getWorkerId()),
new StatusChangeCallback(),
Serdes.serdeFrom(String.class),
Serdes.serdeFrom(byte[].class),
enabledCompactTopic()
);
} | 3.68 |
hbase_ColumnFamilyDescriptorBuilder_setEncryptionKey | /**
* Set the raw crypto key attribute for the family
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setEncryptionKey(byte[] keyBytes) {
return setValue(ENCRYPTION_KEY_BYTES, new Bytes(keyBytes));
} | 3.68 |
hadoop_OBSCommonUtils_maybeAddBeginningSlash | /**
 * Prefix the OBS key with '/' if it does not already start with one.
*
* @param key object key
* @return new key
*/
static String maybeAddBeginningSlash(final String key) {
return !StringUtils.isEmpty(key) && !key.startsWith("/")
? "/" + key
: key;
} | 3.68 |
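A standalone sketch of the same normalization, with the commons-lang `StringUtils.isEmpty` check replaced by a plain null/empty test, just to show the input/output behavior:

```java
public class SlashDemo {
    // Mirrors maybeAddBeginningSlash: prefix '/' only when the key is non-empty and not already slashed
    static String maybeAddBeginningSlash(String key) {
        return key != null && !key.isEmpty() && !key.startsWith("/") ? "/" + key : key;
    }

    public static void main(String[] args) {
        System.out.println(maybeAddBeginningSlash("a/b.txt")); // /a/b.txt
        System.out.println(maybeAddBeginningSlash("/a/b"));    // /a/b (unchanged)
        System.out.println(maybeAddBeginningSlash(""));        // empty string (unchanged)
    }
}
```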
hbase_TablePermission_implies | /**
* Checks if this permission grants access to perform the given action on the given table and key
* value.
* @param table the table on which the operation is being performed
* @param kv the KeyValue on which the operation is being requested
* @param action the action requested
* @return <code>true</code> if the action is allowed over the given scope by this permission,
* otherwise <code>false</code>
*/
public boolean implies(TableName table, KeyValue kv, Action action) {
if (failCheckTable(table)) {
return false;
}
if (family != null && !CellUtil.matchingFamily(kv, family)) {
return false;
}
if (qualifier != null && !CellUtil.matchingQualifier(kv, qualifier)) {
return false;
}
// check actions
return super.implies(action);
} | 3.68 |
querydsl_Expressions_mapPath | /**
* Create a new Path expression
*
* @param keyType key type
* @param valueType value type
* @param queryType value expression type
* @param metadata path metadata
* @param <K> key type
* @param <V> value type
* @param <E> value expression type
* @return path expression
*/
public static <K, V, E extends SimpleExpression<? super V>> MapPath<K,V,E> mapPath(Class<? super K> keyType,
Class<? super V> valueType,
Class<E> queryType,
PathMetadata metadata) {
return new MapPath<K,V,E>(keyType, valueType, queryType, metadata);
} | 3.68 |
framework_VComboBox_setEmptySelectionCaption | /**
* Sets the empty selection caption for this VComboBox. The text is
* displayed in the text input when nothing is selected.
*
* @param emptySelectionCaption
* the empty selection caption
*
* @since 8.0.7
*/
public void setEmptySelectionCaption(String emptySelectionCaption) {
this.emptySelectionCaption = emptySelectionCaption;
if (selectedOptionKey == null) {
setText(emptySelectionCaption);
}
} | 3.68 |
hbase_Mutation_size | /**
* Number of KeyValues carried by this Mutation.
* @return the total number of KeyValues
*/
public int size() {
int size = 0;
for (List<Cell> cells : getFamilyCellMap().values()) {
size += cells.size();
}
return size;
} | 3.68 |
flink_RecoverableMultiPartUploadImpl_snapshotAndGetRecoverable | /**
* Creates a snapshot of this MultiPartUpload, from which the upload can be resumed.
*
 * <p>Data buffered locally that is smaller than {@link
 * org.apache.flink.fs.s3.common.FlinkS3FileSystem#S3_MULTIPART_MIN_PART_SIZE
 * S3_MULTIPART_MIN_PART_SIZE} cannot be uploaded as part of the MPU and is instead stored in S3
 * as an independent object.
*
* <p>This implementation currently blocks until all part uploads are complete and returns a
* completed future.
*/
@Override
public S3Recoverable snapshotAndGetRecoverable(
@Nullable final RefCountedFSOutputStream incompletePartFile) throws IOException {
final String incompletePartObjectName = safelyUploadSmallPart(incompletePartFile);
// make sure all other uploads are complete
// this currently makes the method blocking,
// to be made non-blocking in the future
awaitPendingPartsUpload();
final String objectName = currentUploadInfo.getObjectName();
final String uploadId = currentUploadInfo.getUploadId();
final List<PartETag> completedParts = currentUploadInfo.getCopyOfEtagsOfCompleteParts();
final long sizeInBytes = currentUploadInfo.getExpectedSizeInBytes();
if (incompletePartObjectName == null) {
return new S3Recoverable(objectName, uploadId, completedParts, sizeInBytes);
} else {
return new S3Recoverable(
objectName,
uploadId,
completedParts,
sizeInBytes,
incompletePartObjectName,
incompletePartFile.getPos());
}
} | 3.68 |
hbase_BackupInfo_compareTo | /**
* We use only time stamps to compare objects during sort operation
*/
@Override
public int compareTo(BackupInfo o) {
Long thisTS =
Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1));
Long otherTS = Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1));
return thisTS.compareTo(otherTS);
} | 3.68 |
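The comparison relies on the backup ID ending in a timestamp after the last underscore. A minimal sketch of that parsing, using illustrative IDs of the assumed `backup_<millis>` form:

```java
public class BackupIdOrderDemo {
    // Extract the trailing timestamp the same way compareTo does
    static long timestampOf(String backupId) {
        return Long.parseLong(backupId.substring(backupId.lastIndexOf("_") + 1));
    }

    public static void main(String[] args) {
        String older = "backup_1700000000000"; // illustrative IDs ending in a millisecond timestamp
        String newer = "backup_1700000360000";
        System.out.println(Long.compare(timestampOf(older), timestampOf(newer))); // -1, so older sorts first
    }
}
```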
flink_CatalogManager_getBuiltInCatalogName | /**
* Gets the built-in catalog name. The built-in catalog is used for storing all non-serializable
* transient meta-objects.
*
* @return the built-in catalog name
*/
public String getBuiltInCatalogName() {
return builtInCatalogName;
} | 3.68 |
hudi_HoodieFlinkWriteClient_insertOverwrite | /**
 * Removes all existing records from the partitions affected and inserts the given HoodieRecords into the table.
*
* @param records HoodieRecords to insert
* @param instantTime Instant time of the commit
* @return list of WriteStatus to inspect errors and counts
*/
public List<WriteStatus> insertOverwrite(
List<HoodieRecord<T>> records, final String instantTime) {
HoodieTable<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> table =
initTable(WriteOperationType.INSERT_OVERWRITE, Option.ofNullable(instantTime));
table.validateInsertSchema();
preWrite(instantTime, WriteOperationType.INSERT_OVERWRITE, table.getMetaClient());
// create the write handle if not exists
HoodieWriteMetadata<List<WriteStatus>> result;
try (AutoCloseableWriteHandle closeableHandle = new AutoCloseableWriteHandle(records, instantTime, table, true)) {
result = ((HoodieFlinkTable<T>) table).insertOverwrite(context, closeableHandle.getWriteHandle(), instantTime, records);
}
return postWrite(result, instantTime, table);
} | 3.68 |
flink_LookupCacheManager_unregisterCache | /**
* Release the cache with the given identifier from the manager.
*
* <p>The manager will track a reference count of managed caches, and will close the cache if
* the reference count reaches 0.
*/
public synchronized void unregisterCache(String cacheIdentifier) {
RefCountedCache refCountedCache =
checkNotNull(
managedCaches.get(cacheIdentifier),
"Cache identifier '%s' is not registered",
cacheIdentifier);
if (refCountedCache.release()) {
managedCaches.remove(cacheIdentifier);
}
} | 3.68 |
flink_BlobClient_uploadFiles | /**
* Uploads the JAR files to the {@link PermanentBlobService} of the {@link BlobServer} at the
* given address with HA as configured.
*
* @param serverAddress Server address of the {@link BlobServer}
* @param clientConfig Any additional configuration for the blob client
* @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
* @param files List of files to upload
* @throws IOException if the upload fails
*/
public static List<PermanentBlobKey> uploadFiles(
InetSocketAddress serverAddress,
Configuration clientConfig,
JobID jobId,
List<Path> files)
throws IOException {
checkNotNull(jobId);
if (files.isEmpty()) {
return Collections.emptyList();
} else {
List<PermanentBlobKey> blobKeys = new ArrayList<>();
try (BlobClient blobClient = new BlobClient(serverAddress, clientConfig)) {
for (final Path file : files) {
final PermanentBlobKey key = blobClient.uploadFile(jobId, file);
blobKeys.add(key);
}
}
return blobKeys;
}
} | 3.68 |
hadoop_AbfsConfiguration_getBoolean | /**
* Returns the account-specific value if it exists, then looks for an
* account-agnostic value, and finally tries the default value.
* @param key Account-agnostic configuration key
* @param defaultValue Value returned if none is configured
* @return value if one exists, else the default value
*/
public boolean getBoolean(String key, boolean defaultValue) {
return rawConfig.getBoolean(accountConf(key), rawConfig.getBoolean(key, defaultValue));
} | 3.68 |
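A standalone sketch of the lookup precedence (account-specific key, then account-agnostic key, then the default), using a plain map; the `<key>.<account>` suffix convention and the key name are assumptions for illustration only:

```java
import java.util.HashMap;
import java.util.Map;

public class PrecedenceDemo {
    // Mirrors getBoolean: the account-specific value wins, then the plain key, then the default
    static boolean getBoolean(Map<String, Boolean> conf, String key, String account, boolean defaultValue) {
        boolean agnostic = conf.getOrDefault(key, defaultValue);
        return conf.getOrDefault(key + "." + account, agnostic); // suffix convention assumed for illustration
    }

    public static void main(String[] args) {
        Map<String, Boolean> conf = new HashMap<>();
        conf.put("fs.azure.example.flag", false);             // account-agnostic value
        conf.put("fs.azure.example.flag.myaccount", true);    // account-specific override
        System.out.println(getBoolean(conf, "fs.azure.example.flag", "myaccount", true));    // true
        System.out.println(getBoolean(conf, "fs.azure.example.flag", "otheraccount", true)); // false
    }
}
```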
framework_HierarchyMapper_doCollapse | /**
* Collapses the given item.
*
* @param item
* the item to collapse
* @param position
* the index of item
*
* @return range of rows removed by collapsing the item
* @deprecated Use {@link #collapse(Object, Integer)} instead.
*/
@Deprecated
public Range doCollapse(T item, Optional<Integer> position) {
return collapse(item, position.orElse(null));
} | 3.68 |
hbase_ZKReplicationQueueStorageForMigration_listAllHFileRefs | /**
* Pair<PeerId, List<HFileRefs>>
*/
@SuppressWarnings("unchecked")
public MigrationIterator<Pair<String, List<String>>> listAllHFileRefs() throws KeeperException {
List<String> peerIds = ZKUtil.listChildrenNoWatch(zookeeper, hfileRefsZNode);
if (peerIds == null || peerIds.isEmpty()) {
ZKUtil.deleteNodeRecursively(zookeeper, hfileRefsZNode);
return EMPTY_ITER;
}
Iterator<String> iter = peerIds.iterator();
return new MigrationIterator<Pair<String, List<String>>>() {
private String previousPeerId;
@Override
public Pair<String, List<String>> next() throws KeeperException {
if (previousPeerId != null) {
ZKUtil.deleteNodeRecursively(zookeeper, getHFileRefsPeerNode(previousPeerId));
}
if (!iter.hasNext()) {
ZKUtil.deleteNodeRecursively(zookeeper, hfileRefsZNode);
return null;
}
String peerId = iter.next();
List<String> refs = ZKUtil.listChildrenNoWatch(zookeeper, getHFileRefsPeerNode(peerId));
previousPeerId = peerId;
return Pair.newPair(peerId, refs != null ? refs : Collections.emptyList());
}
};
} | 3.68 |
hadoop_FsCommand_registerCommands | /**
* Register the command classes used by the fs subcommand
* @param factory where to register the class
*/
public static void registerCommands(CommandFactory factory) {
factory.registerCommands(AclCommands.class);
factory.registerCommands(CopyCommands.class);
factory.registerCommands(Count.class);
factory.registerCommands(Delete.class);
factory.registerCommands(Display.class);
factory.registerCommands(Find.class);
factory.registerCommands(FsShellPermissions.class);
factory.registerCommands(FsUsage.class);
factory.registerCommands(Ls.class);
factory.registerCommands(Mkdir.class);
factory.registerCommands(MoveCommands.class);
factory.registerCommands(SetReplication.class);
factory.registerCommands(Stat.class);
factory.registerCommands(Tail.class);
factory.registerCommands(Head.class);
factory.registerCommands(Test.class);
factory.registerCommands(TouchCommands.class);
factory.registerCommands(Truncate.class);
factory.registerCommands(SnapshotCommands.class);
factory.registerCommands(XAttrCommands.class);
factory.registerCommands(Concat.class);
} | 3.68 |
hadoop_QueueResourceQuotas_getEffectiveMinResource | /*
* Effective Minimum Resource
*/
public Resource getEffectiveMinResource() {
return _get(NL, ResourceType.EFF_MIN_RESOURCE);
} | 3.68 |
hbase_RSGroupUtil_fillTables | /**
* Fill the tables field for {@link RSGroupInfo}, for backward compatibility.
*/
@SuppressWarnings("deprecation")
public static RSGroupInfo fillTables(RSGroupInfo rsGroupInfo, Collection<TableDescriptor> tds) {
RSGroupInfo newRsGroupInfo = new RSGroupInfo(rsGroupInfo);
Predicate<TableDescriptor> filter;
if (rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
filter = td -> {
Optional<String> optGroupName = td.getRegionServerGroup();
return !optGroupName.isPresent() || optGroupName.get().equals(RSGroupInfo.DEFAULT_GROUP);
};
} else {
filter = td -> {
Optional<String> optGroupName = td.getRegionServerGroup();
return optGroupName.isPresent() && optGroupName.get().equals(newRsGroupInfo.getName());
};
}
tds.stream().filter(filter).map(TableDescriptor::getTableName)
.forEach(newRsGroupInfo::addTable);
return newRsGroupInfo;
} | 3.68 |
hbase_RegionLocator_getRegionLocation | /**
* Finds the region with the given replica id on which the given row is being served.
* @param row Row to find.
* @param replicaId the replica id
* @return Location of the row.
* @throws IOException if a remote or network exception occurs
*/
default HRegionLocation getRegionLocation(byte[] row, int replicaId) throws IOException {
return getRegionLocation(row, replicaId, false);
} | 3.68 |
hadoop_CachedDNSToSwitchMapping_cacheResolvedHosts | /**
* Caches the resolved host:rack mappings. The two list
* parameters must be of equal size.
*
* @param uncachedHosts a list of hosts that were uncached
* @param resolvedHosts a list of resolved host entries where the element
* at index(i) is the resolved value for the entry in uncachedHosts[i]
*/
private void cacheResolvedHosts(List<String> uncachedHosts,
List<String> resolvedHosts) {
// Cache the result
if (resolvedHosts != null) {
for (int i=0; i<uncachedHosts.size(); i++) {
cache.put(uncachedHosts.get(i), resolvedHosts.get(i));
}
}
} | 3.68 |
hbase_VersionModel_getJVMVersion | /** Returns the JVM vendor and version */
@XmlAttribute(name = "JVM")
public String getJVMVersion() {
return jvmVersion;
} | 3.68 |
dubbo_RpcStatus_getAverageTps | /**
* Calculate average TPS (Transaction per second).
*
* @return tps
*/
public long getAverageTps() {
if (getTotalElapsed() >= 1000L) {
return getTotal() / (getTotalElapsed() / 1000L);
}
return getTotal();
} | 3.68 |
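A standalone sketch of the same arithmetic, showing the sub-second fallback and the truncation of elapsed time to whole seconds:

```java
public class TpsDemo {
    // Same arithmetic as getAverageTps
    static long averageTps(long total, long totalElapsedMillis) {
        if (totalElapsedMillis >= 1000L) {
            return total / (totalElapsedMillis / 1000L);
        }
        return total;
    }

    public static void main(String[] args) {
        System.out.println(averageTps(500, 2000)); // 250: 500 invocations over 2 seconds
        System.out.println(averageTps(500, 2500)); // 250: elapsed time is truncated to whole seconds
        System.out.println(averageTps(500, 999));  // 500: under one second, the raw total is returned
    }
}
```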
hbase_HBaseTestingUtility_createPreSplitLoadTestTable | /**
* Creates a pre-split table for load testing. If the table already exists, logs a warning and
* continues.
* @return the number of regions the table was split into
*/
public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor td,
ColumnFamilyDescriptor[] cds, SplitAlgorithm splitter, int numRegionsPerServer)
throws IOException {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
for (ColumnFamilyDescriptor cd : cds) {
if (!td.hasColumnFamily(cd.getName())) {
builder.setColumnFamily(cd);
}
}
td = builder.build();
int totalNumberOfRegions = 0;
Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
Admin admin = unmanagedConnection.getAdmin();
try {
      // create a table with pre-split regions.
      // The number of splits is set as:
      // (region servers * regions per region server).
int numberOfServers = admin.getRegionServers().size();
if (numberOfServers == 0) {
throw new IllegalStateException("No live regionservers");
}
totalNumberOfRegions = numberOfServers * numRegionsPerServer;
LOG.info("Number of live regionservers: " + numberOfServers + ", "
+ "pre-splitting table into " + totalNumberOfRegions + " regions " + "(regions per server: "
+ numRegionsPerServer + ")");
byte[][] splits = splitter.split(totalNumberOfRegions);
admin.createTable(td, splits);
} catch (MasterNotRunningException e) {
LOG.error("Master not running", e);
throw new IOException(e);
} catch (TableExistsException e) {
LOG.warn("Table " + td.getTableName() + " already exists, continuing");
} finally {
admin.close();
unmanagedConnection.close();
}
return totalNumberOfRegions;
} | 3.68 |
framework_VTabsheetBase_getConnectorForWidget | /**
* For internal use only. May be removed or replaced in the future.
*
* @param widget
* the widget whose connector to find
* @return the connector
*/
protected ComponentConnector getConnectorForWidget(Widget widget) {
return ConnectorMap.get(client).getConnector(widget);
} | 3.68 |
hbase_MasterObserver_postCompletedCreateTableAction | /**
* Called after the createTable operation has been requested. Called as part of create table RPC
* call. Called as part of create table procedure and it is async to the create RPC call.
* @param ctx the environment to interact with the framework and master
* @param desc the TableDescriptor for the table
* @param regions the initial regions created for the table
*/
default void postCompletedCreateTableAction(
final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableDescriptor desc,
final RegionInfo[] regions) throws IOException {
} | 3.68 |
hadoop_CsiGrpcClient_createNodeBlockingStub | /**
* Creates a blocking stub for CSI node plugin on the given channel.
* @return the blocking stub
*/
public NodeGrpc.NodeBlockingStub createNodeBlockingStub() {
return NodeGrpc.newBlockingStub(channel);
} | 3.68 |
hbase_MetricsConnection_addThreadPools | /** Add thread pools of additional connections to the metrics */
private void addThreadPools(Supplier<ThreadPoolExecutor> batchPool,
Supplier<ThreadPoolExecutor> metaPool) {
batchPools.add(batchPool);
metaPools.add(metaPool);
} | 3.68 |
hudi_AvroSchemaCompatibility_checkReaderWriterCompatibility | /**
* Validates that the provided reader schema can be used to decode avro data
* written with the provided writer schema.
*
* @param reader schema to check.
* @param writer schema to check.
* @return a result object identifying any compatibility errors.
*/
public static SchemaPairCompatibility checkReaderWriterCompatibility(final Schema reader,
final Schema writer,
boolean checkNamingOverride) {
final SchemaCompatibilityResult compatibility =
new ReaderWriterCompatibilityChecker(checkNamingOverride).getCompatibility(reader, writer);
final String message;
switch (compatibility.getCompatibility()) {
case INCOMPATIBLE: {
message = String.format(
"Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
writer.toString(true), reader.toString(true));
break;
}
case COMPATIBLE: {
message = READER_WRITER_COMPATIBLE_MESSAGE;
break;
}
default:
throw new AvroRuntimeException("Unknown compatibility: " + compatibility);
}
return new SchemaPairCompatibility(compatibility, reader, writer, message);
} | 3.68 |
hudi_BaseHoodieWriteClient_scheduleClusteringAtInstant | /**
* Schedules a new clustering instant with passed-in instant time.
* @param instantTime clustering Instant Time
* @param extraMetadata Extra Metadata to be stored
*/
public boolean scheduleClusteringAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException {
return scheduleTableService(instantTime, extraMetadata, TableServiceType.CLUSTER).isPresent();
} | 3.68 |
querydsl_MetaDataExporter_setExportTables | /**
* Set whether tables should be exported
*
* @param exportTables
*/
public void setExportTables(boolean exportTables) {
this.exportTables = exportTables;
} | 3.68 |
flink_MemoryStateBackend_configure | /**
 * Creates a copy of this state backend that uses the values defined in the configuration for
 * fields that were not specified in this state backend.
*
* @param config The configuration
* @param classLoader The class loader
* @return The re-configured variant of the state backend
*/
@Override
public MemoryStateBackend configure(ReadableConfig config, ClassLoader classLoader) {
return new MemoryStateBackend(this, config, classLoader);
} | 3.68 |
morf_ParallelQueryHint_getDegreeOfParallelism | /**
* @return the degree of parallelism for this PARALLEL query hint.
*/
public Optional<Integer> getDegreeOfParallelism() {
return Optional.ofNullable(degreeOfParallelism);
} | 3.68 |
hudi_HoodieEmptyRecord_readRecordPayload | /**
* NOTE: This method is declared final to make sure there's no polymorphism and therefore
* JIT compiler could perform more aggressive optimizations
*/
@Override
protected final T readRecordPayload(Kryo kryo, Input input) {
this.type = kryo.readObject(input, HoodieRecordType.class);
this.orderingVal = (Comparable<?>) kryo.readClassAndObject(input);
// NOTE: [[EmptyRecord]]'s payload is always null
return null;
} | 3.68 |
morf_ResultSetIterator_remove | /**
* @see java.util.Iterator#remove()
*/
@Override
public void remove() {
throw new UnsupportedOperationException("Cannot remove items from a result set iterator");
} | 3.68 |
framework_AbstractComponentConnector_onDropTargetAttached | /**
* Invoked when a {@link DropTargetExtensionConnector} has been attached to
* this component.
* <p>
* By default, does nothing. If you need to apply some changes to the
* widget, override this method.
* <p>
* This is a framework internal method, and should not be invoked manually.
*
* @since 8.1
* @see #onDropTargetDetached()
*/
public void onDropTargetAttached() {
} | 3.68 |
hadoop_DiskBalancerWorkStatus_setWorkItem | /**
* Sets the work item.
*
* @param workItem - sets the work item information
*/
public void setWorkItem(DiskBalancerWorkItem workItem) {
this.workItem = workItem;
} | 3.68 |
hbase_MasterProcedureScheduler_wakeTableSharedLock | /**
* Wake the procedures waiting for the specified table
* @param procedure the procedure releasing the lock
* @param table the name of the table that has the shared lock
*/
public void wakeTableSharedLock(final Procedure<?> procedure, final TableName table) {
schedLock();
try {
final LockAndQueue namespaceLock = locking.getNamespaceLock(table.getNamespaceAsString());
final LockAndQueue tableLock = locking.getTableLock(table);
int waitingCount = 0;
if (tableLock.releaseSharedLock()) {
addToRunQueue(tableRunQueue, getTableQueue(table),
() -> procedure + " released the shared lock");
waitingCount += wakeWaitingProcedures(tableLock);
}
if (namespaceLock.releaseSharedLock()) {
waitingCount += wakeWaitingProcedures(namespaceLock);
}
wakePollIfNeeded(waitingCount);
} finally {
schedUnlock();
}
} | 3.68 |
zxing_MatrixToImageWriter_toBufferedImage | /**
* As {@link #toBufferedImage(BitMatrix)}, but allows customization of the output.
*
* @param matrix {@link BitMatrix} to write
* @param config output configuration
* @return {@link BufferedImage} representation of the input
*/
public static BufferedImage toBufferedImage(BitMatrix matrix, MatrixToImageConfig config) {
int width = matrix.getWidth();
int height = matrix.getHeight();
BufferedImage image = new BufferedImage(width, height, config.getBufferedImageColorModel());
int onColor = config.getPixelOnColor();
int offColor = config.getPixelOffColor();
int[] rowPixels = new int[width];
BitArray row = new BitArray(width);
for (int y = 0; y < height; y++) {
row = matrix.getRow(y, row);
for (int x = 0; x < width; x++) {
rowPixels[x] = row.get(x) ? onColor : offColor;
}
image.setRGB(0, y, width, 1, rowPixels, 0, width);
}
return image;
} | 3.68 |
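For context, a minimal sketch of producing a BitMatrix with zxing's QRCodeWriter and rendering it through this method with custom ARGB colors; the contents and dimensions are illustrative:

```java
import java.awt.image.BufferedImage;

import com.google.zxing.BarcodeFormat;
import com.google.zxing.WriterException;
import com.google.zxing.client.j2se.MatrixToImageConfig;
import com.google.zxing.client.j2se.MatrixToImageWriter;
import com.google.zxing.common.BitMatrix;
import com.google.zxing.qrcode.QRCodeWriter;

public class QrImageDemo {
    public static void main(String[] args) throws WriterException {
        // Encode some text as a QR code, then render it black-on-white (ARGB on/off colors)
        BitMatrix matrix = new QRCodeWriter().encode("https://example.org", BarcodeFormat.QR_CODE, 300, 300);
        MatrixToImageConfig config = new MatrixToImageConfig(0xFF000000, 0xFFFFFFFF);
        BufferedImage image = MatrixToImageWriter.toBufferedImage(matrix, config);
        System.out.println(image.getWidth() + "x" + image.getHeight()); // 300x300
    }
}
```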
framework_VaadinService_initDependencyFilters | /**
* Updates the list of resource dependency filters to use for the
* application.
* <p>
* The filters can freely update the dependencies in any way they see fit
* (bundle, rewrite, merge).
* <p>
* The framework collects filters from the {@link SessionInitEvent} where
* session init listeners can add them. This method is called with the
* combined list to optionally modify it, and the result is then stored by
* the caller as the final list to use.
* <p>
* The filters are called in the order the session init listeners are
* called, which is undefined. If you need a specific order, you can
* override this method and alter the order.
*
* @since 8.1
* @param sessionInitFilters
* a list of dependency filters collected from the session init
* event
* @return the list of dependency filters to use for filtering resources,
* not null
* @throws ServiceException
* if something went wrong while determining the filters
*
*/
protected List<DependencyFilter> initDependencyFilters(
List<DependencyFilter> sessionInitFilters) throws ServiceException {
assert sessionInitFilters != null;
return sessionInitFilters;
} | 3.68 |
hadoop_TimelineEntity_getPrimaryFilters | /**
* Get the primary filters
*
* @return the primary filters
*/
public Map<String, Set<Object>> getPrimaryFilters() {
return primaryFilters;
} | 3.68 |
hudi_WriteProfiles_getCommitMetadataSafely | /**
* Returns the commit metadata of the given instant safely.
*
* @param tableName The table name
* @param basePath The table base path
* @param instant The hoodie instant
* @param timeline The timeline
* @return the commit metadata or empty if any error occurs
*/
public static Option<HoodieCommitMetadata> getCommitMetadataSafely(
String tableName,
Path basePath,
HoodieInstant instant,
HoodieTimeline timeline) {
try {
byte[] data = timeline.getInstantDetails(instant).get();
return Option.of(HoodieCommitMetadata.fromBytes(data, HoodieCommitMetadata.class));
} catch (FileNotFoundException fe) {
// make this fail safe.
LOG.warn("Instant {} was deleted by the cleaner, ignore", instant.getTimestamp());
return Option.empty();
} catch (Throwable throwable) {
LOG.error("Get write metadata for table {} with instant {} and path: {} error",
tableName, instant.getTimestamp(), basePath);
return Option.empty();
}
} | 3.68 |
hadoop_WebServlet_doGet | /**
* Get method is modified to support impersonation and Kerberos
* SPNEGO token by forcing client side redirect when accessing
* "/" (root) of the web application context.
*/
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
if (request.getRequestURI().equals("/")) {
StringBuilder location = new StringBuilder();
location.append("index.html");
if (request.getQueryString()!=null) {
// echo query string but prevent HTTP response splitting
location.append("?");
location.append(request.getQueryString()
.replaceAll("\n", "").replaceAll("\r", ""));
}
response.sendRedirect(location.toString());
} else {
super.doGet(request, response);
}
} | 3.68 |
morf_AbstractSqlDialectTest_expectedUpdateUsingSourceTableInDifferentSchema | /**
* @return The expected SQL for performing an update with a source table which lives in a different schema.
*/
protected String expectedUpdateUsingSourceTableInDifferentSchema() {
return "UPDATE " + tableName("FloatingRateRate") + " A SET settlementFrequency = (SELECT settlementFrequency FROM MYSCHEMA.FloatingRateDetail B WHERE (A.floatingRateDetailId = B.id))";
} | 3.68 |
framework_ComboBoxElement_sendKeys | /**
* Use this method to simulate typing into an element, which may set its
* value.
*
* @param delay
* delay after sending each individual key (mainly needed for
* PhantomJS)
* @param keysToSend
* keys to type into the element
*/
public void sendKeys(int delay, CharSequence... keysToSend) {
WebElement input = getInputField();
for (CharSequence key : keysToSend) {
input.sendKeys(key);
try {
Thread.sleep(delay);
} catch (InterruptedException e) {
}
}
} | 3.68 |
querydsl_GeometryExpressions_ymin | /**
* Returns Y minima of a bounding box 2d or 3d or a geometry.
*
* @param expr geometry
* @return y minima
*/
public static NumberExpression<Double> ymin(GeometryExpression<?> expr) {
return Expressions.numberOperation(Double.class, SpatialOps.YMIN, expr);
} | 3.68 |
MagicPlugin_Wand_updateHotbarCount | // Update the hotbars inventory list to match the most recently configured value
// This will be followed with checkHotbarCount, after the inventories have been built
// This catches the case of the hotbar count having changed so we can preserve the location
// of spells in the main inventories.
protected void updateHotbarCount() {
int hotbarCount = 0;
if (hasProperty("hotbar_inventory_count")) {
hotbarCount = Math.max(1, getInt("hotbar_inventory_count", 1));
} else {
hotbarCount = getHotbarCount();
}
if (hotbarCount != hotbars.size()) {
if (isInventoryOpen()) {
closeInventory();
}
hotbars.clear();
while (hotbars.size() < hotbarCount) {
hotbars.add(new WandInventory(HOTBAR_INVENTORY_SIZE));
}
while (hotbars.size() > hotbarCount) {
hotbars.remove(0);
}
}
} | 3.68 |
morf_AliasedField_shallowCopy | /**
* Creates a shallow copy of the element, applying the alias.
*
* @param aliasName New alias.
* @return The shallow copy.
*/
protected AliasedField shallowCopy(String aliasName) {
throw new UnsupportedOperationException("Not refactored");
} | 3.68 |
flink_FlinkAggregateExpandDistinctAggregatesRule_convertMonopole | /**
* Converts an aggregate relational expression that contains just one distinct aggregate
* function (or perhaps several over the same arguments) and no non-distinct aggregate
* functions.
*/
private RelBuilder convertMonopole(
RelBuilder relBuilder, Aggregate aggregate, List<Integer> argList, int filterArg) {
// For example,
// SELECT deptno, COUNT(DISTINCT sal), SUM(DISTINCT sal)
// FROM emp
// GROUP BY deptno
//
// becomes
//
// SELECT deptno, COUNT(distinct_sal), SUM(distinct_sal)
// FROM (
// SELECT DISTINCT deptno, sal AS distinct_sal
// FROM EMP GROUP BY deptno)
// GROUP BY deptno
// Project the columns of the GROUP BY plus the arguments
// to the agg function.
final Map<Integer, Integer> sourceOf = new HashMap<>();
createSelectDistinct(relBuilder, aggregate, argList, filterArg, sourceOf);
// Create an aggregate on top, with the new aggregate list.
final List<AggregateCall> newAggCalls =
com.google.common.collect.Lists.newArrayList(aggregate.getAggCallList());
rewriteAggCalls(newAggCalls, argList, sourceOf);
final int cardinality = aggregate.getGroupSet().cardinality();
relBuilder.push(
aggregate.copy(
aggregate.getTraitSet(),
relBuilder.build(),
ImmutableBitSet.range(cardinality),
null,
newAggCalls));
return relBuilder;
} | 3.68 |
zxing_Code93Writer_encode | /**
* @param contents barcode contents to encode. It should not be encoded for extended characters.
* @return a {@code boolean[]} of horizontal pixels (false = white, true = black)
*/
@Override
public boolean[] encode(String contents) {
contents = convertToExtended(contents);
int length = contents.length();
if (length > 80) {
throw new IllegalArgumentException("Requested contents should be less than 80 digits long after " +
"converting to extended encoding, but got " + length);
}
//length of code + 2 start/stop characters + 2 checksums, each of 9 bits, plus a termination bar
int codeWidth = (contents.length() + 2 + 2) * 9 + 1;
boolean[] result = new boolean[codeWidth];
//start character (*)
int pos = appendPattern(result, 0, Code93Reader.ASTERISK_ENCODING);
for (int i = 0; i < length; i++) {
int indexInString = Code93Reader.ALPHABET_STRING.indexOf(contents.charAt(i));
pos += appendPattern(result, pos, Code93Reader.CHARACTER_ENCODINGS[indexInString]);
}
//add two checksums
int check1 = computeChecksumIndex(contents, 20);
pos += appendPattern(result, pos, Code93Reader.CHARACTER_ENCODINGS[check1]);
//append the contents to reflect the first checksum added
contents += Code93Reader.ALPHABET_STRING.charAt(check1);
int check2 = computeChecksumIndex(contents, 15);
pos += appendPattern(result, pos, Code93Reader.CHARACTER_ENCODINGS[check2]);
//end character (*)
pos += appendPattern(result, pos, Code93Reader.ASTERISK_ENCODING);
//termination bar (single black bar)
result[pos] = true;
return result;
} | 3.68 |
hudi_CleanPlanner_hasPendingFiles | /**
* Returns whether there are uncommitted data files under the given partition,
 * the pending files are generated by the inflight instants and may be ready to commit;
 * the partition cannot be deleted as a whole if any pending file exists.
*
* <p>IMPORTANT: {@code fsView.getAllFileGroups} does not return pending file groups for metadata table,
* file listing must be used instead.
*/
private boolean hasPendingFiles(String partitionPath) {
try {
HoodieTableFileSystemView fsView = new HoodieTableFileSystemView(hoodieTable.getMetaClient(), hoodieTable.getActiveTimeline());
Path fullPartitionPath = new Path(hoodieTable.getMetaClient().getBasePathV2(), partitionPath);
fsView.addFilesToView(FSUtils.getAllDataFilesInPartition(hoodieTable.getMetaClient().getFs(), fullPartitionPath));
// use #getAllFileGroups(partitionPath) instead of #getAllFileGroups() to exclude the replaced file groups.
return fsView.getAllFileGroups(partitionPath).findAny().isPresent();
} catch (Exception ex) {
// if any exception throws, assume there are existing pending files
LOG.warn("Error while checking the pending files under partition: " + partitionPath + ", assumes the files exist", ex);
return true;
}
} | 3.68 |
flink_NFAStateNameHandler_clear | /** Clear the names added during checking name uniqueness. */
public void clear() {
usedNames.clear();
} | 3.68 |
graphhopper_Path_getTime | /**
* @return time in millis
*/
public long getTime() {
return time;
} | 3.68 |
hadoop_FileIoProvider_move | /**
* Move the src file to the target using
* {@link Files#move(Path, Path, CopyOption...)}.
*
* @param volume target volume. null if unavailable.
* @param src source path.
* @param target target path.
* @param options See {@link Files#move} for a description
* of the options.
* @throws IOException
*/
public void move(
@Nullable FsVolumeSpi volume, Path src, Path target,
CopyOption... options) throws IOException {
final long begin = profilingEventHook.beforeMetadataOp(volume, MOVE);
try {
faultInjectorEventHook.beforeMetadataOp(volume, MOVE);
Files.move(src, target, options);
profilingEventHook.afterMetadataOp(volume, MOVE, begin);
} catch(Exception e) {
onFailure(volume, begin);
throw e;
}
} | 3.68 |
flink_RocksDBResourceContainer_getColumnOptions | /** Gets the RocksDB {@link ColumnFamilyOptions} to be used for all RocksDB instances. */
public ColumnFamilyOptions getColumnOptions() {
// initial options from common profile
ColumnFamilyOptions opt = createBaseCommonColumnOptions();
handlesToClose.add(opt);
// load configurable options on top of pre-defined profile
setColumnFamilyOptionsFromConfigurableOptions(opt, handlesToClose);
// add user-defined options, if specified
if (optionsFactory != null) {
opt = optionsFactory.createColumnOptions(opt, handlesToClose);
}
// if sharedResources is non-null, use the block cache from it and
// set necessary options for performance consideration with memory control
if (sharedResources != null) {
final RocksDBSharedResources rocksResources = sharedResources.getResourceHandle();
final Cache blockCache = rocksResources.getCache();
TableFormatConfig tableFormatConfig = opt.tableFormatConfig();
BlockBasedTableConfig blockBasedTableConfig;
if (tableFormatConfig == null) {
blockBasedTableConfig = new BlockBasedTableConfig();
} else {
Preconditions.checkArgument(
tableFormatConfig instanceof BlockBasedTableConfig,
"We currently only support BlockBasedTableConfig When bounding total memory.");
blockBasedTableConfig = (BlockBasedTableConfig) tableFormatConfig;
}
if (rocksResources.isUsingPartitionedIndexFilters()
&& overwriteFilterIfExist(blockBasedTableConfig)) {
blockBasedTableConfig.setIndexType(IndexType.kTwoLevelIndexSearch);
blockBasedTableConfig.setPartitionFilters(true);
blockBasedTableConfig.setPinTopLevelIndexAndFilter(true);
}
blockBasedTableConfig.setBlockCache(blockCache);
blockBasedTableConfig.setCacheIndexAndFilterBlocks(true);
blockBasedTableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true);
blockBasedTableConfig.setPinL0FilterAndIndexBlocksInCache(true);
opt.setTableFormatConfig(blockBasedTableConfig);
}
return opt;
} | 3.68 |
hudi_HoodieWriteCommitPulsarCallbackConfig_setCallbackPulsarConfigIfNeeded | /**
* Set default value for {@link HoodieWriteCommitPulsarCallbackConfig} if needed.
*/
public static void setCallbackPulsarConfigIfNeeded(HoodieConfig config) {
config.setDefaultValue(PRODUCER_ROUTE_MODE);
config.setDefaultValue(OPERATION_TIMEOUT);
config.setDefaultValue(CONNECTION_TIMEOUT);
config.setDefaultValue(REQUEST_TIMEOUT);
config.setDefaultValue(KEEPALIVE_INTERVAL);
config.setDefaultValue(PRODUCER_SEND_TIMEOUT);
config.setDefaultValue(PRODUCER_PENDING_QUEUE_SIZE);
config.setDefaultValue(PRODUCER_PENDING_SIZE);
config.setDefaultValue(PRODUCER_BLOCK_QUEUE_FULL);
} | 3.68 |
framework_BeanValidator_createContext | /**
* Creates a simple message interpolation context based on the given
* constraint violation.
*
* @param violation
* the constraint violation
* @return the message interpolation context
*/
protected Context createContext(ConstraintViolation<?> violation) {
return new ContextImpl(violation);
} | 3.68 |
morf_SqlServerMetaDataProvider_fetchIdentityColumns | /**
 * Fetch the identity (autonumber) columns and their start values
*/
private void fetchIdentityColumns() {
try {
Statement statement = connection.createStatement();
try {
ResultSet resultSet = statement.executeQuery(String.format(AUTONUM_START_QUERY, schemaName));
try {
while (resultSet.next()) {
String tableName = resultSet.getString(1);
String columnName = resultSet.getString(2);
if (!identityColumns.containsKey(tableName)) {
identityColumns.put(tableName, new HashMap<String, Integer>());
}
identityColumns.get(tableName).put(columnName, resultSet.getInt(3));
}
} finally {
resultSet.close();
}
} finally {
statement.close();
}
} catch (SQLException e) {
throw new RuntimeSqlException("Error fetching identity columns", e);
}
} | 3.68 |
framework_SerializerHelper_writeClass | /**
* Serializes the class reference so {@link #readClass(ObjectInputStream)}
* can deserialize it. Supports null class references.
*
* @param out
* The {@link ObjectOutputStream} to serialize to.
* @param cls
* A class or null.
* @throws IOException
* Rethrows any IOExceptions from the ObjectOutputStream
*/
public static void writeClass(ObjectOutputStream out, Class<?> cls)
throws IOException {
if (cls == null) {
out.writeObject(null);
} else {
out.writeObject(cls.getName());
}
} | 3.68 |
cron-utils_CronDefinitionBuilder_withCronValidation | /**
* Adds a cron validation.
* @param validation - constraint validation
* @return this CronDefinitionBuilder instance
*/
public CronDefinitionBuilder withCronValidation(final CronConstraint validation) {
cronConstraints.add(validation);
return this;
} | 3.68 |
zxing_VCardResultParser_formatNames | /**
* Formats name fields of the form "Public;John;Q.;Reverend;III" into a form like
* "Reverend John Q. Public III".
*
* @param names name values to format, in place
*/
private static void formatNames(Iterable<List<String>> names) {
if (names != null) {
for (List<String> list : names) {
String name = list.get(0);
String[] components = new String[5];
int start = 0;
int end;
int componentIndex = 0;
while (componentIndex < components.length - 1 && (end = name.indexOf(';', start)) >= 0) {
components[componentIndex] = name.substring(start, end);
componentIndex++;
start = end + 1;
}
components[componentIndex] = name.substring(start);
StringBuilder newName = new StringBuilder(100);
maybeAppendComponent(components, 3, newName);
maybeAppendComponent(components, 1, newName);
maybeAppendComponent(components, 2, newName);
maybeAppendComponent(components, 0, newName);
maybeAppendComponent(components, 4, newName);
list.set(0, newName.toString().trim());
}
}
} | 3.68 |
framework_Window_setWindowMode | /**
* Sets the mode for the window.
*
* @see WindowMode
* @param windowMode
* The new mode
*/
public void setWindowMode(WindowMode windowMode) {
if (windowMode != getWindowMode()) {
getState().windowMode = windowMode;
fireWindowWindowModeChange();
}
} | 3.68 |
dubbo_ServiceBean_getServiceClass | // merged from dubbox
@Override
protected Class getServiceClass(T ref) {
if (AopUtils.isAopProxy(ref)) {
return AopUtils.getTargetClass(ref);
}
return super.getServiceClass(ref);
} | 3.68 |
morf_AbstractSelectStatement_getFields | /**
* Gets the list of fields
*
* @return the fields
*/
public List<AliasedField> getFields() {
return fields;
} | 3.68 |
hbase_AsyncAdminBuilder_setRetryPauseForCQTBE | /**
* Set the base pause time for retrying when {@link HBaseServerException#isServerOverloaded()}. We
* use an exponential policy to generate sleep time from this base when retrying.
* <p/>
* This value should be greater than the normal pause value which could be set with the above
* {@link #setRetryPause(long, TimeUnit)} method, as usually
* {@link HBaseServerException#isServerOverloaded()} means the server is overloaded. We just use
* the normal pause value for {@link HBaseServerException#isServerOverloaded()} if here you
* specify a smaller value.
* @see #setRetryPause(long, TimeUnit)
* @deprecated Since 2.5.0, will be removed in 4.0.0. Please use
* {@link #setRetryPauseForServerOverloaded(long, TimeUnit)} instead.
*/
@Deprecated
default AsyncAdminBuilder setRetryPauseForCQTBE(long pause, TimeUnit unit) {
return setRetryPauseForServerOverloaded(pause, unit);
} | 3.68 |
flink_RocksNativeFullSnapshotStrategy_uploadSnapshotFiles | /** upload files and return total uploaded size. */
private long uploadSnapshotFiles(
@Nonnull List<HandleAndLocalPath> privateFiles,
@Nonnull CloseableRegistry snapshotCloseableRegistry,
@Nonnull CloseableRegistry tmpResourcesRegistry)
throws Exception {
// write state data
Preconditions.checkState(localBackupDirectory.exists());
Path[] files = localBackupDirectory.listDirectory();
long uploadedSize = 0;
if (files != null) {
// all sst files are private in full snapshot
List<HandleAndLocalPath> uploadedFiles =
stateUploader.uploadFilesToCheckpointFs(
Arrays.asList(files),
checkpointStreamFactory,
CheckpointedStateScope.EXCLUSIVE,
snapshotCloseableRegistry,
tmpResourcesRegistry);
uploadedSize += uploadedFiles.stream().mapToLong(e -> e.getStateSize()).sum();
privateFiles.addAll(uploadedFiles);
}
return uploadedSize;
} | 3.68 |
hbase_DefaultVisibilityLabelServiceImpl_createModifiedVisExpression | /**
 * @param tags all the visibility tags associated with the current Cell
* @return - the modified visibility expression as byte[]
*/
private byte[] createModifiedVisExpression(final List<Tag> tags) throws IOException {
StringBuilder visibilityString = new StringBuilder();
for (Tag tag : tags) {
if (tag.getType() == TagType.VISIBILITY_TAG_TYPE) {
if (visibilityString.length() != 0) {
visibilityString.append(VisibilityConstants.CLOSED_PARAN)
.append(VisibilityConstants.OR_OPERATOR);
}
int offset = tag.getValueOffset();
int endOffset = offset + tag.getValueLength();
boolean expressionStart = true;
while (offset < endOffset) {
Pair<Integer, Integer> result = TagUtil.readVIntValuePart(tag, offset);
int currLabelOrdinal = result.getFirst();
if (currLabelOrdinal < 0) {
int temp = -currLabelOrdinal;
String label = this.labelsCache.getLabel(temp);
if (expressionStart) {
// Quote every label in case of unicode characters if present
visibilityString.append(VisibilityConstants.OPEN_PARAN)
.append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label));
} else {
visibilityString.append(VisibilityConstants.AND_OPERATOR)
.append(VisibilityConstants.NOT_OPERATOR).append(CellVisibility.quote(label));
}
} else {
String label = this.labelsCache.getLabel(currLabelOrdinal);
if (expressionStart) {
visibilityString.append(VisibilityConstants.OPEN_PARAN)
.append(CellVisibility.quote(label));
} else {
visibilityString.append(VisibilityConstants.AND_OPERATOR)
.append(CellVisibility.quote(label));
}
}
expressionStart = false;
offset += result.getSecond();
}
}
}
if (visibilityString.length() != 0) {
visibilityString.append(VisibilityConstants.CLOSED_PARAN);
// Return the string formed as byte[]
return Bytes.toBytes(visibilityString.toString());
}
return null;
} | 3.68 |
pulsar_ModularLoadManagerImpl_stop | /**
* As any broker, stop the load manager.
*
* @throws PulsarServerException
* If an unexpected error occurred when attempting to stop the load manager.
*/
@Override
public void stop() throws PulsarServerException {
executors.shutdownNow();
try {
brokersData.close();
} catch (Exception e) {
log.warn("Failed to release broker lock: {}", e.getMessage());
}
} | 3.68 |
morf_NamedParameterPreparedStatement_setBlob | /**
* Sets the value of a named blob parameter.
*
* @param parameter the parameter metadata.
* @param value the parameter value.
* @return this, for method chaining
* @exception SQLException if an error occurs when setting the parameter
*/
public NamedParameterPreparedStatement setBlob(SqlParameter parameter, final byte[] value) throws SQLException {
forEachOccurrenceOfParameter(parameter, new Operation() {
@Override
public void apply(int parameterIndex) throws SQLException {
Blob blob = statement.getConnection().createBlob();
int written = blob.setBytes(1 /* odd position thing */, value);
if (written != value.length) throw new IllegalStateException("Failed to write all bytes to BLOB (written = " + written + ", actual = " + value.length + ")");
statement.setBlob(parameterIndex, blob);
}
});
return this;
} | 3.68 |
morf_SqlDialect_likeEscapeSuffix | /**
* @return The string used to set the SQL LIKE escape character - specified after all LIKE expressions
*/
protected String likeEscapeSuffix() {
return " ESCAPE '\\'";
} | 3.68 |
hbase_CompactSplit_getLongCompactions | /** Returns the longCompactions thread pool executor */
ThreadPoolExecutor getLongCompactions() {
return longCompactions;
} | 3.68 |
flink_CheckpointStatsTracker_registerMetrics | /**
* Register the exposed metrics.
*
* @param metricGroup Metric group to use for the metrics.
*/
private void registerMetrics(MetricGroup metricGroup) {
metricGroup.gauge(NUMBER_OF_CHECKPOINTS_METRIC, new CheckpointsCounter());
metricGroup.gauge(
NUMBER_OF_IN_PROGRESS_CHECKPOINTS_METRIC, new InProgressCheckpointsCounter());
metricGroup.gauge(
NUMBER_OF_COMPLETED_CHECKPOINTS_METRIC, new CompletedCheckpointsCounter());
metricGroup.gauge(NUMBER_OF_FAILED_CHECKPOINTS_METRIC, new FailedCheckpointsCounter());
metricGroup.gauge(
LATEST_RESTORED_CHECKPOINT_TIMESTAMP_METRIC,
new LatestRestoredCheckpointTimestampGauge());
metricGroup.gauge(
LATEST_COMPLETED_CHECKPOINT_SIZE_METRIC, new LatestCompletedCheckpointSizeGauge());
metricGroup.gauge(
LATEST_COMPLETED_CHECKPOINT_FULL_SIZE_METRIC,
new LatestCompletedCheckpointFullSizeGauge());
metricGroup.gauge(
LATEST_COMPLETED_CHECKPOINT_DURATION_METRIC,
new LatestCompletedCheckpointDurationGauge());
metricGroup.gauge(
LATEST_COMPLETED_CHECKPOINT_PROCESSED_DATA_METRIC,
new LatestCompletedCheckpointProcessedDataGauge());
metricGroup.gauge(
LATEST_COMPLETED_CHECKPOINT_PERSISTED_DATA_METRIC,
new LatestCompletedCheckpointPersistedDataGauge());
metricGroup.gauge(
LATEST_COMPLETED_CHECKPOINT_EXTERNAL_PATH_METRIC,
new LatestCompletedCheckpointExternalPathGauge());
metricGroup.gauge(
LATEST_COMPLETED_CHECKPOINT_ID_METRIC, new LatestCompletedCheckpointIdGauge());
} | 3.68 |
hbase_Struct_iterator | /**
* Retrieve an {@link Iterator} over the values encoded in {@code src}. {@code src}'s position is
* consumed by consuming this iterator.
*/
public StructIterator iterator(PositionedByteRange src) {
return new StructIterator(src, fields);
} | 3.68 |
hmily_HmilyCollectionValue_combine | /**
* Put all values from another collection value into this one.
*
* @param hmilyCollectionValue collection value
*/
public void combine(final HmilyCollectionValue<T> hmilyCollectionValue) {
value.addAll(hmilyCollectionValue.value);
} | 3.68 |
pulsar_MessageListener_reachedEndOfTopic | /**
* Get the notification when a topic is terminated.
*
* @param consumer
* the Consumer object associated with the terminated topic
*/
default void reachedEndOfTopic(Consumer consumer) {
// By default ignore the notification
} | 3.68 |
graphhopper_GHMatrixBatchRequester_setMaxIterations | /**
* Internal parameter. Increase only if you have very large matrices.
*/
public GHMatrixBatchRequester setMaxIterations(int maxIterations) {
this.maxIterations = maxIterations;
return this;
} | 3.68 |
framework_DesignContext_getComponent | /**
* Returns the created component.
*
* @return the component
*/
public Component getComponent() {
return component;
} | 3.68 |
Activiti_AstRightValue_getType | /**
* according to the spec, the result is undefined for rvalues, so answer <code>null</code>
*/
public final Class<?> getType(Bindings bindings, ELContext context) {
return null;
} | 3.68 |
flink_BinarySegmentUtils_readStringData | /**
 * Get the binary string; if the length is less than 8, the data is included directly in variablePartOffsetAndLen.
*
* <p>Note: Need to consider the ByteOrder.
*
* @param baseOffset base offset of composite binary format.
* @param fieldOffset absolute start offset of 'variablePartOffsetAndLen'.
* @param variablePartOffsetAndLen a long value, real data or offset and len.
*/
public static StringData readStringData(
MemorySegment[] segments,
int baseOffset,
int fieldOffset,
long variablePartOffsetAndLen) {
long mark = variablePartOffsetAndLen & HIGHEST_FIRST_BIT;
if (mark == 0) {
final int subOffset = (int) (variablePartOffsetAndLen >> 32);
final int len = (int) variablePartOffsetAndLen;
return BinaryStringData.fromAddress(segments, baseOffset + subOffset, len);
} else {
int len = (int) ((variablePartOffsetAndLen & HIGHEST_SECOND_TO_EIGHTH_BIT) >>> 56);
if (BinarySegmentUtils.LITTLE_ENDIAN) {
return BinaryStringData.fromAddress(segments, fieldOffset, len);
} else {
// fieldOffset + 1 to skip header.
return BinaryStringData.fromAddress(segments, fieldOffset + 1, len);
}
}
} | 3.68 |
morf_DatabaseDataSetProducer_close | /**
* @see org.alfasoftware.morf.dataset.DataSetProducer#close()
*/
@Override
public void close() {
if (connection == null) {
return;
}
try {
for (ResultSetIterator resultSetIterator : openResultSets) {
resultSetIterator.close();
}
openResultSets.clear();
schema = null;
// restore the auto-commit flag.
connection.commit();
connection.setAutoCommit(wasAutoCommit);
connection.close();
connection = null;
} catch (SQLException e) {
throw new RuntimeSqlException("Error closing result set", e);
}
} | 3.68 |
hudi_HoodieCombineHiveInputFormat_write | /**
* Writable interface.
*/
@Override
public void write(DataOutput out) throws IOException {
if (inputFormatClassName == null) {
if (pathToPartitionInfo == null) {
pathToPartitionInfo = Utilities.getMapWork(getJob()).getPathToPartitionInfo();
}
// extract all the inputFormatClass names for each chunk in the
// CombinedSplit.
PartitionDesc part = getPartitionFromPath(pathToPartitionInfo, inputSplitShim.getPath(0),
IOPrepareCache.get().getPartitionDescMap());
// create a new InputFormat instance if this is the first time to see
// this class
inputFormatClassName = part.getInputFileFormatClass().getName();
}
Text.writeString(out, inputFormatClassName);
if (HoodieParquetRealtimeInputFormat.class.getName().equals(inputFormatClassName)) {
// Write Shim Class Name
Text.writeString(out, inputSplitShim.getClass().getName());
}
inputSplitShim.write(out);
} | 3.68 |
flink_SortOperationFactory_createLimitWithFetch | /**
* Creates a valid {@link SortQueryOperation} with fetch (possibly merged into a preceding
* {@link SortQueryOperation}).
*
* @param fetch fetch to limit
* @param child relational expression on top of which to apply the sort operation
* @param postResolverFactory factory for creating resolved expressions
* @return valid sort operation with applied offset
*/
QueryOperation createLimitWithFetch(
int fetch, QueryOperation child, PostResolverFactory postResolverFactory) {
SortQueryOperation previousSort = validateAndGetChildSort(child, postResolverFactory);
if (fetch < 0) {
throw new ValidationException("Fetch should be greater or equal 0");
}
int offset = Math.max(previousSort.getOffset(), 0);
return new SortQueryOperation(
previousSort.getOrder(), previousSort.getChild(), offset, fetch);
} | 3.68 |
hadoop_MultipartUploaderBuilderImpl_create | /**
* Create an FSDataOutputStream at the specified path.
*/
@Override
public B create() {
flags.add(CreateFlag.CREATE);
return getThisBuilder();
} | 3.68 |
graphhopper_VectorTile_getLayersCount | /**
* <code>repeated .vector_tile.Tile.Layer layers = 3;</code>
*/
public int getLayersCount() {
if (layersBuilder_ == null) {
return layers_.size();
} else {
return layersBuilder_.getCount();
}
} | 3.68 |
hbase_BlockIOUtils_readFully | /**
* Read length bytes into ByteBuffers directly.
* @param buf the destination {@link ByteBuff}
* @param dis the HDFS input stream which implement the ByteBufferReadable interface.
* @param length bytes to read.
* @throws IOException exception to throw if any error happen
*/
public static void readFully(ByteBuff buf, FSDataInputStream dis, int length) throws IOException {
final Span span = Span.current();
final AttributesBuilder attributesBuilder = builderFromContext(Context.current());
if (!isByteBufferReadable(dis)) {
// If InputStream does not support the ByteBuffer read, just read to heap and copy bytes to
// the destination ByteBuff.
byte[] heapBuf = new byte[length];
IOUtils.readFully(dis, heapBuf, 0, length);
annotateHeapBytesRead(attributesBuilder, length);
span.addEvent("BlockIOUtils.readFully", attributesBuilder.build());
copyToByteBuff(heapBuf, 0, length, buf);
return;
}
int directBytesRead = 0, heapBytesRead = 0;
ByteBuffer[] buffers = buf.nioByteBuffers();
int remain = length;
int idx = 0;
ByteBuffer cur = buffers[idx];
try {
while (remain > 0) {
while (!cur.hasRemaining()) {
if (++idx >= buffers.length) {
throw new IOException(
"Not enough ByteBuffers to read the reminding " + remain + " " + "bytes");
}
cur = buffers[idx];
}
cur.limit(cur.position() + Math.min(remain, cur.remaining()));
int bytesRead = dis.read(cur);
if (bytesRead < 0) {
throw new IOException(
"Premature EOF from inputStream, but still need " + remain + " " + "bytes");
}
remain -= bytesRead;
if (cur.isDirect()) {
directBytesRead += bytesRead;
} else {
heapBytesRead += bytesRead;
}
}
} finally {
annotateBytesRead(attributesBuilder, directBytesRead, heapBytesRead);
span.addEvent("BlockIOUtils.readFully", attributesBuilder.build());
}
} | 3.68 |
pulsar_ConnectionHandler_switchClientCnx | /**
* Update the {@link ClientCnx} for the class, then increment and get the epoch value. Note that the epoch value is
* currently only used by the {@link ProducerImpl}.
* @param clientCnx - the new {@link ClientCnx}
* @return the epoch value to use for this pair of {@link ClientCnx} and {@link ProducerImpl}
*/
protected long switchClientCnx(ClientCnx clientCnx) {
setClientCnx(clientCnx);
return EPOCH_UPDATER.incrementAndGet(this);
} | 3.68 |
hbase_MiniBatchOperationInProgress_setWalEdit | /**
* Sets the walEdit for the operation(Mutation) at the specified position.
*/
public void setWalEdit(int index, WALEdit walEdit) {
this.walEditsFromCoprocessors[getAbsoluteIndex(index)] = walEdit;
} | 3.68 |
flink_HiveParserUtils_createAggregateCall | /**
* Counterpart of org.apache.calcite.rel.core.AggregateCall#create. It uses
 * HiveParserOperatorBinding as the SqlOperatorBinding to create the AggregateCall instead, which
 * makes it possible to get the literal value for an operand.
*/
private static AggregateCall createAggregateCall(
SqlAggFunction aggFunction,
boolean distinct,
boolean approximate,
boolean ignoreNulls,
List<Integer> argList,
int filterArg,
RelCollation collation,
int groupCount,
RelNode input,
RelDataType type,
String name,
List<RexNode> operands) {
if (type == null) {
final RelDataTypeFactory typeFactory = input.getCluster().getTypeFactory();
final List<RelDataType> types = SqlTypeUtil.projectTypes(input.getRowType(), argList);
final HiveParserOperatorBinding callBinding =
new HiveParserAggOperatorBinding(
typeFactory, aggFunction, types, operands, groupCount, filterArg >= 0);
type = aggFunction.inferReturnType(callBinding);
}
return AggregateCall.create(
aggFunction,
distinct,
approximate,
ignoreNulls,
argList,
filterArg,
null,
collation,
type,
name);
} | 3.68 |
hadoop_UpdateContainerTokenEvent_isIncrease | /**
* Is this a container Increase.
*
* @return isIncrease.
*/
public boolean isIncrease() {
return isIncrease;
} | 3.68 |
hadoop_AzureNativeFileSystemStore_openOutputStream | /**
* Opens a new output stream to the given blob (page or block blob)
* to populate it from scratch with data.
*/
private OutputStream openOutputStream(final CloudBlobWrapper blob)
throws StorageException {
if (blob instanceof CloudPageBlobWrapper){
return new PageBlobOutputStream(
(CloudPageBlobWrapper) blob, getInstrumentedContext(), sessionConfiguration);
} else {
      // Handle both CloudBlockBlobWrapperImpl and (only for the test code path)
// MockCloudBlockBlobWrapper.
return ((CloudBlockBlobWrapper) blob).openOutputStream(getUploadOptions(),
getInstrumentedContext());
}
} | 3.68 |
flink_BinaryStringDataUtil_trim | /**
 * Walk the characters of the current string from both ends and remove each character that is in
 * the trim string. Return the new substring with the trim characters removed from both ends.
 *
 * @param trimStr the trim string
 * @return a substring with the trim characters removed from both ends.
*/
public static BinaryStringData trim(BinaryStringData str, BinaryStringData trimStr) {
if (trimStr == null) {
return null;
}
return trimRight(trimLeft(str, trimStr), trimStr);
} | 3.68 |
hadoop_Trash_getEmptier | /**
* Return a {@link Runnable} that periodically empties the trash of all
* users, intended to be run by the superuser.
*
* @throws IOException on raised on errors performing I/O.
* @return Runnable.
*/
public Runnable getEmptier() throws IOException {
return trashPolicy.getEmptier();
} | 3.68 |
pulsar_BucketDelayedDeliveryTrackerFactory_cleanResidualSnapshots | /**
* Clean up residual snapshot data.
 * If the tracker has not been created or has been closed, we can't clean up the snapshot with `tracker.clear`;
 * this method can clean up the residual snapshots without creating a tracker.
*/
public CompletableFuture<Void> cleanResidualSnapshots(ManagedCursor cursor) {
Map<String, String> cursorProperties = cursor.getCursorProperties();
if (MapUtils.isEmpty(cursorProperties)) {
return CompletableFuture.completedFuture(null);
}
List<CompletableFuture<Void>> futures = new ArrayList<>();
FutureUtil.Sequencer<Void> sequencer = FutureUtil.Sequencer.create();
cursorProperties.forEach((k, v) -> {
if (k != null && v != null && k.startsWith(BucketDelayedDeliveryTracker.DELAYED_BUCKET_KEY_PREFIX)) {
CompletableFuture<Void> future = sequencer.sequential(() -> {
return cursor.removeCursorProperty(k)
.thenCompose(__ -> bucketSnapshotStorage.deleteBucketSnapshot(Long.parseLong(v)));
});
futures.add(future);
}
});
return FutureUtil.waitForAll(futures);
} | 3.68 |
flink_FlinkRelMetadataQuery_getColumnInterval | /**
* Returns the {@link FlinkMetadata.ColumnInterval} statistic.
*
* @param rel the relational expression
* @param index the index of the given column
* @return the interval of the given column of a specified relational expression. Returns null
* if interval cannot be estimated, Returns {@link
* org.apache.flink.table.planner.plan.stats.EmptyValueInterval} if column values does not
* contains any value except for null.
*/
public ValueInterval getColumnInterval(RelNode rel, int index) {
for (; ; ) {
try {
return columnIntervalHandler.getColumnInterval(rel, this, index);
} catch (JaninoRelMetadataProvider.NoHandler e) {
columnIntervalHandler = revise(e.relClass, FlinkMetadata.ColumnInterval.DEF);
}
}
} | 3.68 |