name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
---|---|---|
hadoop_AbfsClient_appendSASTokenToQuery | /**
* If configured for SAS AuthType, appends SAS token to queryBuilder.
 * @param path - the path on which the operation is performed.
 * @param operation - the operation for which the SAS token is requested.
 * @param queryBuilder - the query builder to which the SAS token is appended.
* @param cachedSasToken - previously acquired SAS token to be reused.
* @return sasToken - returned for optional re-use.
 * @throws SASTokenProviderException if the SAS token could not be acquired.
*/
private String appendSASTokenToQuery(String path,
String operation,
AbfsUriQueryBuilder queryBuilder,
String cachedSasToken)
throws SASTokenProviderException {
String sasToken = null;
if (this.authType == AuthType.SAS) {
try {
LOG.trace("Fetch SAS token for {} on {}", operation, path);
if (cachedSasToken == null) {
sasToken = sasTokenProvider.getSASToken(this.accountName,
this.filesystem, path, operation);
if ((sasToken == null) || sasToken.isEmpty()) {
throw new UnsupportedOperationException("SASToken received is empty or null");
}
} else {
sasToken = cachedSasToken;
LOG.trace("Using cached SAS token.");
}
// if SAS Token contains a prefix of ?, it should be removed
if (sasToken.charAt(0) == '?') {
sasToken = sasToken.substring(1);
}
queryBuilder.setSASToken(sasToken);
LOG.trace("SAS token fetch complete for {} on {}", operation, path);
} catch (Exception ex) {
throw new SASTokenProviderException(String.format("Failed to acquire a SAS token for %s on %s due to %s",
operation,
path,
ex.toString()));
}
}
return sasToken;
} | 3.68 |
flink_StreamGraphHasherV2_generateNodeLocalHash | /**
 * Applies the {@link Hasher} to the {@link StreamNode}. The hasher encapsulates the current
* state of the hash.
*
* <p>The specified ID is local to this node. We cannot use the {@link StreamNode#id}, because
* it is incremented in a static counter. Therefore, the IDs for identical jobs will otherwise
* be different.
*/
private void generateNodeLocalHash(Hasher hasher, int id) {
// This resolves conflicts for otherwise identical source nodes. BUT
// the generated hash codes depend on the ordering of the nodes in the
// stream graph.
hasher.putInt(id);
} | 3.68 |
hadoop_BinaryRecordOutput_get | /**
* Get a thread-local record output for the supplied DataOutput.
* @param out data output stream
* @return binary record output corresponding to the supplied DataOutput.
*/
public static BinaryRecordOutput get(DataOutput out) {
BinaryRecordOutput bout = B_OUT.get();
bout.setDataOutput(out);
return bout;
} | 3.68 |
flink_VertexFlameGraph_waiting | // Indicates that it is waiting for the first samples to create the flame graph
public static VertexFlameGraph waiting() {
return new VertexFlameGraph(-3, null);
} | 3.68 |
MagicPlugin_Mage_sendMessage | /**
* Send a message to this Mage.
* <p/>
 * Use this to send important messages to the player.
*
* @param message The message to send
*/
@Override
public void sendMessage(String message) {
sendMessage(controller.getMessagePrefix(), message);
} | 3.68 |
flink_LeaderInformationRegister_of | /** Creates a single-entry instance containing only the passed information. */
public static LeaderInformationRegister of(
String componentId, LeaderInformation leaderInformation) {
return new LeaderInformationRegister(
Collections.singletonMap(componentId, leaderInformation));
} | 3.68 |
hbase_Result_setStatistics | /**
 * Set the load statistics of the region from which this result was returned
* @param loadStats statistics about the current region from which this was returned
*/
@InterfaceAudience.Private
public void setStatistics(RegionLoadStats loadStats) {
this.stats = loadStats;
} | 3.68 |
flink_RichFunction_open | /**
* Initialization method for the function. It is called before the actual working methods (like
* <i>map</i> or <i>join</i>) and thus suitable for one time setup work. For functions that are
* part of an iteration, this method will be invoked at the beginning of each iteration
* superstep.
*
* <p>The openContext object passed to the function can be used for configuration and
 * initialization. The openContext contains some necessary information that was configured on
* the function in the program composition.
*
* <pre>{@code
* public class MyFilter extends RichFilterFunction<String> {
*
* private String searchString;
*
* public void open(OpenContext openContext) {
* // initialize the value of searchString
* }
*
* public boolean filter(String value) {
* return value.equals(searchString);
* }
* }
* }</pre>
*
* <p>By default, this method does nothing.
*
* <p>1. If you implement {@code open(OpenContext openContext)}, the {@code open(OpenContext
* openContext)} will be invoked and the {@code open(Configuration parameters)} won't be
* invoked. 2. If you don't implement {@code open(OpenContext openContext)}, the {@code
* open(Configuration parameters)} will be invoked in the default implementation of the {@code
* open(OpenContext openContext)}.
*
* @param openContext The context containing information about the context in which the function
* is opened.
* @throws Exception Implementations may forward exceptions, which are caught by the runtime.
* When the runtime catches an exception, it aborts the task and lets the fail-over logic
* decide whether to retry the task execution.
*/
@PublicEvolving
default void open(OpenContext openContext) throws Exception {
open(new Configuration());
} | 3.68 |
hbase_TableDescriptorBuilder_setRegionMemStoreReplication | /**
* Enable or Disable the memstore replication from the primary region to the replicas. The
* replication will be used only for meta operations (e.g. flush, compaction, ...)
* @param memstoreReplication true if the new data written to the primary region should be
 * replicated. false if the secondaries can tolerate having new
* data only when the primary flushes the memstore.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
return setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication));
} | 3.68 |
morf_HumanReadableStatementHelper_generateChangePrimaryKeyColumnsString | /**
* @param tableName - the table name which needs its primary key columns changed
 * @param newPrimaryKeyColumns - the list of column names which the primary key will become
* @return a string containing the human-readable version of the action
*/
public static String generateChangePrimaryKeyColumnsString(String tableName, List<String> newPrimaryKeyColumns) {
StringBuilder changePrimaryKeyColumnsBuilder = new StringBuilder();
changePrimaryKeyColumnsBuilder.append(String.format("Change primary key columns on %s to become %s",
tableName,
"(" + Joiner.on(", ").join(newPrimaryKeyColumns) + ")"));
return changePrimaryKeyColumnsBuilder.toString();
} | 3.68 |
flink_JobMasterPartitionTracker_getAllTrackedNonClusterPartitions | /** Gets all the non-cluster partitions under tracking. */
default Collection<ResultPartitionDeploymentDescriptor> getAllTrackedNonClusterPartitions() {
return getAllTrackedPartitions().stream()
.filter(descriptor -> !descriptor.getPartitionType().isPersistent())
.collect(Collectors.toList());
} | 3.68 |
hadoop_RouterQuotaUsage_verifyQuotaByStorageType | /**
 * Verify whether the space quota by storage type is violated once the quota is set. See the
 * related method {@link DirectoryWithQuotaFeature#verifyQuotaByStorageType}.
* @throws DSQuotaExceededException If the quota is exceeded.
*/
public void verifyQuotaByStorageType() throws DSQuotaExceededException {
for (StorageType t: StorageType.getTypesSupportingQuota()) {
long typeQuota = getTypeQuota(t);
if (typeQuota == HdfsConstants.QUOTA_RESET) {
continue;
}
long typeConsumed = getTypeConsumed(t);
if (Quota.isViolated(typeQuota, typeConsumed)) {
throw new DSQuotaExceededException(typeQuota, typeConsumed);
}
}
} | 3.68 |
streampipes_AbstractConfigurablePipelineElementBuilder_requiredSecret | /**
* Assigns a new secret text-based configuration parameter (e.g., a password) which is required
* by the processing element.
*
* @param label The {@link org.apache.streampipes.sdk.helpers.Label} that describes why this parameter is needed in a
* user-friendly manner.
 * @return this builder
*/
public K requiredSecret(Label label) {
SecretStaticProperty secretStaticProperty = new SecretStaticProperty(label.getInternalId(),
label.getLabel(), label.getDescription());
this.staticProperties.add(secretStaticProperty);
return me();
} | 3.68 |
rocketmq-connect_WorkerSinkTask_preCommit | /**
 * Reset offsets to the custom values provided by the sink task context.
*/
private void preCommit() {
Map<MessageQueue, Long> offsets = sinkTaskContext.queuesOffsets();
if (offsets.isEmpty()) {
return;
}
for (Map.Entry<MessageQueue, Long> entry : offsets.entrySet()) {
MessageQueue queue = entry.getKey();
Long offset = entry.getValue();
if (offset != null) {
log.trace("{} Rewind {} to offset {}", this, queue, offset);
try {
consumer.seek(queue, offset);
lastCommittedOffsets.put(queue, offset);
currentOffsets.put(queue, offset);
} catch (MQClientException e) {
// NO-op
}
}
}
sinkTaskContext.cleanQueuesOffsets();
} | 3.68 |
hmily_EtcdClient_setClient | /**
 * Sets the client.
 *
 * @param client the client to set
*/
public void setClient(final Client client) {
this.client = client;
} | 3.68 |
hadoop_DatanodeAdminProperties_getMaintenanceExpireTimeInMS | /**
* Get the maintenance expiration time in milliseconds.
* @return the maintenance expiration time in milliseconds.
*/
public long getMaintenanceExpireTimeInMS() {
return this.maintenanceExpireTimeInMS;
} | 3.68 |
hbase_ZKWatcher_getQuorum | /**
* Get the quorum address of this instance.
* @return quorum string of this zookeeper connection instance
*/
public String getQuorum() {
return quorum;
} | 3.68 |
flink_LogicalTypeChecks_hasScale | /** Checks the scale of all types that define a scale implicitly or explicitly. */
public static boolean hasScale(LogicalType logicalType, int scale) {
return getScale(logicalType) == scale;
} | 3.68 |
hbase_ByteBufferKeyOnlyKeyValue_setKey | /**
 * A setter that avoids object creation by reusing this instance whenever a new
 * OffheapKeyOnlyKeyValue would otherwise be needed.
* @param key - the key part of the cell
* @param offset - offset of the cell
* @param length - length of the cell
* @param rowLen - the rowlen part of the cell
*/
public void setKey(ByteBuffer key, int offset, int length, short rowLen) {
this.buf = key;
this.offset = offset;
this.length = length;
this.rowLen = rowLen;
} | 3.68 |
morf_UpgradeGraph_isPackageNameValid | // Determines whether the package name contains a valid version suffix
private boolean isPackageNameValid(Class<? extends UpgradeStep> stepClass) {
if (stepClass.getPackage() == null) {
return false;
}
return stepClass.getPackage().getName().matches(".*\\.upgrade\\.v[0-9]+_[0-9]+(_[0-9]+[a-z]?)*$");
} | 3.68 |
flink_SavepointWriter_removeOperator | /**
* Drop an existing operator from the savepoint.
*
* @param identifier The identifier of the operator.
* @return A modified savepoint.
*/
public SavepointWriter removeOperator(OperatorIdentifier identifier) {
metadata.removeOperator(identifier);
return this;
} | 3.68 |
hadoop_BlockData_getStartOffset | /**
* Gets the start offset of the given block.
* @param blockNumber the id of the given block.
* @return the start offset of the given block.
* @throws IllegalArgumentException if blockNumber is invalid.
*/
public long getStartOffset(int blockNumber) {
throwIfInvalidBlockNumber(blockNumber);
return blockNumber * (long) blockSize;
} | 3.68 |
pulsar_OffloadIndexBlock_getStreamSize | /**
* @return the number of bytes in the stream.
*/
public long getStreamSize() {
return streamSize;
} | 3.68 |
hadoop_ConnectionPool_getNumIdleConnections | /**
 * Number of usable connections, i.e. connections with no active thread.
*
* @return Number of idle connections
*/
protected int getNumIdleConnections() {
int ret = 0;
List<ConnectionContext> tmpConnections = this.connections;
for (ConnectionContext conn : tmpConnections) {
if (conn.isIdle()) {
ret++;
}
}
return ret;
} | 3.68 |
streampipes_DataStreamBuilder_property | /**
* Assigns a new event property to the stream's schema.
*
* @param property The event property that should be added.
* Use {@link org.apache.streampipes.sdk.helpers.EpProperties}
* for defining simple property definitions or
* {@link org.apache.streampipes.sdk.builder.PrimitivePropertyBuilder}
* for defining more complex definitions.
* @return this
*/
public DataStreamBuilder property(EventProperty property) {
this.eventProperties.add(property);
return me();
} | 3.68 |
hbase_ProcedureExecutor_init | /**
* Initialize the procedure executor, but do not start workers. We will start them later.
* <p/>
 * It calls ProcedureStore.recoverLease() and ProcedureStore.load() to recover the lease, ensure
 * a single executor, and start the procedure replay to resume and recover the previously
 * pending and in-progress procedures.
* @param numThreads number of threads available for procedure execution.
* @param abortOnCorruption true if you want to abort your service in case a corrupted procedure
 * is found on replay, otherwise false.
*/
public void init(int numThreads, boolean abortOnCorruption) throws IOException {
// We have numThreads executor + one timer thread used for timing out
// procedures and triggering periodic procedures.
this.corePoolSize = numThreads;
this.maxPoolSize = 10 * numThreads;
LOG.info("Starting {} core workers (bigger of cpus/4 or 16) with max (burst) worker count={}",
corePoolSize, maxPoolSize);
this.threadGroup = new ThreadGroup("PEWorkerGroup");
this.timeoutExecutor = new TimeoutExecutorThread<>(this, threadGroup, "ProcExecTimeout");
this.workerMonitorExecutor = new TimeoutExecutorThread<>(this, threadGroup, "WorkerMonitor");
// Create the workers
workerId.set(0);
workerThreads = new CopyOnWriteArrayList<>();
for (int i = 0; i < corePoolSize; ++i) {
workerThreads.add(new WorkerThread(threadGroup));
}
long st, et;
// Acquire the store lease.
st = System.nanoTime();
store.recoverLease();
et = System.nanoTime();
LOG.info("Recovered {} lease in {}", store.getClass().getSimpleName(),
StringUtils.humanTimeDiff(TimeUnit.NANOSECONDS.toMillis(et - st)));
// start the procedure scheduler
scheduler.start();
// TODO: Split in two steps.
// TODO: Handle corrupted procedures (currently just a warn)
// The first one will make sure that we have the latest id,
// so we can start the threads and accept new procedures.
// The second step will do the actual load of old procedures.
st = System.nanoTime();
load(abortOnCorruption);
et = System.nanoTime();
LOG.info("Loaded {} in {}", store.getClass().getSimpleName(),
StringUtils.humanTimeDiff(TimeUnit.NANOSECONDS.toMillis(et - st)));
} | 3.68 |
framework_AbstractComponent_getCaption | /*
 * Gets the component's caption. Don't add a JavaDoc comment here, we use
* the default documentation from implemented interface.
*/
@Override
public String getCaption() {
return getState(false).caption;
} | 3.68 |
flink_DataSetUtils_checksumHashCode | /**
* Convenience method to get the count (number of elements) of a DataSet as well as the checksum
* (sum over element hashes).
*
* @return A ChecksumHashCode that represents the count and checksum of elements in the data
* set.
* @deprecated This method will be removed at some point.
*/
@Deprecated
public static <T> Utils.ChecksumHashCode checksumHashCode(DataSet<T> input) throws Exception {
final String id = new AbstractID().toString();
input.output(new Utils.ChecksumHashCodeHelper<T>(id)).name("ChecksumHashCode");
JobExecutionResult res = input.getExecutionEnvironment().execute();
return res.<Utils.ChecksumHashCode>getAccumulatorResult(id);
} | 3.68 |
flink_FlinkDatabaseMetaData_nullsAreSortedLow | /** In Flink, null values are sorted as low values. */
@Override
public boolean nullsAreSortedLow() throws SQLException {
return true;
} | 3.68 |
flink_AbstractCollectResultBuffer_reset | /** Clear the whole buffer and discard all results. */
protected void reset() {
buffer.clear();
userVisibleHead = 0;
userVisibleTail = 0;
offset = 0;
} | 3.68 |
streampipes_DataLakeResourceV4_isIgnoreMissingValues | // Checks whether missing values should be ignored, based on the missing value behaviour parameter
private boolean isIgnoreMissingValues(String missingValueBehaviour) {
boolean ignoreMissingValues;
if ("ignore".equals(missingValueBehaviour)) {
ignoreMissingValues = true;
} else {
ignoreMissingValues = false;
}
return ignoreMissingValues;
} | 3.68 |
hadoop_Endpoint_newAddresses | /**
* Create a new address structure of the requested size
* @param size size to create
* @return the new list
*/
private List<Map<String, String>> newAddresses(int size) {
return new ArrayList<Map<String, String>>(size);
} | 3.68 |
hbase_BufferedDataBlockEncoder_copyFromNext | /**
* Copy the state from the next one into this instance (the previous state placeholder). Used to
* save the previous state when we are advancing the seeker to the next key/value.
*/
protected void copyFromNext(SeekerState nextState) {
if (keyBuffer.length != nextState.keyBuffer.length) {
keyBuffer = nextState.keyBuffer.clone();
} else if (!isValid()) {
// Note: we can only call isValid before we override our state, so this
// comes before all the assignments at the end of this method.
System.arraycopy(nextState.keyBuffer, 0, keyBuffer, 0, nextState.keyLength);
} else {
// don't copy the common prefix between this key and the previous one
System.arraycopy(nextState.keyBuffer, nextState.lastCommonPrefix, keyBuffer,
nextState.lastCommonPrefix, nextState.keyLength - nextState.lastCommonPrefix);
}
currentKey.set(nextState.currentKey);
valueOffset = nextState.valueOffset;
keyLength = nextState.keyLength;
valueLength = nextState.valueLength;
lastCommonPrefix = nextState.lastCommonPrefix;
nextKvOffset = nextState.nextKvOffset;
memstoreTS = nextState.memstoreTS;
currentBuffer = nextState.currentBuffer;
tagsOffset = nextState.tagsOffset;
tagsLength = nextState.tagsLength;
if (nextState.tagCompressionContext != null) {
tagCompressionContext = nextState.tagCompressionContext;
}
} | 3.68 |
hbase_RefCnt_hasRecycler | /**
* Returns true if this refCnt has a recycler.
*/
public boolean hasRecycler() {
return recycler != ByteBuffAllocator.NONE;
} | 3.68 |
hbase_FsDelegationToken_releaseDelegationToken | /**
* Releases a previously acquired delegation token.
*/
public void releaseDelegationToken() {
if (userProvider.isHadoopSecurityEnabled()) {
if (userToken != null && !hasForwardedToken) {
try {
userToken.cancel(this.fs.getConf());
} catch (Exception e) {
LOG.warn("Failed to cancel HDFS delegation token: " + userToken, e);
}
}
this.userToken = null;
this.fs = null;
}
} | 3.68 |
flink_DataStream_getPreferredResources | /**
* Gets the preferred resources for this operator.
*
* @return The preferred resources set for this operator.
*/
@PublicEvolving
public ResourceSpec getPreferredResources() {
return transformation.getPreferredResources();
} | 3.68 |
morf_MySqlDialect_alterAutoincrementStatement | /**
 * Returns a statement which will alter the auto-increment start value for a specific table.
*/
private String alterAutoincrementStatement(Table table,Column autoIncrementColumn) {
return "ALTER TABLE " + table.getName() + " AUTO_INCREMENT = " + autoIncrementColumn.getAutoNumberStart();
} | 3.68 |
flink_AbstractPythonFunctionOperator_getConfiguration | /** Returns the {@link Configuration}. */
public Configuration getConfiguration() {
return config;
} | 3.68 |
framework_HasHierarchicalDataProvider_setTreeData | /**
* Sets a new {@link TreeDataProvider} wrapping the given {@link TreeData}.
*
* @param treeData
* the tree data to set
*/
public default void setTreeData(TreeData<T> treeData) {
setDataProvider(new TreeDataProvider<>(treeData));
} | 3.68 |
hbase_ChoreService_getNumberOfChoresMissingStartTime | /**
* Return number of chores that this service currently has scheduled that are missing their
* scheduled start time
*/
int getNumberOfChoresMissingStartTime() {
return choresMissingStartTime.size();
} | 3.68 |
hadoop_ComponentContainers_addContainer | /**
* Add a container.
* @param container container
*/
public void addContainer(Container container) {
containers.add(container);
} | 3.68 |
framework_VTooltip_getFinalY | /**
* Return the final Y-coordinate of the tooltip based on cursor
* position, size of the tooltip, size of the page and necessary
* margins.
*
 * @param offsetHeight the height of the tooltip element
* @return The final y-coordinate
*
*/
private int getFinalY(int offsetHeight) {
int y = 0;
int heightNeeded = 10 + offsetHeight;
int roomAbove = tooltipEventMouseY;
int roomBelow = Window.getClientHeight() - roomAbove;
if (roomBelow > heightNeeded) {
y = tooltipEventMouseY + 10 + Window.getScrollTop();
} else {
y = tooltipEventMouseY + Window.getScrollTop() - 10
- offsetHeight;
}
if (y + offsetHeight + MARGIN
- Window.getScrollTop() > Window
.getClientHeight()) {
y = tooltipEventMouseY - 5 - offsetHeight
+ Window.getScrollTop();
if (y - Window.getScrollTop() < 0) {
// tooltip does not fit on top of the mouse either,
// put it at the top of the screen
y = Window.getScrollTop();
}
}
if (tooltipEventMouseY != EVENT_XY_POSITION_OUTSIDE) {
// Do not allow y to be zero, for otherwise the tooltip
// does not close when the mouse is moved (see
// isTooltipOpen()). #15129
int minY = Window.getScrollTop() + MARGIN;
y = Math.max(y, minY);
}
return y;
} | 3.68 |
framework_Buffered_getSource | /**
 * Gets the source of the exception.
*
* @return the Buffered object which generated this exception.
*/
public Buffered getSource() {
return source;
} | 3.68 |
flink_WindowedOperatorTransformation_evictor | /**
* Sets the {@code Evictor} that should be used to evict elements from a window before emission.
*
 * <p>Note: When using an evictor, window performance will degrade significantly, since
* incremental aggregation of window results cannot be used.
*/
@PublicEvolving
public WindowedOperatorTransformation<T, K, W> evictor(Evictor<? super T, ? super W> evictor) {
builder.evictor(evictor);
return this;
} | 3.68 |
zilla_HpackContext_staticIndex19 | // Index in static table for the given name of length 19
private static int staticIndex19(DirectBuffer name)
{
switch (name.getByte(18))
{
case 'e':
if (STATIC_TABLE[43].name.equals(name)) // if-unmodified-since
{
return 43;
}
break;
case 'n':
if (STATIC_TABLE[25].name.equals(name)) // content-disposition
{
return 25;
}
if (STATIC_TABLE[49].name.equals(name)) // proxy-authorization
{
return 49;
}
}
return -1;
} | 3.68 |
hadoop_SnappyCompressor_needsInput | /**
* Returns true if the input data buffer is empty and
* #setInput() should be called to provide more input.
*
* @return <code>true</code> if the input data buffer is empty and
* #setInput() should be called in order to provide more input.
*/
@Override
public boolean needsInput() {
return !(compressedDirectBuf.remaining() > 0
|| uncompressedDirectBuf.remaining() == 0 || userBufLen > 0);
} | 3.68 |
framework_ApplicationConfiguration_getUIId | /**
* Gets the UI id of the server-side UI associated with this client-side
* instance. The UI id should be included in every request originating from
* this instance in order to associate the request with the right UI
* instance on the server.
*
* @return the UI id
*/
public int getUIId() {
return uiId;
} | 3.68 |
hadoop_StorageReceivedDeletedBlocks_getStorageID | /**
* @deprecated Use {@link #getStorage()} instead
*/
@Deprecated
public String getStorageID() {
return storage.getStorageID();
} | 3.68 |
framework_ContainerOrderedWrapper_addToOrderWrapper | /**
* Registers the specified Item after the specified itemId in the wrapper's
* internal ordering. The underlying container is not modified. Given item
* id must be in the container, or must be null.
*
* @param id
* the ID of the Item to be added to the ordering.
* @param previousItemId
* the Id of the previous item.
*/
private void addToOrderWrapper(Object id, Object previousItemId) {
if (last == previousItemId || last == null) {
addToOrderWrapper(id);
} else {
if (previousItemId == null) {
next.put(id, first);
prev.put(first, id);
first = id;
} else {
prev.put(id, previousItemId);
next.put(id, next.get(previousItemId));
prev.put(next.get(previousItemId), id);
next.put(previousItemId, id);
}
}
} | 3.68 |
flink_ColumnOperationUtils_renameColumns | /**
* Creates a projection list that renames existing columns to new names.
*
 * <p><b>NOTE:</b> Resulting expressions are still unresolved.
*
* @param inputFields names of current columns
* @param newAliases new aliases for current columns
* @return projection expressions
*/
static List<Expression> renameColumns(List<String> inputFields, List<Expression> newAliases) {
LinkedHashMap<String, Expression> finalFields = new LinkedHashMap<>();
inputFields.forEach(field -> finalFields.put(field, unresolvedRef(field)));
newAliases.forEach(
expr -> {
String name = expr.accept(renameColumnExtractor);
finalFields.put(name, expr);
});
return new ArrayList<>(finalFields.values());
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_helpGetValueLen | /** Returns the length of the value. */
int helpGetValueLen(long valuePointer) {
return SkipListUtils.helpGetValueLen(valuePointer, spaceAllocator);
} | 3.68 |
hbase_QuotaTableUtil_getNamespaceSnapshotSize | /**
* Fetches the computed size of all snapshots against tables in a namespace for space quotas.
*/
static long getNamespaceSnapshotSize(Connection conn, String namespace) throws IOException {
try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
Result r = quotaTable.get(createGetNamespaceSnapshotSize(namespace));
if (r.isEmpty()) {
return 0L;
}
r.advance();
return parseSnapshotSize(r.current());
} catch (InvalidProtocolBufferException e) {
throw new IOException("Could not parse snapshot size value for namespace " + namespace, e);
}
} | 3.68 |
hadoop_DistributedCache_addLocalArchives | /**
 * Add an archive that has been localized to the conf. Used
* by internal DistributedCache code.
* @param conf The conf to modify to contain the localized caches
* @param str a comma separated list of local archives
*/
@Deprecated
public static void addLocalArchives(Configuration conf, String str) {
String archives = conf.get(CACHE_LOCALARCHIVES);
conf.set(CACHE_LOCALARCHIVES, archives == null ? str
: archives + "," + str);
} | 3.68 |
pulsar_AbstractTopic_updateBrokerSubscriptionTypesEnabled | // subscriptionTypesEnabled is dynamic and can be updated online.
public void updateBrokerSubscriptionTypesEnabled() {
topicPolicies.getSubscriptionTypesEnabled().updateBrokerValue(
subTypeStringsToEnumSet(brokerService.pulsar().getConfiguration().getSubscriptionTypesEnabled()));
} | 3.68 |
morf_SqlServerDialect_getSqlforLength | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlforLength(Function)
*/
@Override
protected String getSqlforLength(Function function){
return String.format("LEN(%s)", getSqlFrom(function.getArguments().get(0)));
} | 3.68 |
flink_VertexThreadInfoTrackerBuilder_setNumSamples | /**
* Sets {@code numSamples}.
*
* @param numSamples Number of thread info samples to collect for each subtask.
* @return Builder.
*/
public VertexThreadInfoTrackerBuilder setNumSamples(int numSamples) {
this.numSamples = numSamples;
return this;
} | 3.68 |
framework_FocusableHTML_focus | /**
* Focus the panel.
*/
@Override
public void focus() {
setFocus(true);
} | 3.68 |
hadoop_OBSInputStream_toString | /**
* String value includes statistics as well as stream state. <b>Important:
* there are no guarantees as to the stability of this value.</b>
*
* @return a string value for printing in logs/diagnostics
*/
@Override
@InterfaceStability.Unstable
public String toString() {
synchronized (this) {
return "OBSInputStream{" + uri
+ " wrappedStream=" + (wrappedStream != null
? "open"
: "closed")
+ " streamCurrentPos=" + streamCurrentPos
+ " nextReadPos=" + nextReadPos
+ " contentLength=" + contentLength
+ " contentRangeStart=" + contentRangeStart
+ " contentRangeFinish=" + contentRangeFinish
+ " remainingInCurrentRequest=" + remainingInCurrentRequest()
+ '}';
}
} | 3.68 |
framework_VTabsheet_setTabIndex | /**
* For internal use only. May be renamed or removed in a future release.
* <p>
* Sets the tabulator index for the active tab of the tab sheet. The active
* tab represents the entire tab sheet in the browser's focus cycle
* (excluding any focusable elements within the content panel).
* <p>
* This value is delegated from the TabsheetState.
*
* @param tabIndex
* tabulator index for the active tab of the tab sheet
* @since 8.1.7
*/
public void setTabIndex(int tabIndex) {
tabulatorIndex = tabIndex;
Tab activeTab = getActiveTab();
if (activeTab != null) {
activeTab.setTabulatorIndex(tabIndex);
}
} | 3.68 |
morf_XmlDataSetProducer_getView | /**
* @see org.alfasoftware.morf.metadata.Schema#getView(java.lang.String)
*/
@Override
public View getView(String name) {
throw new IllegalArgumentException("No view named [" + name + "]. Views not supported in XML datasets");
} | 3.68 |
flink_FlinkAggregateExpandDistinctAggregatesRule_convertSingletonDistinct | /**
* Converts an aggregate with one distinct aggregate and one or more non-distinct aggregates to
* multi-phase aggregates (see reference example below).
*
* @param relBuilder Contains the input relational expression
* @param aggregate Original aggregate
* @param argLists Arguments and filters to the distinct aggregate function
*/
private RelBuilder convertSingletonDistinct(
RelBuilder relBuilder,
Aggregate aggregate,
Set<Pair<List<Integer>, Integer>> argLists) {
// In this case, we are assuming that there is a single distinct function.
// So make sure that argLists is of size one.
Preconditions.checkArgument(argLists.size() == 1);
// For example,
// SELECT deptno, COUNT(*), SUM(bonus), MIN(DISTINCT sal)
// FROM emp
// GROUP BY deptno
//
// becomes
//
// SELECT deptno, SUM(cnt), SUM(bonus), MIN(sal)
// FROM (
// SELECT deptno, COUNT(*) as cnt, SUM(bonus), sal
// FROM EMP
// GROUP BY deptno, sal) // Aggregate B
// GROUP BY deptno // Aggregate A
relBuilder.push(aggregate.getInput());
final List<AggregateCall> originalAggCalls = aggregate.getAggCallList();
final ImmutableBitSet originalGroupSet = aggregate.getGroupSet();
// Add the distinct aggregate column(s) to the group-by columns,
// if not already a part of the group-by
final SortedSet<Integer> bottomGroupSet = new TreeSet<>();
bottomGroupSet.addAll(aggregate.getGroupSet().asList());
for (AggregateCall aggCall : originalAggCalls) {
if (aggCall.isDistinct()) {
bottomGroupSet.addAll(aggCall.getArgList());
break; // since we only have single distinct call
}
}
// Generate the intermediate aggregate B, the one on the bottom that converts
// a distinct call to group by call.
// Bottom aggregate is the same as the original aggregate, except that
// the bottom aggregate has converted the DISTINCT aggregate to a group by clause.
final List<AggregateCall> bottomAggregateCalls = new ArrayList<>();
for (AggregateCall aggCall : originalAggCalls) {
// Project the column corresponding to the distinct aggregate. Project
// as-is all the non-distinct aggregates
if (!aggCall.isDistinct()) {
final AggregateCall newCall =
AggregateCall.create(
aggCall.getAggregation(),
false,
aggCall.isApproximate(),
false,
aggCall.getArgList(),
-1,
aggCall.distinctKeys,
RelCollations.EMPTY,
ImmutableBitSet.of(bottomGroupSet).cardinality(),
relBuilder.peek(),
null,
aggCall.name);
bottomAggregateCalls.add(newCall);
}
}
// Generate the aggregate B (see the reference example above)
relBuilder.push(
aggregate.copy(
aggregate.getTraitSet(),
relBuilder.build(),
ImmutableBitSet.of(bottomGroupSet),
null,
bottomAggregateCalls));
// Add aggregate A (see the reference example above), the top aggregate
// to handle the rest of the aggregation that the bottom aggregate hasn't handled
final List<AggregateCall> topAggregateCalls =
com.google.common.collect.Lists.newArrayList();
// Use the remapped arguments for the (non)distinct aggregate calls
int nonDistinctAggCallProcessedSoFar = 0;
for (AggregateCall aggCall : originalAggCalls) {
final AggregateCall newCall;
if (aggCall.isDistinct()) {
List<Integer> newArgList = new ArrayList<>();
for (int arg : aggCall.getArgList()) {
newArgList.add(bottomGroupSet.headSet(arg).size());
}
newCall =
AggregateCall.create(
aggCall.getAggregation(),
false,
aggCall.isApproximate(),
false,
newArgList,
-1,
aggCall.distinctKeys,
RelCollations.EMPTY,
originalGroupSet.cardinality(),
relBuilder.peek(),
aggCall.getType(),
aggCall.name);
} else {
// If aggregate B had a COUNT aggregate call the corresponding aggregate at
// aggregate A must be SUM. For other aggregates, it remains the same.
final List<Integer> newArgs =
com.google.common.collect.Lists.newArrayList(
bottomGroupSet.size() + nonDistinctAggCallProcessedSoFar);
if (aggCall.getAggregation().getKind() == SqlKind.COUNT) {
newCall =
AggregateCall.create(
new SqlSumEmptyIsZeroAggFunction(),
false,
aggCall.isApproximate(),
false,
newArgs,
-1,
aggCall.distinctKeys,
RelCollations.EMPTY,
originalGroupSet.cardinality(),
relBuilder.peek(),
aggCall.getType(),
aggCall.getName());
} else {
newCall =
AggregateCall.create(
aggCall.getAggregation(),
false,
aggCall.isApproximate(),
false,
newArgs,
-1,
aggCall.distinctKeys,
RelCollations.EMPTY,
originalGroupSet.cardinality(),
relBuilder.peek(),
aggCall.getType(),
aggCall.name);
}
nonDistinctAggCallProcessedSoFar++;
}
topAggregateCalls.add(newCall);
}
// Populate the group-by keys with the remapped arguments for aggregate A
// The top groupset is basically an identity (first X fields of aggregate B's
// output), minus the distinct aggCall's input.
final Set<Integer> topGroupSet = new HashSet<>();
int groupSetToAdd = 0;
for (int bottomGroup : bottomGroupSet) {
if (originalGroupSet.get(bottomGroup)) {
topGroupSet.add(groupSetToAdd);
}
groupSetToAdd++;
}
relBuilder.push(
aggregate.copy(
aggregate.getTraitSet(),
relBuilder.build(),
ImmutableBitSet.of(topGroupSet),
null,
topAggregateCalls));
return relBuilder;
} | 3.68 |
rocketmq-connect_DatabaseDialect_buildDropTableStatement | // Builds a DROP TABLE statement, optionally with IF EXISTS and CASCADE clauses
default String buildDropTableStatement(TableId table, boolean ifExists, boolean cascade) {
ExpressionBuilder builder = expressionBuilder();
builder.append("DROP TABLE ");
builder.append(table);
if (ifExists) {
builder.append(" IF EXISTS");
}
if (cascade) {
builder.append(" CASCADE");
}
return builder.toString();
} | 3.68 |
flink_IterativeDataSet_getAggregators | /**
* Gets the registry for aggregators. On the registry, one can add {@link Aggregator}s and an
* aggregator-based {@link ConvergenceCriterion}. This method offers an alternative way to
* registering the aggregators via {@link #registerAggregator(String, Aggregator)} and {@link
* #registerAggregationConvergenceCriterion(String, Aggregator, ConvergenceCriterion)}.
*
* @return The registry for aggregators.
*/
@PublicEvolving
public AggregatorRegistry getAggregators() {
return aggregators;
} | 3.68 |
flink_DeclarativeSlotPoolService_onStart | /**
* This method is called when the slot pool service is started. It can be overridden by
* subclasses.
*
* @param componentMainThreadExecutor componentMainThreadExecutor used by this slot pool service
*/
protected void onStart(ComponentMainThreadExecutor componentMainThreadExecutor) {} | 3.68 |
framework_ConnectorTracker_notifyMarkedAsDirtyListeners | /**
 * Notify all registered MarkedAsDirtyListeners that the given client connector
 * has been marked as dirty.
*
* @param connector
* client connector marked as dirty
* @since 8.4
*/
public void notifyMarkedAsDirtyListeners(ClientConnector connector) {
MarkedAsDirtyConnectorEvent event = new MarkedAsDirtyConnectorEvent(
connector, uI);
new ArrayList<>(markedDirtyListeners).forEach(listener -> {
listener.connectorMarkedAsDirty(event);
});
} | 3.68 |
flink_CallExpression_permanent | /**
* Creates a {@link CallExpression} to a resolved built-in function. It assumes that the {@link
* BuiltInFunctionDefinition} instance is provided by the framework (usually the core module).
*/
@Internal
public static CallExpression permanent(
BuiltInFunctionDefinition builtInFunctionDefinition,
List<ResolvedExpression> args,
DataType dataType) {
return new CallExpression(
false,
FunctionIdentifier.of(builtInFunctionDefinition.getName()),
builtInFunctionDefinition,
args,
dataType);
} | 3.68 |
framework_Calendar_getMonthNamesShort | /**
* Localized display names for months starting from January. Returned
* array's length is always 12.
*
* @return Array of localized month names.
*/
protected String[] getMonthNamesShort() {
DateFormatSymbols s = new DateFormatSymbols(getLocale());
return Arrays.copyOf(s.getShortMonths(), 12);
} | 3.68 |
hbase_MetricsConnection_getHedgedReadWin | /** hedgedReadWin metric */
public Counter getHedgedReadWin() {
return hedgedReadWin;
} | 3.68 |
flink_StructuredOptionsSplitter_escapeWithSingleQuote | /**
* Escapes the given string with single quotes, if the input string contains a double quote or
* any of the given {@code charsToEscape}. Any single quotes in the input string will be escaped
* by doubling.
*
* <p>Given that the escapeChar is (;)
*
* <p>Examples:
*
* <ul>
* <li>A,B,C,D => A,B,C,D
* <li>A'B'C'D => 'A''B''C''D'
* <li>A;BCD => 'A;BCD'
* <li>AB"C"D => 'AB"C"D'
* <li>AB'"D:B => 'AB''"D:B'
* </ul>
*
* @param string a string which needs to be escaped
* @param charsToEscape escape chars for the escape conditions
* @return escaped string by single quote
*/
static String escapeWithSingleQuote(String string, String... charsToEscape) {
boolean escape =
Arrays.stream(charsToEscape).anyMatch(string::contains)
|| string.contains("\"")
|| string.contains("'");
if (escape) {
return "'" + string.replaceAll("'", "''") + "'";
}
return string;
} | 3.68 |
flink_JoinInputSideSpec_getUniqueKeySelector | /**
 * Returns the {@link KeySelector} to extract the unique key from the input row. Returns null
 * if the input does not have a unique key.
*/
@Nullable
public KeySelector<RowData, RowData> getUniqueKeySelector() {
return uniqueKeySelector;
} | 3.68 |
hadoop_SingleFilePerBlockCache_size | /**
* Gets the number of blocks in this cache.
*/
@Override
public int size() {
return blocks.size();
} | 3.68 |
flink_TimestampUtil_isHiveTimestampColumnVector | // whether a ColumnVector is the new TimestampColumnVector
public static boolean isHiveTimestampColumnVector(ColumnVector vector) {
return hiveTSColVectorClz != null && hiveTSColVectorClz.isAssignableFrom(vector.getClass());
} | 3.68 |
dubbo_DefaultApplicationDeployer_supportsExtension | /**
 * Determines whether the extension with the specified class and name is supported
*
* @param extensionClass the {@link Class} of extension
* @param name the name of extension
* @return if supports, return <code>true</code>, or <code>false</code>
* @since 2.7.8
*/
private boolean supportsExtension(Class<?> extensionClass, String name) {
if (isNotEmpty(name)) {
ExtensionLoader<?> extensionLoader = getExtensionLoader(extensionClass);
return extensionLoader.hasExtension(name);
}
return false;
} | 3.68 |
querydsl_MultiCurveExpression_isClosed | /**
* Returns 1 (TRUE) if this MultiCurve is closed [StartPoint ( ) = EndPoint ( ) for each
* Curve in this MultiCurve].
*
* @return closed
*/
public BooleanExpression isClosed() {
if (closed == null) {
closed = Expressions.booleanOperation(SpatialOps.IS_CLOSED, mixin);
}
return closed;
} | 3.68 |
flink_OneInputTransformation_setStateKeySelector | /**
* Sets the {@link KeySelector} that must be used for partitioning keyed state of this
* operation.
*
* @param stateKeySelector The {@code KeySelector} to set
*/
public void setStateKeySelector(KeySelector<IN, ?> stateKeySelector) {
this.stateKeySelector = stateKeySelector;
updateManagedMemoryStateBackendUseCase(stateKeySelector != null);
} | 3.68 |
flink_MemorySegment_getChar | /**
* Reads a char value from the given position, in the system's native byte order.
*
* @param index The position from which the memory will be read.
* @return The char value at the given position.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
*/
@SuppressWarnings("restriction")
public char getChar(int index) {
final long pos = address + index;
if (index >= 0 && pos <= addressLimit - 2) {
return UNSAFE.getChar(heapMemory, pos);
} else if (address > addressLimit) {
throw new IllegalStateException("This segment has been freed.");
} else {
// index is in fact invalid
throw new IndexOutOfBoundsException();
}
} | 3.68 |
hbase_LruBlockCache_clearCache | /** Clears the cache. Used in tests. */
public void clearCache() {
this.map.clear();
this.elements.set(0);
} | 3.68 |
hbase_RegionServerSnapshotManager_waitForOutstandingTasks | /**
* Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)}.
* This *must* be called after all tasks are submitted via submitTask.
* @return <tt>true</tt> on success, <tt>false</tt> otherwise
* @throws SnapshotCreationException if the snapshot failed while we were waiting
*/
boolean waitForOutstandingTasks() throws ForeignException, InterruptedException {
LOG.debug("Waiting for local region snapshots to finish.");
int sz = futures.size();
try {
 // Using the completion service to process the futures that finish first.
for (int i = 0; i < sz; i++) {
Future<Void> f = taskPool.take();
f.get();
if (!futures.remove(f)) {
LOG.warn("unexpected future" + f);
}
LOG.debug("Completed " + (i + 1) + "/" + sz + " local region snapshots.");
}
LOG.debug("Completed " + sz + " local region snapshots.");
return true;
} catch (InterruptedException e) {
LOG.warn("Got InterruptedException in SnapshotSubprocedurePool", e);
if (!stopped) {
Thread.currentThread().interrupt();
throw new ForeignException("SnapshotSubprocedurePool", e);
}
// we are stopped so we can just exit.
} catch (ExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof ForeignException) {
LOG.warn("Rethrowing ForeignException from SnapshotSubprocedurePool", e);
throw (ForeignException) e.getCause();
} else if (cause instanceof DroppedSnapshotException) {
// we have to abort the region server according to contract of flush
abortable.abort("Received DroppedSnapshotException, aborting", cause);
}
LOG.warn("Got Exception in SnapshotSubprocedurePool", e);
throw new ForeignException(name, e.getCause());
} finally {
cancelTasks();
}
return false;
} | 3.68 |
framework_DefaultDeploymentConfiguration_getHeartbeatInterval | /**
* {@inheritDoc}
* <p>
* The default interval is 300 seconds (5 minutes).
*/
@Override
public int getHeartbeatInterval() {
return heartbeatInterval;
} | 3.68 |
framework_ColumnProperty_getOldValue | /**
* Returns the original non-modified value of this property if it has been
* modified.
*
* @return The original value if <code>isModified()</code> is true,
* <code>getValue()</code> otherwise.
*/
public Object getOldValue() {
return value;
} | 3.68 |
flink_HighAvailabilityServices_getWebMonitorLeaderElection | /**
* Gets the {@link LeaderElection} for the cluster's rest endpoint.
*
* @deprecated Use {@link #getClusterRestEndpointLeaderElection()} instead.
*/
@Deprecated
default LeaderElection getWebMonitorLeaderElection() {
throw new UnsupportedOperationException(
"getWebMonitorLeaderElectionService should no longer be used. Instead use "
+ "#getClusterRestEndpointLeaderElectionService to instantiate the cluster "
+ "rest endpoint's leader election service. If you called this method, then "
+ "make sure that #getClusterRestEndpointLeaderElectionService has been "
+ "implemented by your HighAvailabilityServices implementation.");
} | 3.68 |
hbase_MetaTableAccessor_put | /**
* @param t Table to use
* @param p put to make
*/
private static void put(Table t, Put p) throws IOException {
debugLogMutation(p);
t.put(p);
} | 3.68 |
hadoop_Check_notNullElements | /**
* Verifies a list does not have any NULL elements.
*
* @param list the list to check.
* @param name the name to use in the exception message.
*
* @return the list.
*
* @throws IllegalArgumentException if the list has NULL elements.
*/
public static <T> List<T> notNullElements(List<T> list, String name) {
notNull(list, name);
for (int i = 0; i < list.size(); i++) {
notNull(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i));
}
return list;
} | 3.68 |
flink_StatusWatermarkValve_inputWatermarkStatus | /**
* Feed a {@link WatermarkStatus} into the valve. This may trigger the valve to output either a
* new Watermark Status, for which {@link DataOutput#emitWatermarkStatus(WatermarkStatus)} will
* be called, or a new Watermark, for which {@link DataOutput#emitWatermark(Watermark)} will be
* called.
*
* @param watermarkStatus the watermark status to feed to the valve
* @param channelIndex the index of the channel that the fed watermark status belongs to (index
* starting from 0)
*/
public void inputWatermarkStatus(
WatermarkStatus watermarkStatus, int channelIndex, DataOutput<?> output)
throws Exception {
 // only account for watermark status inputs that will result in a status change for the
 // input channel
if (watermarkStatus.isIdle() && channelStatuses[channelIndex].watermarkStatus.isActive()) {
// handle active -> idle toggle for the input channel
channelStatuses[channelIndex].watermarkStatus = WatermarkStatus.IDLE;
// the channel is now idle, therefore not aligned
markWatermarkUnaligned(channelStatuses[channelIndex]);
// if all input channels of the valve are now idle, we need to output an idle stream
// status from the valve (this also marks the valve as idle)
if (!InputChannelStatus.hasActiveChannels(channelStatuses)) {
 // now that all input channels are idle and no channels will continue to advance their
 // watermark, we should "flush" all watermarks across all channels; effectively, this
 // means emitting the max watermark across all channels as the new watermark. Also,
 // since we already try to advance the min watermark as channels individually become
 // IDLE, here we only need to perform the flush if the watermark of the last active
 // channel that just became idle is the current min watermark.
if (channelStatuses[channelIndex].watermark == lastOutputWatermark) {
findAndOutputMaxWatermarkAcrossAllChannels(output);
}
lastOutputWatermarkStatus = WatermarkStatus.IDLE;
output.emitWatermarkStatus(lastOutputWatermarkStatus);
} else if (channelStatuses[channelIndex].watermark == lastOutputWatermark) {
// if the watermark of the channel that just became idle equals the last output
// watermark (the previous overall min watermark), we may be able to find a new
// min watermark from the remaining aligned channels
findAndOutputNewMinWatermarkAcrossAlignedChannels(output);
}
} else if (watermarkStatus.isActive()
&& channelStatuses[channelIndex].watermarkStatus.isIdle()) {
// handle idle -> active toggle for the input channel
channelStatuses[channelIndex].watermarkStatus = WatermarkStatus.ACTIVE;
 // if the last watermark of the input channel, before it was marked idle, is still larger
 // than the overall last output watermark of the valve, then we can set the channel to be
 // aligned already.
if (channelStatuses[channelIndex].watermark >= lastOutputWatermark) {
markWatermarkAligned(channelStatuses[channelIndex]);
}
 // if the valve was previously marked to be idle, mark it as active and output an active
 // stream status because at least one of the input channels is now active
if (lastOutputWatermarkStatus.isIdle()) {
lastOutputWatermarkStatus = WatermarkStatus.ACTIVE;
output.emitWatermarkStatus(lastOutputWatermarkStatus);
}
}
} | 3.68 |
hadoop_Lz4Codec_createOutputStream | /**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream} with the given {@link Compressor}.
*
* @param out the location for the final output stream
* @param compressor compressor to use
* @return a stream the user can write uncompressed data to have it compressed
* @throws IOException raised on errors performing I/O.
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
int bufferSize = conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
int compressionOverhead = bufferSize/255 + 16;
return new BlockCompressorStream(out, compressor, bufferSize,
compressionOverhead);
} | 3.68 |
hadoop_NativeRuntime_createNativeObject | /**
 * Create a native object. We use it to create native handlers.
*/
public synchronized static long createNativeObject(String clazz) {
assertNativeLibraryLoaded();
final long ret = JNICreateNativeObject(clazz.getBytes(StandardCharsets.UTF_8));
if (ret == 0) {
LOG.warn("Can't create NativeObject for class " + clazz + ", probably not exist.");
}
return ret;
} | 3.68 |
flink_FileLock_init | /**
* Check whether the locking file exists in the file system. Create it if it does not exist.
* Then create a FileOutputStream for it.
*
* @throws IOException If the file path is invalid or the parent dir does not exist
*/
private void init() throws IOException {
if (!this.file.exists()) {
this.file.createNewFile();
}
outputStream = new FileOutputStream(this.file);
} | 3.68 |
hadoop_RMAppKillByClientEvent_getCallerUGI | /**
* returns the {@link UserGroupInformation} information.
* @return UserGroupInformation
*/
public final UserGroupInformation getCallerUGI() {
return callerUGI;
} | 3.68 |
hbase_ProcedureMember_defaultPool | /**
* Default thread pool for the procedure
 * @param memberName the member name, used in the names of the pool's threads
 * @param procThreads the maximum number of threads to allow in the pool
* @param keepAliveMillis the maximum time (ms) that excess idle threads will wait for new tasks
*/
public static ThreadPoolExecutor defaultPool(String memberName, int procThreads,
long keepAliveMillis) {
return new ThreadPoolExecutor(1, procThreads, keepAliveMillis, TimeUnit.MILLISECONDS,
new SynchronousQueue<>(),
new ThreadFactoryBuilder().setNameFormat("member: '" + memberName + "' subprocedure-pool-%d")
.setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
} | 3.68 |
flink_Types_LIST | /**
* Returns type information for a Java {@link java.util.List}. A list must not be null. Null
* values in elements are not supported.
*
* <p>By default, lists are untyped and treated as a generic type in Flink; therefore, it is
* useful to pass type information whenever a list is used.
*
* <p><strong>Note:</strong> Flink does not preserve the concrete {@link List} type. It converts
* a list into {@link ArrayList} when copying or deserializing.
*
* @param elementType type information for the list's elements
*/
public static <E> TypeInformation<List<E>> LIST(TypeInformation<E> elementType) {
return new ListTypeInfo<>(elementType);
} | 3.68 |
hadoop_AppPriorityACLConfigurationParser_getUserOrGroupACLStringFromConfig | /*
 * This method helps to build the user/group ACL string for a given priority,
 * for example "user1,user2 group1,group2".
*/
private StringBuilder getUserOrGroupACLStringFromConfig(String value) {
 // ACL strings can be generated for USER or GROUP.
// aclList in map contains two entries. 1. USER, 2. GROUP.
StringBuilder aclTypeName = new StringBuilder();
if (value.trim().equals(ALL_ACL)) {
aclTypeName.setLength(0);
aclTypeName.append(ALL_ACL);
return aclTypeName;
}
aclTypeName.append(value.trim());
return aclTypeName;
} | 3.68 |
morf_UpgradePath_upgradeInProgress | /**
* Returns whether or not this upgrade knew that an upgrade was in progress
* at the point it was created.
*
* @return true if there was an upgrade in progress.
*/
public boolean upgradeInProgress() {
return upgradeStatus != null && upgradeStatus != UpgradeStatus.NONE;
} | 3.68 |
hbase_RequestConverter_buildSetCleanerChoreRunningRequest | /**
* Creates a request for enabling/disabling the cleaner chore
* @return A {@link SetCleanerChoreRunningRequest}
*/
public static SetCleanerChoreRunningRequest buildSetCleanerChoreRunningRequest(boolean on) {
return SetCleanerChoreRunningRequest.newBuilder().setOn(on).build();
} | 3.68 |
hadoop_DatanodeLocalInfo_getDatanodeLocalReport | /** A formatted string for printing the status of the DataNode. */
public String getDatanodeLocalReport() {
return ("Uptime: " + getUptime())
+ ", Software version: " + getSoftwareVersion()
+ ", Config version: " + getConfigVersion();
} | 3.68 |
flink_DeltaTrigger_of | /**
* Creates a delta trigger from the given threshold and {@code DeltaFunction}.
*
* @param threshold The threshold at which to trigger.
* @param deltaFunction The delta function to use
* @param stateSerializer TypeSerializer for the data elements.
* @param <T> The type of elements on which this trigger can operate.
* @param <W> The type of {@link Window Windows} on which this trigger can operate.
*/
public static <T, W extends Window> DeltaTrigger<T, W> of(
double threshold, DeltaFunction<T> deltaFunction, TypeSerializer<T> stateSerializer) {
return new DeltaTrigger<>(threshold, deltaFunction, stateSerializer);
} | 3.68 |
hudi_HadoopConfigurations_getHadoopConf | /**
* Creates a new hadoop configuration that is initialized with the given flink configuration.
*/
public static org.apache.hadoop.conf.Configuration getHadoopConf(Configuration conf) {
org.apache.hadoop.conf.Configuration hadoopConf = FlinkClientUtil.getHadoopConf();
Map<String, String> options = FlinkOptions.getPropertiesWithPrefix(conf.toMap(), HADOOP_PREFIX);
options.forEach(hadoopConf::set);
return hadoopConf;
} | 3.68 |
morf_GraphBasedUpgradeNode_isRoot | /**
* @return true if this node is a no-op root node of the graph
*/
public boolean isRoot() {
return sequence == 0;
} | 3.68 |
hbase_VisibilityController_buildException | /** Returns a NameBytesPair of the exception name to the stringified version of the exception. */
// Copied from ResponseConverter and made private. Only used in here.
private static NameBytesPair buildException(final Throwable t) {
NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder();
parameterBuilder.setName(t.getClass().getName());
parameterBuilder.setValue(ByteString.copyFromUtf8(StringUtils.stringifyException(t)));
return parameterBuilder.build();
} | 3.68 |
flink_HiveDDLUtils_defaultTrait | // a constraint is by default ENABLE NOVALIDATE RELY
public static byte defaultTrait() {
byte res = enableConstraint((byte) 0);
res = relyConstraint(res);
return res;
} | 3.68 |
pulsar_SchemaStorage_put | /**
* Put the schema to the schema storage.
*
* @param key The schema ID
* @param fn The function to calculate the value and hash that need to put to the schema storage
 * The input of the function is all the existing schemas, which are used for the schema compatibility check
* @return The schema version of the stored schema
*/
default CompletableFuture<SchemaVersion> put(String key,
Function<CompletableFuture<List<CompletableFuture<StoredSchema>>>,
CompletableFuture<Pair<byte[], byte[]>>> fn) {
return fn.apply(getAll(key)).thenCompose(pair -> put(key, pair.getLeft(), pair.getRight()));
} | 3.68 |