name (stringlengths 12-178) | code_snippet (stringlengths 8-36.5k) | score (float64 3.26-3.68) |
---|---|---|
hudi_HoodieFlinkWriteClient_cleanHandles | /**
* Clean the write handles within a checkpoint interval.
* All the handles should have been closed already.
*/
public void cleanHandles() {
this.bucketToHandles.clear();
} | 3.68 |
flink_ResultPartitionFactory_isOverdraftBufferNeeded | /** Return whether this result partition needs an overdraft buffer. */
private static boolean isOverdraftBufferNeeded(ResultPartitionType resultPartitionType) {
// Only pipelined / pipelined-bounded partition needs overdraft buffer. More
// specifically, there is no reason to request more buffers for non-pipelined (i.e.
// batch) shuffle. The reasons are as follows:
// 1. For BoundedBlockingShuffle, each full buffer will be directly released.
// 2. For SortMergeShuffle, the maximum capacity of buffer pool is 4 * numSubpartitions. It
// is efficient enough to spill this part of memory to disk.
// 3. For Hybrid Shuffle, the buffer pool is unbounded. If it can't get a normal buffer, it
// also can't get an overdraft buffer.
return resultPartitionType.isPipelinedOrPipelinedBoundedResultPartition();
} | 3.68 |
hbase_ExcludeDatanodeManager_tryAddExcludeDN | /**
* Try to add a datanode to the regionserver excluding cache
* @param datanodeInfo the datanode to be added to the excluded cache
* @param cause the reason the datanode should be excluded
* @return True if the datanode is added to the regionserver excluding cache, false otherwise
*/
public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) {
boolean alreadyMarkedSlow = getExcludeDNs().containsKey(datanodeInfo);
if (!alreadyMarkedSlow) {
excludeDNsCache.put(datanodeInfo, EnvironmentEdgeManager.currentTime());
LOG.info(
"Added datanode: {} to exclude cache by [{}] success, current excludeDNsCache size={}",
datanodeInfo, cause, excludeDNsCache.size());
return true;
}
LOG.debug(
"Try add datanode {} to exclude cache by [{}] failed, " + "current exclude DNs are {}",
datanodeInfo, cause, getExcludeDNs().keySet());
return false;
} | 3.68 |
hadoop_AbfsManifestStoreOperations_bindToFileSystem | /**
* Bind to the store.
*
* @param filesystem FS.
* @param path path to work under
* @throws IOException binding problems.
*/
@Override
public void bindToFileSystem(FileSystem filesystem, Path path) throws IOException {
if (!(filesystem instanceof AzureBlobFileSystem)) {
throw new PathIOException(path.toString(),
"Not an abfs filesystem: " + filesystem.getClass());
}
super.bindToFileSystem(filesystem, path);
try {
resilientCommitByRename = getFileSystem().createResilientCommitSupport(path);
// this also means that etags are preserved.
etagsPreserved = true;
LOG.debug("Bonded to filesystem with resilient commits under path {}", path);
} catch (UnsupportedOperationException e) {
LOG.debug("No resilient commit support under path {}", path);
}
} | 3.68 |
dubbo_ServiceConfig_exportLocal | /**
* always export injvm
*/
private void exportLocal(URL url) {
URL local = URLBuilder.from(url)
.setProtocol(LOCAL_PROTOCOL)
.setHost(LOCALHOST_VALUE)
.setPort(0)
.build();
local = local.setScopeModel(getScopeModel()).setServiceModel(providerModel);
local = local.addParameter(EXPORTER_LISTENER_KEY, LOCAL_PROTOCOL);
doExportUrl(local, false, RegisterTypeEnum.AUTO_REGISTER);
logger.info("Export dubbo service " + interfaceClass.getName() + " to local registry url : " + local);
} | 3.68 |
morf_RemoveIndex_apply | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.upgrade.SchemaChange#apply(org.alfasoftware.morf.metadata.Schema)
*/
@Override
public Schema apply(Schema schema) {
Table original = schema.getTable(tableName);
boolean foundIndex = false;
List<String> indexes = new ArrayList<>();
for (Index index : original.indexes()) {
// Skip the index we're supposed to be removing
if (index.getName().equalsIgnoreCase(indexToBeRemoved.getName())) {
foundIndex = true;
continue;
}
// Retain all other indexes
indexes.add(index.getName());
}
if (!foundIndex) {
throw new IllegalArgumentException("Cannot remove index [" + indexToBeRemoved.getName() + "] as it does not exist on table [" + tableName + "]");
}
return new TableOverrideSchema(schema, new AlteredTable(original, null, null, indexes, null));
} | 3.68 |
druid_MySqlStatementParser_parseWhile | /**
* parse while statement with label
*
* @return MySqlWhileStatement
*/
public SQLWhileStatement parseWhile(String label) {
accept(Token.WHILE);
SQLWhileStatement stmt = new SQLWhileStatement();
stmt.setLabelName(label);
stmt.setCondition(this.exprParser.expr());
accept(Token.DO);
this.parseStatementList(stmt.getStatements(), -1, stmt);
accept(Token.END);
accept(Token.WHILE);
acceptIdentifier(label);
accept(Token.SEMI);
stmt.setAfterSemi(true);
return stmt;
} | 3.68 |
hadoop_ActiveAuditManagerS3A_modifyResponse | /**
* Forward to the inner span.
* {@inheritDoc}
*/
@Override
public SdkResponse modifyResponse(Context.ModifyResponse context,
ExecutionAttributes executionAttributes) {
return span.modifyResponse(context, executionAttributes);
} | 3.68 |
hbase_OrderedBytes_length | /**
* Return the number of encoded entries remaining in {@code buff}. The state of {@code buff} is
* not modified through use of this method.
*/
public static int length(PositionedByteRange buff) {
PositionedByteRange b =
new SimplePositionedMutableByteRange(buff.getBytes(), buff.getOffset(), buff.getLength());
b.setPosition(buff.getPosition());
int cnt = 0;
for (; isEncodedValue(b); skip(b), cnt++)
;
return cnt;
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_directoryDeleted | /**
* Indicate that we just deleted a directory through WASB.
*/
public void directoryDeleted() {
numberOfDirectoriesDeleted.incr();
} | 3.68 |
hbase_ClaimReplicationQueueRemoteProcedure_shouldSkip | // check whether ReplicationSyncUp has already done the work for us, if so, we should skip
// claiming the replication queues and deleting them instead.
private boolean shouldSkip(MasterProcedureEnv env) throws IOException {
MasterFileSystem mfs = env.getMasterFileSystem();
Path syncUpDir = new Path(mfs.getRootDir(), ReplicationSyncUp.INFO_DIR);
return mfs.getFileSystem().exists(new Path(syncUpDir, getServerName().getServerName()));
} | 3.68 |
hadoop_MagicCommitTracker_outputImmediatelyVisible | /**
* Flag to indicate that output is not visible after the stream
* is closed.
* @return false
*/
@Override
public boolean outputImmediatelyVisible() {
return false;
} | 3.68 |
framework_Potus_getParty | /**
* @return the party
*/
public String getParty() {
return party;
} | 3.68 |
hadoop_ServletUtil_htmlFooter | /**
* HTML footer to be added in the jsps.
* @return the HTML footer.
*/
public static String htmlFooter() {
return HTML_TAIL;
} | 3.68 |
zxing_GenericGF_exp | /**
* @return 2 to the power of a in GF(size)
*/
int exp(int a) {
return expTable[a];
} | 3.68 |
hbase_RegionMover_stripServer | /**
* Removes the servername whose hostname and port match from the passed list of servers.
* The removed servername is returned.
* @return server removed from list of Region Servers
*/
private ServerName stripServer(List<ServerName> regionServers, String hostname, int port) {
for (Iterator<ServerName> iter = regionServers.iterator(); iter.hasNext();) {
ServerName server = iter.next();
if (
server.getAddress().getHostName().equalsIgnoreCase(hostname)
&& server.getAddress().getPort() == port
) {
iter.remove();
return server;
}
}
return null;
} | 3.68 |
flink_TextElement_wrap | /** Wraps a list of {@link InlineElement}s into a single {@link TextElement}. */
public static InlineElement wrap(InlineElement... elements) {
return text(Strings.repeat("%s", elements.length), elements);
} | 3.68 |
hudi_BaseHoodieQueueBasedExecutor_startConsumingAsync | /**
* Start consumer
*/
private CompletableFuture<Void> startConsumingAsync() {
return consumer.map(consumer ->
CompletableFuture.supplyAsync(() -> {
doConsume(queue, consumer);
return (Void) null;
}, consumerExecutorService)
)
.orElse(CompletableFuture.completedFuture(null));
} | 3.68 |
hbase_MasterObserver_postRevoke | /**
* Called after revoking user permissions.
* @param ctx the coprocessor instance's environment
* @param userPermission the user and permissions
*/
default void postRevoke(ObserverContext<MasterCoprocessorEnvironment> ctx,
UserPermission userPermission) throws IOException {
} | 3.68 |
framework_SerializerHelper_readClassArray | /**
* Deserializes class references serialized by
* {@link #writeClassArray(ObjectOutputStream, Class[])}. Supports null
* class arrays.
*
* @param in
* {@link ObjectInputStream} to read from.
* @return Class array with the class references or null.
* @throws ClassNotFoundException
* If one of the classes could not be resolved.
* @throws IOException
* Rethrows IOExceptions from the ObjectInputStream
*/
public static Class<?>[] readClassArray(ObjectInputStream in)
throws ClassNotFoundException, IOException {
String[] classNames = (String[]) in.readObject();
if (classNames == null) {
return null;
}
Class<?>[] classes = new Class<?>[classNames.length];
for (int i = 0; i < classNames.length; i++) {
classes[i] = resolveClass(classNames[i]);
}
return classes;
} | 3.68 |
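A hedged round-trip sketch for the helper above and its counterpart `writeClassArray(ObjectOutputStream, Class[])` referenced in the javadoc; the `com.vaadin.util` package name is assumed here:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Arrays;

import com.vaadin.util.SerializerHelper; // package name assumed

public class ClassArrayRoundTrip {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(buffer)) {
            // Serialize the class references; a null array is also supported.
            SerializerHelper.writeClassArray(out, new Class<?>[] { String.class, Integer.class });
        }
        try (ObjectInputStream in = new ObjectInputStream(
                new ByteArrayInputStream(buffer.toByteArray()))) {
            Class<?>[] classes = SerializerHelper.readClassArray(in);
            System.out.println(Arrays.toString(classes)); // [class java.lang.String, class java.lang.Integer]
        }
    }
}
```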
framework_AbstractInMemoryContainer_removeAllFilters | /**
* Remove all container filters for all properties and re-filter the view.
*
* This can be used to implement
* {@link Filterable#removeAllContainerFilters()}.
*/
protected void removeAllFilters() {
if (getFilters().isEmpty()) {
return;
}
getFilters().clear();
filterAll();
} | 3.68 |
hudi_Option_orElse | /**
* Identical to {@code Optional.orElse}
*/
public T orElse(T other) {
return val != null ? val : other;
} | 3.68 |
pulsar_AuthorizationService_allowTopicPolicyOperation | /**
* @deprecated - will be removed after 2.12. Use async variant.
*/
@Deprecated
public Boolean allowTopicPolicyOperation(TopicName topicName,
PolicyName policy,
PolicyOperation operation,
String originalRole,
String role,
AuthenticationDataSource authData) throws Exception {
try {
return allowTopicPolicyOperationAsync(
topicName, policy, operation, originalRole, role, authData).get(
conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
} catch (InterruptedException e) {
throw new RestException(e);
} catch (ExecutionException e) {
throw new RestException(e.getCause());
}
} | 3.68 |
pulsar_PortManager_releaseLockedPort | /**
* Returns whether the port was released successfully.
*
* @return whether the release is successful.
*/
public static synchronized boolean releaseLockedPort(int lockedPort) {
return PORTS.remove(lockedPort);
} | 3.68 |
druid_IPRange_computeNetworkPrefixFromMask | /**
* Compute the extended network prefix from the IP subnet mask.
*
* @param mask Reference to the subnet mask IP number.
* @return Return the extended network prefix. Return -1 if the specified mask cannot be converted into an
* extended network prefix.
*/
private int computeNetworkPrefixFromMask(IPAddress mask) {
int result = 0;
int tmp = mask.getIPAddress();
while ((tmp & 0x00000001) == 0x00000001) {
result++;
tmp = tmp >>> 1;
}
if (tmp != 0) {
return -1;
}
return result;
} | 3.68 |
flink_PythonOperatorUtils_setCurrentKeyForStreaming | /** Set the current key for streaming operator. */
public static <K> void setCurrentKeyForStreaming(
KeyedStateBackend<K> stateBackend, K currentKey) {
if (!inBatchExecutionMode(stateBackend)) {
stateBackend.setCurrentKey(currentKey);
}
} | 3.68 |
flink_DataSet_count | /**
* Convenience method to get the count (number of elements) of a DataSet.
*
* @return A long integer that represents the number of elements in the data set.
*/
public long count() throws Exception {
final String id = new AbstractID().toString();
output(new Utils.CountHelper<T>(id)).name("count()");
JobExecutionResult res = getExecutionEnvironment().execute();
return res.<Long>getAccumulatorResult(id);
} | 3.68 |
morf_SqlUtils_truncate | /**
* Constructs a Truncate Statement.
*
* <p>Usage is discouraged; this method will be deprecated at some point. Use
* {@link TruncateStatement#truncate(TableReference)} for preference.</p>
*
* @param table The table to truncate.
* @return The statement.
*/
public static TruncateStatement truncate(TableReference table) {
return new TruncateStatement(table);
} | 3.68 |
flink_MemorySegment_putFloatLittleEndian | /**
* Writes the given single-precision float value (32bit, 4 bytes) to the given position in
* little endian byte order. This method's speed depends on the system's native byte order, and
* it is possibly slower than {@link #putFloat(int, float)}. For most cases (such as transient
* storage in memory or serialization for I/O and network), it suffices to know that the byte
* order in which the value is written is the same as the one in which it is read, and {@link
* #putFloat(int, float)} is the preferable choice.
*
* @param index The position at which the value will be written.
* @param value The float value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 4.
*/
public void putFloatLittleEndian(int index, float value) {
putIntLittleEndian(index, Float.floatToRawIntBits(value));
} | 3.68 |
hadoop_LocalResolver_chooseFirstNamespace | /**
* Get the local name space. This relies on the RPC Server to get the address
* from the client.
*
* TODO we only support DN and NN locations, we need to add others like
* Resource Managers.
*
* @param path Path ignored by this policy.
* @param loc Federated location with multiple destinations.
* @return Local name space. Null if we don't know about this machine.
*/
@Override
protected String chooseFirstNamespace(String path, PathLocation loc) {
String localSubcluster = null;
String clientAddr = getClientAddr();
Map<String, String> subclusterInfo = getSubclusterMapping();
if (subclusterInfo != null) {
localSubcluster = subclusterInfo.get(clientAddr);
if (localSubcluster != null) {
LOG.debug("Local namespace for {} is {}", clientAddr, localSubcluster);
} else {
LOG.error("Cannot get local namespace for {}", clientAddr);
}
} else {
LOG.error("Cannot get node mapping when resolving {} at {} from {}",
path, loc, clientAddr);
}
return localSubcluster;
} | 3.68 |
rocketmq-connect_Deserializer_deserialize | /**
* Deserialize a record value from a byte array into a value or object.
*/
default T deserialize(String topic, KeyValue extensions, byte[] data) {
return deserialize(topic, data);
} | 3.68 |
dubbo_FileCacheStore_getCacheFilePath | /**
* for unit test only
*/
@Deprecated
protected String getCacheFilePath() {
return cacheFilePath;
} | 3.68 |
flink_StreamProjection_projectTuple18 | /**
* Projects a {@link Tuple} {@link DataStream} to the previously selected fields.
*
* @return The projected DataStream.
* @see Tuple
* @see DataStream
*/
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>
SingleOutputStreamOperator<
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>
projectTuple18() {
TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType());
TupleTypeInfo<
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>
tType =
new TupleTypeInfo<
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>(fTypes);
return dataStream.transform(
"Projection",
tType,
new StreamProject<
IN,
Tuple18<
T0,
T1,
T2,
T3,
T4,
T5,
T6,
T7,
T8,
T9,
T10,
T11,
T12,
T13,
T14,
T15,
T16,
T17>>(
fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig())));
} | 3.68 |
flink_NettyMessage_allocateBuffer | /**
* Allocates a new buffer and adds some header information for the frame decoder.
*
* <p>If the <tt>contentLength</tt> is unknown, you must write the actual length after adding
* the contents as an integer to position <tt>0</tt>!
*
* @param allocator byte buffer allocator to use
* @param id {@link NettyMessage} subclass ID
* @param messageHeaderLength additional header length that should be part of the allocated
* buffer and is written outside of this method
* @param contentLength content length (or <tt>-1</tt> if unknown)
* @param allocateForContent whether to make room for the actual content in the buffer
* (<tt>true</tt>) or whether to only return a buffer with the header information
* (<tt>false</tt>)
* @return a newly allocated direct buffer with header data written for {@link
* NettyMessageEncoder}
*/
private static ByteBuf allocateBuffer(
ByteBufAllocator allocator,
byte id,
int messageHeaderLength,
int contentLength,
boolean allocateForContent) {
checkArgument(contentLength <= Integer.MAX_VALUE - FRAME_HEADER_LENGTH);
final ByteBuf buffer;
if (!allocateForContent) {
buffer = allocator.directBuffer(FRAME_HEADER_LENGTH + messageHeaderLength);
} else if (contentLength != -1) {
buffer =
allocator.directBuffer(
FRAME_HEADER_LENGTH + messageHeaderLength + contentLength);
} else {
// content length unknown -> start with the default initial size (rather than
// FRAME_HEADER_LENGTH only):
buffer = allocator.directBuffer();
}
buffer.writeInt(
FRAME_HEADER_LENGTH
+ messageHeaderLength
+ contentLength); // may be updated later, e.g. if contentLength == -1
buffer.writeInt(MAGIC_NUMBER);
buffer.writeByte(id);
return buffer;
} | 3.68 |
flink_SqlTimestampParser_parseField | /**
* Static utility to parse a field of type Timestamp from a byte sequence that represents text
* characters (such as when read from a file stream).
*
* @param bytes The bytes containing the text data that should be parsed.
* @param startPos The offset to start the parsing.
* @param length The length of the byte sequence (counting from the offset).
* @param delimiter The delimiter that terminates the field.
* @return The parsed value.
* @throws IllegalArgumentException Thrown when the value cannot be parsed because the text
* does not represent a valid number.
*/
public static final Timestamp parseField(
byte[] bytes, int startPos, int length, char delimiter) {
final int limitedLen = nextStringLength(bytes, startPos, length, delimiter);
if (limitedLen > 0
&& (Character.isWhitespace(bytes[startPos])
|| Character.isWhitespace(bytes[startPos + limitedLen - 1]))) {
throw new NumberFormatException(
"There is leading or trailing whitespace in the numeric field.");
}
final String str = new String(bytes, startPos, limitedLen, ConfigConstants.DEFAULT_CHARSET);
return Timestamp.valueOf(str);
} | 3.68 |
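A minimal usage sketch for the static field parser above, assuming it is Flink's `org.apache.flink.types.parser.SqlTimestampParser`; the sample record bytes are illustrative:

```java
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;

import org.apache.flink.types.parser.SqlTimestampParser; // package assumed from Flink's field parsers

public class ParseTimestampFieldExample {
    public static void main(String[] args) {
        // A '|'-delimited record whose first field is a SQL timestamp literal.
        byte[] record = "2024-05-01 10:15:30.0|next-field".getBytes(StandardCharsets.US_ASCII);
        // Parse only the first field: start at offset 0, scan at most the whole record,
        // and stop at the '|' delimiter.
        Timestamp ts = SqlTimestampParser.parseField(record, 0, record.length, '|');
        System.out.println(ts); // 2024-05-01 10:15:30.0
    }
}
```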
hudi_FlinkInMemoryStateIndex_isImplicitWithStorage | /**
* Index is kept up to date implicitly by the storage write, so no explicit update is needed.
*/
@Override
public boolean isImplicitWithStorage() {
return true;
} | 3.68 |
hbase_MasterObserver_postHasUserPermissions | /**
* Called after checking if user has permissions.
* @param ctx the coprocessor instance's environment
* @param userName the user name
* @param permissions the permission list
*/
default void postHasUserPermissions(ObserverContext<MasterCoprocessorEnvironment> ctx,
String userName, List<Permission> permissions) throws IOException {
} | 3.68 |
framework_Page_getCurrent | /**
* Gets the Page to which the current UI belongs. This is automatically
* defined when processing requests to the server. In other cases (e.g.
* from background threads), the current UI is not automatically defined.
*
* @see UI#getCurrent()
*
* @return the current page instance if available, otherwise
* <code>null</code>
*/
public static Page getCurrent() {
UI currentUI = UI.getCurrent();
if (currentUI == null) {
return null;
}
return currentUI.getPage();
} | 3.68 |
framework_Tree_accept | /*
* Uses enhanced server side check
*/
@Override
public boolean accept(DragAndDropEvent dragEvent) {
try {
// must be over tree node and in the middle of it (not top or
// bottom
// part)
TreeTargetDetails eventDetails = (TreeTargetDetails) dragEvent
.getTargetDetails();
Object itemIdOver = eventDetails.getItemIdOver();
if (!eventDetails.getTarget().areChildrenAllowed(itemIdOver)) {
return false;
}
// return true if directly over
return eventDetails
.getDropLocation() == VerticalDropLocation.MIDDLE;
} catch (Exception e) {
return false;
}
} | 3.68 |
hadoop_SystemErasureCodingPolicies_getReplicationPolicy | /**
* Get the special REPLICATION policy.
*/
public static ErasureCodingPolicy getReplicationPolicy() {
return REPLICATION_POLICY;
} | 3.68 |
flink_CompositeTypeSerializerSnapshot_writeOuterSnapshot | /**
* Writes the outer snapshot, i.e. any information beyond the nested serializers of the outer
* serializer.
*
* <p>The base implementation of this methods writes nothing, i.e. it assumes that the outer
* serializer only has nested serializers and no extra information. Otherwise, if the outer
* serializer contains some extra information that needs to be persisted as part of the
* serializer snapshot, this must be overridden. Note that this method and the corresponding
* methods {@link #readOuterSnapshot(int, DataInputView, ClassLoader)}, {@link
* #resolveOuterSchemaCompatibility(TypeSerializer)} needs to be implemented.
*
* @param out the {@link DataOutputView} to write the outer snapshot to.
*/
protected void writeOuterSnapshot(DataOutputView out) throws IOException {} | 3.68 |
hbase_HFileReaderImpl_shouldUseHeap | /**
* Whether we use heap or not depends on our intent to cache the block. We want to avoid
* allocating to off-heap if we intend to cache into the on-heap L1 cache. Otherwise, it's more
* efficient to allocate to off-heap since we can control GC ourselves for those. So our decision
* here breaks down as follows: <br>
* If block cache is disabled, don't use heap. If we're not using the CombinedBlockCache, use heap
* unless caching is disabled for the request. Otherwise, only use heap if caching is enabled and
* the expected block type is not DATA (which goes to off-heap L2 in combined cache).
* @see org.apache.hadoop.hbase.io.hfile.HFileBlock.FSReader#readBlockData(long, long, boolean,
* boolean, boolean)
*/
private boolean shouldUseHeap(BlockType expectedBlockType, boolean cacheBlock) {
if (!cacheConf.getBlockCache().isPresent()) {
return false;
}
// we only cache a block if cacheBlock is true and caching-on-read is enabled in CacheConfig
// we can really only check for that if have an expectedBlockType
if (expectedBlockType != null) {
cacheBlock &= cacheConf.shouldCacheBlockOnRead(expectedBlockType.getCategory());
}
if (!cacheConf.isCombinedBlockCache()) {
// Block to cache in LruBlockCache must be an heap one, if caching enabled. So just allocate
// block memory from heap for saving an extra off-heap to heap copying in that case.
return cacheBlock;
}
return cacheBlock && expectedBlockType != null && !expectedBlockType.isData();
} | 3.68 |
morf_OracleDialect_buildSQLToStopTracing | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#buildSQLToStopTracing()
*/
@Override
public List<String> buildSQLToStopTracing() {
return Arrays.asList("ALTER SESSION SET EVENTS '10046 TRACE NAME CONTEXT OFF'");
} | 3.68 |
framework_DefaultFieldGroupFieldFactory_anySelect | /**
* @since 7.4
* @param fieldType
* the type of the field
* @return true if any AbstractSelect can be assigned to the field
*/
@SuppressWarnings("rawtypes")
protected boolean anySelect(Class<? extends Field> fieldType) {
return anyField(fieldType) || fieldType == AbstractSelect.class;
} | 3.68 |
hudi_FlinkConsistentBucketUpdateStrategy_patchFileIdToRecords | /**
* Rewrite the first record with given fileID
*/
private void patchFileIdToRecords(List<HoodieRecord> records, String fileId) {
HoodieRecord first = records.get(0);
HoodieRecord record = new HoodieAvroRecord<>(first.getKey(), (HoodieRecordPayload) first.getData(), first.getOperation());
HoodieRecordLocation newLoc = new HoodieRecordLocation("U", fileId);
record.setCurrentLocation(newLoc);
records.set(0, record);
} | 3.68 |
hbase_RestoreTool_checkAndCreateTable | /**
* Prepare the table for bulkload, most codes copied from {@code createTable} method in
* {@code BulkLoadHFilesTool}.
* @param conn connection
* @param targetTableName target table name
* @param regionDirList region directory list
* @param htd table descriptor
* @param truncateIfExists truncates table if exists
* @throws IOException exception
*/
private void checkAndCreateTable(Connection conn, TableName targetTableName,
ArrayList<Path> regionDirList, TableDescriptor htd, boolean truncateIfExists)
throws IOException {
try (Admin admin = conn.getAdmin()) {
boolean createNew = false;
if (admin.tableExists(targetTableName)) {
if (truncateIfExists) {
LOG.info(
"Truncating exising target table '" + targetTableName + "', preserving region splits");
admin.disableTable(targetTableName);
admin.truncateTable(targetTableName, true);
} else {
LOG.info("Using exising target table '" + targetTableName + "'");
}
} else {
createNew = true;
}
if (createNew) {
LOG.info("Creating target table '" + targetTableName + "'");
byte[][] keys = null;
try {
if (regionDirList == null || regionDirList.size() == 0) {
admin.createTable(htd);
} else {
keys = generateBoundaryKeys(regionDirList);
// create table using table descriptor and region boundaries
admin.createTable(htd, keys);
}
} catch (NamespaceNotFoundException e) {
LOG.warn("There was no namespace and the same will be created");
String namespaceAsString = targetTableName.getNamespaceAsString();
LOG.info("Creating target namespace '" + namespaceAsString + "'");
admin.createNamespace(NamespaceDescriptor.create(namespaceAsString).build());
if (null == keys) {
admin.createTable(htd);
} else {
admin.createTable(htd, keys);
}
}
}
long startTime = EnvironmentEdgeManager.currentTime();
while (!admin.isTableAvailable(targetTableName)) {
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
if (EnvironmentEdgeManager.currentTime() - startTime > TABLE_AVAILABILITY_WAIT_TIME) {
throw new IOException("Time out " + TABLE_AVAILABILITY_WAIT_TIME + "ms expired, table "
+ targetTableName + " is still not available");
}
}
}
} | 3.68 |
hadoop_HttpReferrerAuditHeader_build | /**
* Build.
* @return an HttpReferrerAuditHeader
*/
public HttpReferrerAuditHeader build() {
return new HttpReferrerAuditHeader(this);
} | 3.68 |
framework_ConnectorBundleLoader_notice | // Not using Vaadin notifications (#14597)
private void notice(String productName) {
if (notice == null) {
notice = new HTML();
notice.addClickHandler(event -> notice.removeFromParent());
notice.addTouchStartHandler(event -> notice.removeFromParent());
}
String msg = notice.getText().trim();
msg += msg.isEmpty() ? "Using Evaluation License of: " : ", ";
notice.setText(msg + productName);
RootPanel.get().add(notice);
notice.getElement().setClassName("");
Style s = notice.getElement().getStyle();
s.setPosition(Position.FIXED);
s.setTextAlign(TextAlign.CENTER);
s.setRight(0, Unit.PX);
s.setLeft(0, Unit.PX);
s.setBottom(0, Unit.PX);
s.setProperty("padding", "0.5em 1em");
s.setProperty("font-family", "sans-serif");
s.setFontSize(12, Unit.PX);
s.setLineHeight(1.1, Unit.EM);
s.setColor("white");
s.setBackgroundColor("black");
s.setOpacity(0.7);
s.setZIndex(2147483646);
s.setProperty("top", "auto");
s.setProperty("width", "auto");
s.setDisplay(Display.BLOCK);
s.setWhiteSpace(WhiteSpace.NORMAL);
s.setVisibility(Visibility.VISIBLE);
s.setMargin(0, Unit.PX);
} | 3.68 |
AreaShop_GithubUpdateCheck_getLatestVersion | /**
* Get the latest version.
* @return Latest version of the plugin (if checking is complete)
*/
public String getLatestVersion() {
return latestVersion;
} | 3.68 |
framework_ConnectorTracker_setWritingResponse | /**
* Sets the current response write status. Connectors can not be marked as
* dirty when the response is written.
* <p>
* This method has a side-effect of incrementing the sync id by one (see
* {@link #getCurrentSyncId()}), if {@link #isWritingResponse()} returns
* <code>true</code> and <code>writingResponse</code> is set to
* <code>false</code>.
*
* @param writingResponse
* the new response status.
*
* @see #markDirty(ClientConnector)
* @see #isWritingResponse()
* @see #getCurrentSyncId()
*
* @throws IllegalArgumentException
* if the new response status is the same as the previous value.
* This is done to help detecting problems caused by missed
* invocations of this method.
*/
public void setWritingResponse(boolean writingResponse) {
if (this.writingResponse == writingResponse) {
throw new IllegalArgumentException(
"The old value is same as the new value");
}
/*
* the right hand side of the && is unnecessary here because of the
* if-clause above, but rigorous coding is always rigorous coding.
*/
if (!writingResponse && this.writingResponse) {
// Bump sync id when done writing - the client is not expected to
// know about anything happening after this moment.
currentSyncId++;
}
this.writingResponse = writingResponse;
} | 3.68 |
hadoop_Configured_getConf | // inherit javadoc
@Override
public Configuration getConf() {
return conf;
} | 3.68 |
hadoop_ConfigurationWithLogging_getBoolean | /**
* See {@link Configuration#getBoolean(String, boolean)}.
*/
@Override
public boolean getBoolean(String name, boolean defaultValue) {
boolean value = super.getBoolean(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
return value;
} | 3.68 |
flink_MultiShotLatch_trigger | /** Fires the latch. Code that is blocked on {@link #await()} will now return. */
public void trigger() {
synchronized (lock) {
triggered = true;
lock.notifyAll();
}
} | 3.68 |
flink_OneInputTransformation_getStateKeySelector | /**
* Returns the {@code KeySelector} that must be used for partitioning keyed state in this
* Operation.
*
* @see #setStateKeySelector
*/
public KeySelector<IN, ?> getStateKeySelector() {
return stateKeySelector;
} | 3.68 |
shardingsphere-elasticjob_JobConfiguration_timeZone | /**
* time zone.
*
* @param timeZone the time zone
* @return job configuration builder
*/
public Builder timeZone(final String timeZone) {
if (null != timeZone) {
this.timeZone = timeZone;
}
return this;
} | 3.68 |
pulsar_TopicEventsDispatcher_removeTopicEventListener | /**
* Removes listeners.
* @param listeners
*/
public void removeTopicEventListener(TopicEventsListener... listeners) {
Objects.requireNonNull(listeners);
Arrays.stream(listeners)
.filter(x -> x != null)
.forEach(topicEventListeners::remove);
} | 3.68 |
shardingsphere-elasticjob_GuaranteeService_executeInLeaderForLastStarted | /**
* Invoke doBeforeJobExecutedAtLastStarted method once after last started.
*
* @param listener AbstractDistributeOnceElasticJobListener instance
* @param shardingContexts sharding contexts
*/
public void executeInLeaderForLastStarted(final AbstractDistributeOnceElasticJobListener listener,
final ShardingContexts shardingContexts) {
jobNodeStorage.executeInLeader(GuaranteeNode.STARTED_LATCH_ROOT,
new LeaderExecutionCallbackForLastStarted(listener, shardingContexts));
} | 3.68 |
hadoop_BondedS3AStatisticsContext_getInstanceStatistics | /**
* The filesystem statistics: know this is thread-local.
* @return FS statistics.
*/
private FileSystem.Statistics getInstanceStatistics() {
return statisticsSource.getInstanceStatistics();
} | 3.68 |
flink_HiveParserUtils_projectNonColumnEquiConditions | /**
* Push any equi join conditions that are not column references as Projections on top of the
* children.
*/
public static RexNode projectNonColumnEquiConditions(
RelFactories.ProjectFactory factory,
RelNode[] inputRels,
List<RexNode> leftJoinKeys,
List<RexNode> rightJoinKeys,
int systemColCount,
List<Integer> leftKeys,
List<Integer> rightKeys) {
RelNode leftRel = inputRels[0];
RelNode rightRel = inputRels[1];
RexBuilder rexBuilder = leftRel.getCluster().getRexBuilder();
RexNode outJoinCond = null;
int origLeftInputSize = leftRel.getRowType().getFieldCount();
int origRightInputSize = rightRel.getRowType().getFieldCount();
List<RexNode> newLeftFields = new ArrayList<>();
List<String> newLeftFieldNames = new ArrayList<>();
List<RexNode> newRightFields = new ArrayList<>();
List<String> newRightFieldNames = new ArrayList<>();
int leftKeyCount = leftJoinKeys.size();
int i;
for (i = 0; i < origLeftInputSize; i++) {
final RelDataTypeField field = leftRel.getRowType().getFieldList().get(i);
newLeftFields.add(rexBuilder.makeInputRef(field.getType(), i));
newLeftFieldNames.add(field.getName());
}
for (i = 0; i < origRightInputSize; i++) {
final RelDataTypeField field = rightRel.getRowType().getFieldList().get(i);
newRightFields.add(rexBuilder.makeInputRef(field.getType(), i));
newRightFieldNames.add(field.getName());
}
ImmutableBitSet.Builder origColEqCondsPosBuilder = ImmutableBitSet.builder();
int newKeyCount = 0;
List<Pair<Integer, Integer>> origColEqConds = new ArrayList<>();
for (i = 0; i < leftKeyCount; i++) {
RexNode leftKey = leftJoinKeys.get(i);
RexNode rightKey = rightJoinKeys.get(i);
if (leftKey instanceof RexInputRef && rightKey instanceof RexInputRef) {
origColEqConds.add(
Pair.of(
((RexInputRef) leftKey).getIndex(),
((RexInputRef) rightKey).getIndex()));
origColEqCondsPosBuilder.set(i);
} else {
newLeftFields.add(leftKey);
newLeftFieldNames.add(null);
newRightFields.add(rightKey);
newRightFieldNames.add(null);
newKeyCount++;
}
}
ImmutableBitSet origColEqCondsPos = origColEqCondsPosBuilder.build();
for (i = 0; i < origColEqConds.size(); i++) {
Pair<Integer, Integer> p = origColEqConds.get(i);
int condPos = origColEqCondsPos.nth(i);
RexNode leftKey = leftJoinKeys.get(condPos);
RexNode rightKey = rightJoinKeys.get(condPos);
leftKeys.add(p.left);
rightKeys.add(p.right);
RexNode cond =
rexBuilder.makeCall(
SqlStdOperatorTable.EQUALS,
rexBuilder.makeInputRef(leftKey.getType(), systemColCount + p.left),
rexBuilder.makeInputRef(
rightKey.getType(),
systemColCount + origLeftInputSize + newKeyCount + p.right));
if (outJoinCond == null) {
outJoinCond = cond;
} else {
outJoinCond = rexBuilder.makeCall(SqlStdOperatorTable.AND, outJoinCond, cond);
}
}
if (newKeyCount == 0) {
return outJoinCond;
}
int newLeftOffset = systemColCount + origLeftInputSize;
int newRightOffset = systemColCount + origLeftInputSize + origRightInputSize + newKeyCount;
for (i = 0; i < newKeyCount; i++) {
leftKeys.add(origLeftInputSize + i);
rightKeys.add(origRightInputSize + i);
RexNode cond =
rexBuilder.makeCall(
SqlStdOperatorTable.EQUALS,
rexBuilder.makeInputRef(
newLeftFields.get(origLeftInputSize + i).getType(),
newLeftOffset + i),
rexBuilder.makeInputRef(
newRightFields.get(origRightInputSize + i).getType(),
newRightOffset + i));
if (outJoinCond == null) {
outJoinCond = cond;
} else {
outJoinCond = rexBuilder.makeCall(SqlStdOperatorTable.AND, outJoinCond, cond);
}
}
// add a projection if new keys beyond the original input fields need to be produced
if (newKeyCount > 0) {
leftRel =
factory.createProject(
leftRel,
Collections.emptyList(),
newLeftFields,
SqlValidatorUtil.uniquify(newLeftFieldNames, false));
rightRel =
factory.createProject(
rightRel,
Collections.emptyList(),
newRightFields,
SqlValidatorUtil.uniquify(newRightFieldNames, false));
}
inputRels[0] = leftRel;
inputRels[1] = rightRel;
return outJoinCond;
} | 3.68 |
dubbo_ReferenceBean_getObject | /**
* Create bean instance.
*
* <p></p>
* Why we need a lazy proxy?
*
* <p/>
* When Spring searches beans by type, if Spring cannot determine the type of a factory bean, it may try to initialize it.
* The ReferenceBean is also a FactoryBean.
* <br/>
* (This has already been resolved by decorating the BeanDefinition: {@link DubboBeanDefinitionParser#configReferenceBean})
*
* <p/>
* In addition, if some ReferenceBeans are dependent on beans that are initialized very early,
* and dubbo config beans are not ready yet, there will be many unexpected problems if initializing the dubbo reference immediately.
*
* <p/>
* When it is initialized, only a lazy proxy object will be created,
* and dubbo reference-related resources will not be initialized.
* <br/>
* In this way, the influence of Spring is eliminated, and the dubbo configuration initialization is controllable.
*
*
* @see DubboConfigBeanInitializer
* @see ReferenceBeanManager#initReferenceBean(ReferenceBean)
* @see DubboBeanDefinitionParser#configReferenceBean
*/
@Override
public T getObject() {
if (lazyProxy == null) {
createLazyProxy();
}
return (T) lazyProxy;
} | 3.68 |
hbase_TaskMonitor_get | /**
* Get singleton instance. TODO this would be better off scoped to a single daemon
*/
public static synchronized TaskMonitor get() {
if (instance == null) {
instance = new TaskMonitor(HBaseConfiguration.create());
}
return instance;
} | 3.68 |
hbase_BucketAllocator_freeBlock | /**
* Free a block with the offset
* @param offset block's offset
* @return size freed
*/
public synchronized int freeBlock(long offset, int length) {
int bucketNo = (int) (offset / bucketCapacity);
assert bucketNo >= 0 && bucketNo < buckets.length;
Bucket targetBucket = buckets[bucketNo];
bucketSizeInfos[targetBucket.sizeIndex()].freeBlock(targetBucket, offset, length);
usedSize -= targetBucket.getItemAllocationSize();
return targetBucket.getItemAllocationSize();
} | 3.68 |
flink_CheckpointConfig_setCheckpointIdOfIgnoredInFlightData | /**
* Setup the checkpoint id for which the in-flight data will be ignored for all operators in
* case of the recovery from this checkpoint.
*
* @param checkpointIdOfIgnoredInFlightData Checkpoint id for which in-flight data should be
* ignored.
* @see #setCheckpointIdOfIgnoredInFlightData
*/
@PublicEvolving
public void setCheckpointIdOfIgnoredInFlightData(long checkpointIdOfIgnoredInFlightData) {
configuration.set(
ExecutionCheckpointingOptions.CHECKPOINT_ID_OF_IGNORED_IN_FLIGHT_DATA,
checkpointIdOfIgnoredInFlightData);
} | 3.68 |
flink_TimeUtils_parseDuration | /**
* Parse the given string to a java {@link Duration}. The string is in format "{length
* value}{time unit label}", e.g. "123ms", "321 s". If no time unit label is specified, it will
* be considered as milliseconds.
*
* <p>Supported time unit labels are:
*
* <ul>
* <li>DAYS: "d", "day"
* <li>HOURS: "h", "hour"
* <li>MINUTES: "m", "min", "minute"
* <li>SECONDS: "s", "sec", "second"
* <li>MILLISECONDS: "ms", "milli", "millisecond"
* <li>MICROSECONDS: "µs", "micro", "microsecond"
* <li>NANOSECONDS: "ns", "nano", "nanosecond"
* </ul>
*
* @param text string to parse.
*/
public static Duration parseDuration(String text) {
checkNotNull(text);
final String trimmed = text.trim();
checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string");
final int len = trimmed.length();
int pos = 0;
char current;
while (pos < len && (current = trimmed.charAt(pos)) >= '0' && current <= '9') {
pos++;
}
final String number = trimmed.substring(0, pos);
final String unitLabel = trimmed.substring(pos).trim().toLowerCase(Locale.US);
if (number.isEmpty()) {
throw new NumberFormatException("text does not start with a number");
}
final long value;
try {
value = Long.parseLong(number); // this throws a NumberFormatException on overflow
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
"The value '"
+ number
+ "' cannot be re represented as 64bit number (numeric overflow).");
}
if (unitLabel.isEmpty()) {
return Duration.of(value, ChronoUnit.MILLIS);
}
ChronoUnit unit = LABEL_TO_UNIT_MAP.get(unitLabel);
if (unit != null) {
return Duration.of(value, unit);
} else {
throw new IllegalArgumentException(
"Time interval unit label '"
+ unitLabel
+ "' does not match any of the recognized units: "
+ TimeUnit.getAllUnits());
}
} | 3.68 |
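A short usage sketch for the duration parser documented above, assuming flink-core (which provides `org.apache.flink.util.TimeUtils`) is on the classpath:

```java
import java.time.Duration;

import org.apache.flink.util.TimeUtils;

public class ParseDurationExample {
    public static void main(String[] args) {
        // No unit label: the value is interpreted as milliseconds.
        Duration d1 = TimeUtils.parseDuration("123");   // PT0.123S
        // Whitespace between the number and the unit label is allowed.
        Duration d2 = TimeUtils.parseDuration("321 s"); // PT5M21S
        // Unit labels are matched case-insensitively.
        Duration d3 = TimeUtils.parseDuration("2H");    // PT2H
        System.out.println(d1 + " " + d2 + " " + d3);
    }
}
```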
flink_HiveParserCalcitePlanner_genJoinLogicalPlan | // Generate Join Logical Plan Relnode by walking through the join AST.
private RelNode genJoinLogicalPlan(
HiveParserASTNode joinParseTree, Map<String, RelNode> aliasToRel)
throws SemanticException {
RelNode leftRel = null;
RelNode rightRel = null;
JoinType hiveJoinType;
if (joinParseTree.getToken().getType() == HiveASTParser.TOK_UNIQUEJOIN) {
String msg =
"UNIQUE JOIN is currently not supported in CBO, turn off cbo to use UNIQUE JOIN.";
throw new SemanticException(msg);
}
// 1. Determine Join Type
switch (joinParseTree.getToken().getType()) {
case HiveASTParser.TOK_LEFTOUTERJOIN:
hiveJoinType = JoinType.LEFTOUTER;
break;
case HiveASTParser.TOK_RIGHTOUTERJOIN:
hiveJoinType = JoinType.RIGHTOUTER;
break;
case HiveASTParser.TOK_FULLOUTERJOIN:
hiveJoinType = JoinType.FULLOUTER;
break;
case HiveASTParser.TOK_LEFTSEMIJOIN:
hiveJoinType = JoinType.LEFTSEMI;
break;
default:
hiveJoinType = JoinType.INNER;
break;
}
// 2. Get Left Table Alias
HiveParserASTNode left = (HiveParserASTNode) joinParseTree.getChild(0);
String leftTableAlias = null;
if (left.getToken().getType() == HiveASTParser.TOK_TABREF
|| (left.getToken().getType() == HiveASTParser.TOK_SUBQUERY)
|| (left.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION)) {
String tableName =
HiveParserBaseSemanticAnalyzer.getUnescapedUnqualifiedTableName(
(HiveParserASTNode) left.getChild(0))
.toLowerCase();
leftTableAlias =
left.getChildCount() == 1
? tableName
: unescapeIdentifier(
left.getChild(left.getChildCount() - 1)
.getText()
.toLowerCase());
leftTableAlias =
left.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION
? unescapeIdentifier(left.getChild(1).getText().toLowerCase())
: leftTableAlias;
leftRel = aliasToRel.get(leftTableAlias);
} else if (HiveParserUtils.isJoinToken(left)) {
leftRel = genJoinLogicalPlan(left, aliasToRel);
} else {
assert (false);
}
// 3. Get Right Table Alias
HiveParserASTNode right = (HiveParserASTNode) joinParseTree.getChild(1);
String rightTableAlias = null;
if (right.getToken().getType() == HiveASTParser.TOK_TABREF
|| right.getToken().getType() == HiveASTParser.TOK_SUBQUERY
|| right.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION) {
String tableName =
HiveParserBaseSemanticAnalyzer.getUnescapedUnqualifiedTableName(
(HiveParserASTNode) right.getChild(0))
.toLowerCase();
rightTableAlias =
right.getChildCount() == 1
? tableName
: unescapeIdentifier(
right.getChild(right.getChildCount() - 1)
.getText()
.toLowerCase());
rightTableAlias =
right.getToken().getType() == HiveASTParser.TOK_PTBLFUNCTION
? unescapeIdentifier(right.getChild(1).getText().toLowerCase())
: rightTableAlias;
rightRel = aliasToRel.get(rightTableAlias);
} else {
assert (false);
}
// 4. Get Join Condn
HiveParserASTNode joinCond = (HiveParserASTNode) joinParseTree.getChild(2);
// 5. Create Join rel
return genJoinRelNode(
leftRel, leftTableAlias, rightRel, rightTableAlias, hiveJoinType, joinCond);
} | 3.68 |
morf_ColumnTypeBean_getWidth | /**
* @return the width
*/
@Override
public int getWidth() {
return width;
} | 3.68 |
hbase_HBaseFsckRepair_closeRegionSilentlyAndWait | /**
* Contacts a region server and waits up to hbase.hbck.close.timeout ms (default 120s) to close
* the region. This bypasses the active hmaster.
*/
public static void closeRegionSilentlyAndWait(Connection connection, ServerName server,
RegionInfo region) throws IOException, InterruptedException {
long timeout = connection.getConfiguration().getLong("hbase.hbck.close.timeout", 120000);
// this is a bit ugly but it is only used in the old hbck and tests, so I think it is fine.
try (AsyncClusterConnection asyncConn = ClusterConnectionFactory
.createAsyncClusterConnection(connection.getConfiguration(), null, User.getCurrent())) {
ServerManager.closeRegionSilentlyAndWait(asyncConn, server, region, timeout);
}
} | 3.68 |
druid_DruidPooledConnection_getVariables | /**
* @since 1.0.28
*/
public Map<String, Object> getVariables() {
return this.holder.variables;
} | 3.68 |
hbase_SpaceLimitSettings_validateSizeLimit | // Helper function to validate sizeLimit
private void validateSizeLimit(long sizeLimit) {
if (sizeLimit < 0L) {
throw new IllegalArgumentException("Size limit must be a non-negative value.");
}
} | 3.68 |
framework_GenericFontIcon_getMIMEType | /*
* (non-Javadoc)
*
* @see com.vaadin.server.Resource#getMIMEType()
*/
@Override
public String getMIMEType() {
throw new UnsupportedOperationException(FontIcon.class.getSimpleName()
+ " should not be used where a MIME type is needed.");
} | 3.68 |
flink_BlockInfo_getRecordCount | /**
* Returns the recordCount.
*
* @return the recordCount
*/
public long getRecordCount() {
return this.recordCount;
} | 3.68 |
framework_VCalendar_isEventMoveAllowed | /**
* Is moving an event allowed.
*/
public boolean isEventMoveAllowed() {
return eventMoveAllowed;
} | 3.68 |
hudi_HoodieTableFactory_setupSortOptions | /**
* Sets up the table exec sort options.
*/
private void setupSortOptions(Configuration conf, ReadableConfig contextConfig) {
if (contextConfig.getOptional(TABLE_EXEC_SORT_MAX_NUM_FILE_HANDLES).isPresent()) {
conf.set(TABLE_EXEC_SORT_MAX_NUM_FILE_HANDLES,
contextConfig.get(TABLE_EXEC_SORT_MAX_NUM_FILE_HANDLES));
}
if (contextConfig.getOptional(TABLE_EXEC_SPILL_COMPRESSION_ENABLED).isPresent()) {
conf.set(TABLE_EXEC_SPILL_COMPRESSION_ENABLED,
contextConfig.get(TABLE_EXEC_SPILL_COMPRESSION_ENABLED));
}
if (contextConfig.getOptional(TABLE_EXEC_SPILL_COMPRESSION_BLOCK_SIZE).isPresent()) {
conf.set(TABLE_EXEC_SPILL_COMPRESSION_BLOCK_SIZE,
contextConfig.get(TABLE_EXEC_SPILL_COMPRESSION_BLOCK_SIZE));
}
if (contextConfig.getOptional(TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED).isPresent()) {
conf.set(TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED,
contextConfig.get(TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED));
}
} | 3.68 |
flink_FlinkContainersSettings_fullConfiguration | /**
* Sets the {@code flinkConfiguration} value to {@code config} and returns a reference to
* this Builder enabling method chaining.
*
* @param <T> the type parameter
* @param config The {@code config} to set.
* @return A reference to this Builder.
*/
public <T> Builder fullConfiguration(Configuration config) {
this.flinkConfiguration = config;
return this;
} | 3.68 |
hadoop_ContentCounts_getSnapshotableDirectoryCount | // Get the number of snapshottable directories.
public long getSnapshotableDirectoryCount() {
return contents.get(Content.SNAPSHOTTABLE_DIRECTORY);
} | 3.68 |
framework_VaadinPortletResponse_getPortletResponse | /**
* Gets the original, unwrapped portlet response.
*
* @return the unwrapped portlet response
*/
public PortletResponse getPortletResponse() {
return response;
} | 3.68 |
querydsl_DateTimeExpression_dayOfMonth | /**
* Create a day of month expression (range 1-31)
*
* @return day of month
*/
public NumberExpression<Integer> dayOfMonth() {
if (dayOfMonth == null) {
dayOfMonth = Expressions.numberOperation(Integer.class, Ops.DateTimeOps.DAY_OF_MONTH, mixin);
}
return dayOfMonth;
} | 3.68 |
flink_DynamicSourceUtils_convertDataStreamToRel | /**
* Converts a given {@link DataStream} to a {@link RelNode}. It adds helper projections if
* necessary.
*/
public static RelNode convertDataStreamToRel(
boolean isBatchMode,
ReadableConfig config,
FlinkRelBuilder relBuilder,
ContextResolvedTable contextResolvedTable,
DataStream<?> dataStream,
DataType physicalDataType,
boolean isTopLevelRecord,
ChangelogMode changelogMode) {
final DynamicTableSource tableSource =
new ExternalDynamicSource<>(
contextResolvedTable.getIdentifier(),
dataStream,
physicalDataType,
isTopLevelRecord,
changelogMode);
final FlinkStatistic statistic =
FlinkStatistic.unknown(contextResolvedTable.getResolvedSchema()).build();
return convertSourceToRel(
isBatchMode,
config,
relBuilder,
contextResolvedTable,
statistic,
Collections.emptyList(),
tableSource);
} | 3.68 |
flink_TypeSerializerSnapshotSerializationUtil_write | /**
* Binary format layout of a written serializer snapshot is as follows:
*
* <ul>
* <li>1. Format version of this util.
* <li>2. Name of the TypeSerializerSnapshot class.
* <li>3. The version of the TypeSerializerSnapshot's binary format.
* <li>4. The actual serializer snapshot data.
* </ul>
*/
@SuppressWarnings("deprecation")
@Override
public void write(DataOutputView out) throws IOException {
// write the format version of this utils format
super.write(out);
TypeSerializerSnapshot.writeVersionedSnapshot(out, serializerSnapshot);
} | 3.68 |
hudi_TimelineUtils_handleHollowCommitIfNeeded | /**
* Handles hollow commit as per {@link HoodieCommonConfig#INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT}
* and returns a filtered or non-filtered timeline for the incremental query to run against.
*/
public static HoodieTimeline handleHollowCommitIfNeeded(HoodieTimeline completedCommitTimeline,
HoodieTableMetaClient metaClient, HollowCommitHandling handlingMode) {
if (handlingMode == HollowCommitHandling.USE_TRANSITION_TIME) {
return completedCommitTimeline;
}
Option<HoodieInstant> firstIncompleteCommit = metaClient.getCommitsTimeline()
.filterInflightsAndRequested()
.filter(instant ->
!HoodieTimeline.REPLACE_COMMIT_ACTION.equals(instant.getAction())
|| !ClusteringUtils.getClusteringPlan(metaClient, instant).isPresent())
.firstInstant();
boolean noHollowCommit = firstIncompleteCommit
.map(i -> completedCommitTimeline.findInstantsAfter(i.getTimestamp()).empty())
.orElse(true);
if (noHollowCommit) {
return completedCommitTimeline;
}
String hollowCommitTimestamp = firstIncompleteCommit.get().getTimestamp();
switch (handlingMode) {
case FAIL:
throw new HoodieException(String.format(
"Found hollow commit: '%s'. Adjust config `%s` accordingly if to avoid throwing this exception.",
hollowCommitTimestamp, INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT.key()));
case BLOCK:
LOG.warn(String.format(
"Found hollow commit '%s'. Config `%s` was set to `%s`: no data will be returned beyond '%s' until it's completed.",
hollowCommitTimestamp, INCREMENTAL_READ_HANDLE_HOLLOW_COMMIT.key(), handlingMode, hollowCommitTimestamp));
return completedCommitTimeline.findInstantsBefore(hollowCommitTimestamp);
default:
throw new HoodieException("Unexpected handling mode: " + handlingMode);
}
} | 3.68 |
graphhopper_AngleCalc_calcOrientation | /**
* Return orientation of line relative to east.
* <p>
*
* @param exact If false the atan gets calculated faster, but it might contain small errors
* @return Orientation in interval -pi to +pi where 0 is east
*/
public double calcOrientation(double lat1, double lon1, double lat2, double lon2, boolean exact) {
double shrinkFactor = cos(toRadians((lat1 + lat2) / 2));
if (exact)
return Math.atan2(lat2 - lat1, shrinkFactor * (lon2 - lon1));
else
return atan2(lat2 - lat1, shrinkFactor * (lon2 - lon1));
} | 3.68 |
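A small sketch exercising the orientation calculation above, assuming GraphHopper's `com.graphhopper.util.AngleCalc` can be instantiated directly (newer versions may also expose a shared static instance):

```java
import com.graphhopper.util.AngleCalc;

public class OrientationExample {
    public static void main(String[] args) {
        AngleCalc angleCalc = new AngleCalc(); // direct instantiation assumed

        // Heading due east from roughly Berlin: lat stays constant, lon increases -> ~0 rad.
        double east = angleCalc.calcOrientation(52.52, 13.40, 52.52, 13.50, true);

        // Heading due north: lat increases, lon stays constant -> ~+pi/2 rad.
        double north = angleCalc.calcOrientation(52.52, 13.40, 52.62, 13.40, true);

        System.out.printf("east=%.3f rad, north=%.3f rad%n", east, north);
    }
}
```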
framework_DesignContext_setCustomAttribute | /**
* Sets a custom attribute not handled by the component. These attributes
* are directly written to the component tag.
*
* @since 7.7
* @param component
* the component to set the attribute for
* @param attribute
* the attribute to set
* @param value
* the value of the attribute
*/
public void setCustomAttribute(Component component, String attribute,
String value) {
Map<String, String> map = customAttributes.get(component);
if (map == null) {
map = new HashMap<>();
customAttributes.put(component, map);
}
map.put(attribute, value);
} | 3.68 |
hadoop_AbfsOutputStreamStatisticsImpl_blockAllocated | /**
* Increment the counter to indicate a block has been allocated.
*/
@Override
public void blockAllocated() {
blocksAllocated.incrementAndGet();
} | 3.68 |
hbase_RSGroupAdminClient_removeRSGroup | /**
* Removes RegionServer group associated with the given name.
*/
public void removeRSGroup(String name) throws IOException {
RemoveRSGroupRequest request = RemoveRSGroupRequest.newBuilder().setRSGroupName(name).build();
try {
stub.removeRSGroup(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.handleRemoteException(e);
}
} | 3.68 |
framework_DesignContext_mapCaption | /**
* Creates a mapping between the given caption and the component. Returns
* true if caption was already mapped to some component.
*
* Note that unlike mapGlobalId, if some component already has the given
* caption, the caption is not cleared from the component. This allows
* non-unique captions. However, only one of the components corresponding to
* a given caption can be found using the map captionToComponent. Hence, any
* captions that are used to identify an object should be unique.
*
* @param caption
* The new caption of the component.
* @param component
* The component whose caption is to be set.
* @return true, if there already was a caption mapping from the string to
* some component.
*/
private boolean mapCaption(String caption, Component component) {
return captionToComponent.put(caption, component) != null;
} | 3.68 |
flink_FlinkContainersSettings_numSlotsPerTaskManager | /**
* Sets the {@code numSlotsPerTaskManager} and returns a reference to this Builder enabling
* method chaining. It also adds this property into the {@code flinkConfiguration} field.
*
* @param numSlotsPerTaskManager The {@code numSlotsPerTaskManager} to set.
* @return A reference to this Builder.
*/
public Builder numSlotsPerTaskManager(int numSlotsPerTaskManager) {
this.numSlotsPerTaskManager = numSlotsPerTaskManager;
return setConfigOption(TaskManagerOptions.NUM_TASK_SLOTS, numSlotsPerTaskManager);
} | 3.68 |
framework_DragHandle_addTo | /**
* Adds this drag handle to an HTML element.
*
* @param elem
* an element
*/
public void addTo(Element elem) {
removeFromParent();
parent = elem;
parent.appendChild(element);
} | 3.68 |
flink_FlinkContainersSettings_getCheckpointPath | /**
* Gets checkpoint path.
*
* @return The checkpoint path.
*/
public String getCheckpointPath() {
return checkpointPath;
} | 3.68 |
graphhopper_Path_getDistance | /**
* @return distance in meter
*/
public double getDistance() {
return distance;
} | 3.68 |
framework_Table_removeColumnReorderListener | /**
* Removes a column reorder listener from the Table.
*
* @param listener
* The listener to remove
*/
public void removeColumnReorderListener(ColumnReorderListener listener) {
removeListener(TableConstants.COLUMN_REORDER_EVENT_ID,
ColumnReorderEvent.class, listener);
} | 3.68 |
flink_JoinHintsResolver_matchIdentifier | /**
* Check whether the given hint option matches the table qualified names. For convenience, we
* follow a simple rule: the matching is successful if the option is the suffix of the table
* qualified names.
*/
private boolean matchIdentifier(String option, String tableIdentifier) {
String[] optionNames = option.split("\\.");
int optionNameLength = optionNames.length;
String[] tableNames = tableIdentifier.split("\\.");
int tableNameLength = tableNames.length;
for (int i = 0; i < Math.min(optionNameLength, tableNameLength); i++) {
String currOptionName = optionNames[optionNameLength - 1 - i];
String currTableName = tableNames[tableNameLength - 1 - i];
if (!currOptionName.equals(currTableName)) {
return false;
}
}
return true;
} | 3.68 |
zxing_URIParsedResult_isPossiblyMaliciousURI | /**
* @return true if the URI contains suspicious patterns that may suggest it intends to
* mislead the user about its true nature
* @deprecated see {@link URIResultParser#isPossiblyMaliciousURI(String)}
*/
@Deprecated
public boolean isPossiblyMaliciousURI() {
return URIResultParser.isPossiblyMaliciousURI(uri);
} | 3.68 |
hmily_Binder_of | /**
* Of binder.
*
* @param source the source
* @return the binder
*/
public static Binder of(final ConfigPropertySource source) {
return new Binder(source);
} | 3.68 |
flink_HadoopConfigLoader_getOrLoadHadoopConfig | /** get the loaded Hadoop config (or fall back to one loaded from the classpath). */
public org.apache.hadoop.conf.Configuration getOrLoadHadoopConfig() {
org.apache.hadoop.conf.Configuration hadoopConfig = this.hadoopConfig;
if (hadoopConfig == null) {
if (flinkConfig != null) {
hadoopConfig = mirrorCertainHadoopConfig(loadHadoopConfigFromFlink());
} else {
LOG.warn(
"Flink configuration is not set prior to loading this configuration."
+ " Cannot forward configuration keys from Flink configuration.");
hadoopConfig = new org.apache.hadoop.conf.Configuration();
}
}
this.hadoopConfig = hadoopConfig;
return hadoopConfig;
} | 3.68 |
flink_Router_removePathPattern | /** Removes the route specified by the path pattern. */
public void removePathPattern(String pathPattern) {
for (MethodlessRouter<T> router : routers.values()) {
router.removePathPattern(pathPattern);
}
anyMethodRouter.removePathPattern(pathPattern);
} | 3.68 |
hadoop_LocalityMulticastAMRMProxyPolicy_routeNodeRequestIfNeeded | /**
* When a certain subcluster is too heavily loaded, reroute Node requests going there.
*
* @param targetId current subClusterId where request is sent
* @param maxThreshold threshold for Pending count
* @param activeAndEnabledSCs list of active sc
* @return subClusterId target sc id
*/
protected SubClusterId routeNodeRequestIfNeeded(SubClusterId targetId,
int maxThreshold, Set<SubClusterId> activeAndEnabledSCs) {
// If targetId is not in the active and enabled SC list, reroute the traffic
if (activeAndEnabledSCs.contains(targetId)) {
int targetPendingCount = getSubClusterLoad(targetId);
if (targetPendingCount == -1 || targetPendingCount < maxThreshold) {
return targetId;
}
}
SubClusterId scId = chooseSubClusterIdForMaxLoadSC(targetId, maxThreshold, activeAndEnabledSCs);
return scId;
} | 3.68 |
hadoop_ServerWebApp_isSslEnabled | /**
*
*/
public boolean isSslEnabled() {
return Boolean.parseBoolean(
System.getProperty(getName() + SSL_ENABLED, "false"));
} | 3.68 |
hbase_ZkSplitLogWorkerCoordination_init | /**
* Override setter from {@link SplitLogWorkerCoordination}
*/
@Override
public void init(RegionServerServices server, Configuration conf, TaskExecutor splitExecutor,
SplitLogWorker worker) {
this.server = server;
this.worker = worker;
this.splitTaskExecutor = splitExecutor;
maxConcurrentTasks =
conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER);
reportPeriod = conf.getInt("hbase.splitlog.report.period",
conf.getInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT,
ZKSplitLogManagerCoordination.DEFAULT_TIMEOUT) / 3);
} | 3.68 |
framework_CheckBoxGroup_setItemDescriptionGenerator | /**
* Sets the description generator that is used for generating descriptions
* for items. Description is shown as a tooltip when hovering on
* corresponding element. If the generator returns {@code null}, no tooltip
* is shown.
*
*
* @param descriptionGenerator
* the item description generator to set, not {@code null}
*
* @since 8.2
*/
public void setItemDescriptionGenerator(
DescriptionGenerator<T> descriptionGenerator) {
Objects.requireNonNull(descriptionGenerator);
if (this.descriptionGenerator != descriptionGenerator) {
this.descriptionGenerator = descriptionGenerator;
getDataProvider().refreshAll();
}
} | 3.68 |
hudi_RDDConsistentBucketBulkInsertPartitioner_generateFileIdPfx | /**
* Initialize fileIdPfx for each data partition. Specifically, the following fields is constructed:
* - fileIdPfxList: the Nth element corresponds to the Nth data partition, indicating its fileIdPfx
* - partitionToFileIdPfxIdxMap (return value): (table partition) -> (fileIdPfx -> idx) mapping
* - doAppend: represents if the Nth data partition should use AppendHandler
*
* @param partitionToIdentifier Mapping from table partition to bucket identifier
*/
private Map<String, Map<String, Integer>> generateFileIdPfx(Map<String, ConsistentBucketIdentifier> partitionToIdentifier) {
Map<String, Map<String, Integer>> partitionToFileIdPfxIdxMap = ConsistentBucketIndexUtils.generatePartitionToFileIdPfxIdxMap(partitionToIdentifier);
int count = 0;
for (ConsistentBucketIdentifier identifier : partitionToIdentifier.values()) {
fileIdPfxList.addAll(identifier.getNodes().stream().map(ConsistentHashingNode::getFileIdPrefix).collect(Collectors.toList()));
Map<String, Integer> fileIdPfxToIdx = new HashMap();
for (ConsistentHashingNode node : identifier.getNodes()) {
fileIdPfxToIdx.put(node.getFileIdPrefix(), count++);
}
if (identifier.getMetadata().isFirstCreated()) {
// Create new file group when the hashing metadata is new (i.e., first write to the partition)
doAppend.addAll(Collections.nCopies(identifier.getNodes().size(), false));
} else {
// Child node requires generating a fresh new base file, rather than log file
doAppend.addAll(identifier.getNodes().stream().map(n -> n.getTag() == ConsistentHashingNode.NodeTag.NORMAL).collect(Collectors.toList()));
}
partitionToFileIdPfxIdxMap.put(identifier.getMetadata().getPartitionPath(), fileIdPfxToIdx);
}
ValidationUtils.checkState(fileIdPfxList.size() == partitionToIdentifier.values().stream().mapToInt(ConsistentBucketIdentifier::getNumBuckets).sum(),
"Error state after constructing fileId & idx mapping");
return partitionToFileIdPfxIdxMap;
} | 3.68 |