name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68)
---|---|---|
hbase_ServerNonceManager_getMvccFromOperationContext | /**
* Return the write point of the previously succeeded operation.
* @param group Nonce group.
* @param nonce Nonce.
* @return write point of the previously succeeded operation.
*/
public long getMvccFromOperationContext(long group, long nonce) {
if (nonce == HConstants.NO_NONCE) {
return Long.MAX_VALUE;
}
NonceKey nk = new NonceKey(group, nonce);
OperationContext result = nonces.get(nk);
return result == null ? Long.MAX_VALUE : result.getMvcc();
} | 3.68 |
hbase_StorageClusterStatusModel_getStores | /** Returns the number of stores */
@XmlAttribute
public int getStores() {
return stores;
} | 3.68 |
hbase_ZKUtil_positionToByteArray | /**
* @param position the position to serialize
* @return Serialized protobuf of <code>position</code> with pb magic prefix prepended suitable
* for use as content of a WAL position in a replication queue.
*/
public static byte[] positionToByteArray(final long position) {
byte[] bytes = ReplicationProtos.ReplicationHLogPosition.newBuilder().setPosition(position)
.build().toByteArray();
return ProtobufUtil.prependPBMagic(bytes);
} | 3.68 |
hbase_HMaster_decorateMasterConfiguration | /**
* This method modifies the master's configuration in order to inject replication-related features
*/
@InterfaceAudience.Private
public static void decorateMasterConfiguration(Configuration conf) {
String plugins = conf.get(HBASE_MASTER_LOGCLEANER_PLUGINS);
String cleanerClass = ReplicationLogCleaner.class.getCanonicalName();
if (plugins == null || !plugins.contains(cleanerClass)) {
conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass);
}
if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) {
plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
cleanerClass = ReplicationHFileCleaner.class.getCanonicalName();
if (!plugins.contains(cleanerClass)) {
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, plugins + "," + cleanerClass);
}
}
} | 3.68 |
hbase_ReplicationSyncUp_listRegionServers | // Find region servers under wal directory
// Here we only care about the region servers which may still be alive, as we need to add
// replications for them if missing. The dead region servers which have already been processed
// fully do not need to add their replication queues again, as the operation has already been done
// in SCP.
private Set<ServerName> listRegionServers(FileSystem walFs, Path walDir) throws IOException {
FileStatus[] statuses;
try {
statuses = walFs.listStatus(walDir);
} catch (FileNotFoundException e) {
System.out.println("WAL directory " + walDir + " does not exists, ignore");
return Collections.emptySet();
}
Set<ServerName> regionServers = new HashSet<>();
for (FileStatus status : statuses) {
// All WAL files under the walDir are within their region server's directory
if (!status.isDirectory()) {
continue;
}
ServerName sn = AbstractFSWALProvider.getServerNameFromWALDirectoryName(status.getPath());
if (sn != null) {
regionServers.add(sn);
}
}
return regionServers;
} | 3.68 |
Activiti_SimpleContext_setVariable | /**
* Define a variable.
*/
public ValueExpression setVariable(
String name,
ValueExpression expression
) {
if (variables == null) {
variables = new Variables();
}
return variables.setVariable(name, expression);
} | 3.68 |
hbase_UserProvider_getCurrentUserName | /**
* Returns the userName for the currently logged-in user.
* @throws IOException if the underlying user cannot be obtained
*/
public String getCurrentUserName() throws IOException {
User user = getCurrent();
return user == null ? null : user.getName();
} | 3.68 |
hbase_QuotaTableUtil_createDeletesForExistingSnapshotsFromScan | /**
* Returns a list of {@code Delete} to remove all entries returned by the passed scanner.
* @param connection connection to re-use
* @param scan the scanner to use to generate the list of deletes
*/
static List<Delete> createDeletesForExistingSnapshotsFromScan(Connection connection, Scan scan)
throws IOException {
List<Delete> deletes = new ArrayList<>();
try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME);
ResultScanner rs = quotaTable.getScanner(scan)) {
for (Result r : rs) {
CellScanner cs = r.cellScanner();
while (cs.advance()) {
Cell c = cs.current();
byte[] family = Bytes.copy(c.getFamilyArray(), c.getFamilyOffset(), c.getFamilyLength());
byte[] qual =
Bytes.copy(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength());
Delete d = new Delete(r.getRow());
d.addColumns(family, qual);
deletes.add(d);
}
}
return deletes;
}
} | 3.68 |
hadoop_LogAggregationWebUtils_verifyAndGetAppOwner | /**
* Verify and parse the application owner.
* @param html the html
* @param appOwner the Application owner
* @return the appOwner
*/
public static String verifyAndGetAppOwner(Block html, String appOwner) {
if (appOwner == null || appOwner.isEmpty()) {
html.h1().__("Cannot get container logs without an app owner").__();
}
return appOwner;
} | 3.68 |
morf_OracleDialect_getDeleteLimitWhereClause | /**
* @see SqlDialect#getDeleteLimitWhereClause(int)
*/
@Override
protected Optional<String> getDeleteLimitWhereClause(int limit) {
return Optional.of("ROWNUM <= " + limit);
} | 3.68 |
hadoop_HeaderProcessing_getXAttr | /**
* Get an XAttr name and value for a file or directory.
* @param path Path to get extended attribute
* @param name XAttr name.
* @return byte[] XAttr value or null
* @throws IOException IO failure
*/
public byte[] getXAttr(Path path, String name) throws IOException {
return retrieveHeaders(path, INVOCATION_XATTR_GET_NAMED).get(name);
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_deleteNodeMeta | /**
* Physically delete the meta of the node, including the node level index, the node key, and
* reduce the total size of the skip list.
*
* @param node node to remove.
* @param prevNode previous node at the level 0.
* @param nextNode next node at the level 0.
* @return value pointer of the node.
*/
private long deleteNodeMeta(long node, long prevNode, long nextNode) {
// set next node of prevNode at level 0 to nextNode
helpSetNextNode(prevNode, nextNode, 0);
// remove the level index for the node
SkipListUtils.removeLevelIndex(node, spaceAllocator, levelIndexHeader);
// free space used by key
long valuePointer = SkipListUtils.helpGetValuePointer(node, spaceAllocator);
this.spaceAllocator.free(node);
// reduce total size of the skip list
// note that we regard the node to be removed once its meta is deleted
totalSize--;
return valuePointer;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithExceptStatement | /**
* Tests the generation of SQL string for a query with EXCEPT operator.
*/
@Test
public void testSelectWithExceptStatement() {
assumeTrue("for dialects with no EXCEPT operation support the test will be skipped.", expectedSelectWithExcept() != null);
SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD))
.from(new TableReference(TEST_TABLE))
.except(new SelectStatement(new FieldReference(STRING_FIELD)).from(new TableReference(OTHER_TABLE)))
.orderBy(new FieldReference(STRING_FIELD));
String result = testDialect.convertStatementToSQL(stmt);
assertEquals("Select script should match expected", expectedSelectWithExcept(), result);
} | 3.68 |
flink_LinkedOptionalMap_hasAbsentKeysOrValues | /** Checks whether there are entries with absent keys or values. */
public boolean hasAbsentKeysOrValues() {
for (Entry<String, KeyValue<K, V>> entry : underlyingMap.entrySet()) {
if (keyOrValueIsAbsent(entry)) {
return true;
}
}
return false;
} | 3.68 |
hadoop_CommonAuditContext_containsKey | /**
* Does the context contain a specific key?
* @param key key
* @return true if it is in the context.
*/
public boolean containsKey(String key) {
return evaluatedEntries.containsKey(key);
} | 3.68 |
hbase_TableSchemaModel_getAttribute | /**
* Return a table descriptor value as a string. Calls toString() on the object stored in the
* descriptor value map.
* @param name the attribute name
* @return the attribute value
*/
public String getAttribute(String name) {
Object o = attrs.get(new QName(name));
return o != null ? o.toString() : null;
} | 3.68 |
flink_LookupFunctionProvider_of | /** Helper function for creating a static provider. */
static LookupFunctionProvider of(LookupFunction lookupFunction) {
return () -> lookupFunction;
} | 3.68 |
flink_FileSource_forRecordFileFormat | /**
* Builds a new {@code FileSource} using a {@link FileRecordFormat} to read record-by-record
* from a file path.
*
* <p>A {@code FileRecordFormat} is more general than the {@link StreamFormat}, but also
* often requires more careful parametrization.
*
* @deprecated Please use {@link #forRecordStreamFormat(StreamFormat, Path...)} instead.
*/
@Deprecated
public static <T> FileSourceBuilder<T> forRecordFileFormat(
final FileRecordFormat<T> recordFormat, final Path... paths) {
return forBulkFileFormat(new FileRecordFormatAdapter<>(recordFormat), paths);
} | 3.68 |
hadoop_RMAuthenticationFilter_doFilter | /**
* {@inheritDoc}
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain filterChain) throws IOException, ServletException {
HttpServletRequest req = (HttpServletRequest) request;
String newHeader =
req.getHeader(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER);
if (newHeader == null || newHeader.isEmpty()) {
// For backward compatibility, allow use of the old header field
// only when the new header doesn't exist
final String oldHeader = req.getHeader(OLD_HEADER);
if (oldHeader != null && !oldHeader.isEmpty()) {
request = new HttpServletRequestWrapper(req) {
@Override
public String getHeader(String name) {
if (name
.equals(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER)) {
return oldHeader;
}
return super.getHeader(name);
}
};
}
}
super.doFilter(request, response, filterChain);
} | 3.68 |
hadoop_AbfsClientContextBuilder_build | /**
* Build the context and get the instance with the properties selected.
*
* @return an instance of AbfsClientContext.
*/
public AbfsClientContext build() {
//validate the values
return new AbfsClientContext(exponentialRetryPolicy, abfsPerfTracker,
abfsCounters);
} | 3.68 |
dubbo_Proxy_getProxy | /**
* Get proxy.
*
* @param ics interface class array.
* @return Proxy instance.
*/
public static Proxy getProxy(Class<?>... ics) {
if (ics.length > MAX_PROXY_COUNT) {
throw new IllegalArgumentException("interface limit exceeded");
}
// The ClassLoader from the application interface should support loading some classes from Dubbo
ClassLoader cl = ics[0].getClassLoader();
ProtectionDomain domain = ics[0].getProtectionDomain();
// use interface class name list as key.
String key = buildInterfacesKey(cl, ics);
// get cache by class loader.
final Map<String, Proxy> cache;
synchronized (PROXY_CACHE_MAP) {
cache = PROXY_CACHE_MAP.computeIfAbsent(cl, k -> new ConcurrentHashMap<>());
}
Proxy proxy = cache.get(key);
if (proxy == null) {
synchronized (ics[0]) {
proxy = cache.get(key);
if (proxy == null) {
// create Proxy class.
proxy = new Proxy(buildProxyClass(cl, ics, domain));
cache.put(key, proxy);
}
}
}
return proxy;
} | 3.68 |
framework_AsyncPushUpdates_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Make sure there are no duplicates on the table.";
} | 3.68 |
hadoop_HistoryServerStateStoreService_serviceStart | /**
* Start the state storage for use
*
* @throws IOException
*/
@Override
public void serviceStart() throws IOException {
startStorage();
} | 3.68 |
hbase_TableSplit_getEndRow | /**
* Returns the end row.
* @return The end row.
*/
public byte[] getEndRow() {
return endRow;
} | 3.68 |
framework_StaticSection_removeCell | /**
* Removes the cell from this section that corresponds to the given
* column id. If there is no such cell, does nothing.
*
* @param columnId
* the id of the column from which to remove the cell
*/
protected void removeCell(String columnId) {
CELL cell = cells.remove(columnId);
if (cell != null) {
rowState.cells.remove(columnId);
for (Iterator<Set<String>> iterator = rowState.cellGroups
.values().iterator(); iterator.hasNext();) {
Set<String> group = iterator.next();
group.remove(columnId);
if (group.size() < 2) {
iterator.remove();
}
}
cell.detach();
}
} | 3.68 |
hbase_CacheConfig_enableCacheOnWrite | /**
* Enable cache on write, including: cacheDataOnWrite, cacheIndexesOnWrite, cacheBloomsOnWrite
*/
public void enableCacheOnWrite() {
this.cacheDataOnWrite = true;
this.cacheIndexesOnWrite = true;
this.cacheBloomsOnWrite = true;
} | 3.68 |
hbase_EncryptionTest_testEncryption | /**
* Check that the specified cipher can be loaded and initialized, or throw an exception. Verifies
* key and cipher provider configuration as a prerequisite for cipher verification. Also verifies
* if encryption is enabled globally.
* @param conf HBase configuration
* @param cipher cipher algorithm to use for the column family
* @param key encryption key
* @throws IOException in case of encryption configuration error
*/
public static void testEncryption(final Configuration conf, final String cipher, byte[] key)
throws IOException {
if (cipher == null) {
return;
}
if (!Encryption.isEncryptionEnabled(conf)) {
String message =
String.format("Cipher %s failed test: encryption is disabled on the cluster", cipher);
throw new IOException(message);
}
testKeyProvider(conf);
testCipherProvider(conf);
Boolean result = cipherResults.get(cipher);
if (result == null) {
try {
Encryption.Context context = Encryption.newContext(conf);
context.setCipher(Encryption.getCipher(conf, cipher));
if (key == null) {
// Make a random key since one was not provided
context.setKey(context.getCipher().getRandomKey());
} else {
// This will be a wrapped key from schema
context.setKey(EncryptionUtil.unwrapKey(conf,
conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase"), key));
}
byte[] iv = null;
if (context.getCipher().getIvLength() > 0) {
iv = new byte[context.getCipher().getIvLength()];
Bytes.secureRandom(iv);
}
byte[] plaintext = new byte[1024];
Bytes.random(plaintext);
ByteArrayOutputStream out = new ByteArrayOutputStream();
Encryption.encrypt(out, new ByteArrayInputStream(plaintext), context, iv);
byte[] ciphertext = out.toByteArray();
out.reset();
Encryption.decrypt(out, new ByteArrayInputStream(ciphertext), plaintext.length, context,
iv);
byte[] test = out.toByteArray();
if (!Bytes.equals(plaintext, test)) {
throw new IOException("Did not pass encrypt/decrypt test");
}
cipherResults.put(cipher, true);
} catch (Exception e) {
cipherResults.put(cipher, false);
throw new IOException("Cipher " + cipher + " failed test: " + e.getMessage(), e);
}
} else if (!result) {
throw new IOException("Cipher " + cipher + " previously failed test");
}
} | 3.68 |
pulsar_NettyChannelUtil_writeAndFlushWithVoidPromise | /**
* Write and flush the message to the channel.
*
* The promise is an instance of {@link VoidChannelPromise} that properly propagates exceptions up to the pipeline.
* Netty has many ad-hoc optimizations if the promise is an instance of {@link VoidChannelPromise}.
* Lastly, it reduces pollution of useless {@link io.netty.channel.ChannelPromise} objects created
* by the default write and flush method {@link ChannelOutboundInvoker#writeAndFlush(Object)}.
* See https://stackoverflow.com/q/54169262 and https://stackoverflow.com/a/9030420 for more details.
*
* @param ctx channel's context
* @param msg buffer to write in the channel
*/
public static void writeAndFlushWithVoidPromise(ChannelOutboundInvoker ctx, ByteBuf msg) {
ctx.writeAndFlush(msg, ctx.voidPromise());
} | 3.68 |
hbase_LockProcedure_setTimeoutFailure | /**
* Re-run the procedure after every timeout to write new WAL entries so we don't hold back old
* WALs.
* @return false, so procedure framework doesn't mark this procedure as failure.
*/
@Override
protected synchronized boolean setTimeoutFailure(final MasterProcedureEnv env) {
synchronized (event) {
if (LOG.isDebugEnabled()) LOG.debug("Timeout failure " + this.event);
if (!event.isReady()) { // Maybe unlock() awakened the event.
setState(ProcedureProtos.ProcedureState.RUNNABLE);
if (LOG.isDebugEnabled()) LOG.debug("Calling wake on " + this.event);
event.wake(env.getProcedureScheduler());
}
}
return false; // false: do not mark the procedure as failed.
} | 3.68 |
hadoop_CommitUtilsWithMR_formatJobDir | /**
* Build the name of the job directory, without
* app attempt.
* This is the path to use for cleanup.
* @param jobUUID unique Job ID.
* @return the directory name for the job
*/
public static String formatJobDir(
String jobUUID) {
return JOB_ID_PREFIX + jobUUID;
} | 3.68 |
hadoop_ParsedTaskAttempt_obtainDiagnosticInfo | /**
* @return the diagnostic-info of this task attempt.
* If the attempt is successful, returns null.
*/
public String obtainDiagnosticInfo() {
return diagnosticInfo;
} | 3.68 |
hbase_Result_compareResults | /**
* Does a deep comparison of two Results, down to the byte arrays.
* @param res1 first result to compare
* @param res2 second result to compare
* @param verbose includes string representation for all cells in the exception if true; otherwise
* include rowkey only
* @throws Exception if the results differ; every difference throws an exception
*/
public static void compareResults(Result res1, Result res2, boolean verbose) throws Exception {
if (res2 == null) {
throw new Exception(
"There wasn't enough rows, we stopped at " + Bytes.toStringBinary(res1.getRow()));
}
if (res1.size() != res2.size()) {
if (verbose) {
throw new Exception(
"This row doesn't have the same number of KVs: " + res1 + " compared to " + res2);
} else {
throw new Exception(
"This row doesn't have the same number of KVs: row=" + Bytes.toStringBinary(res1.getRow())
+ ", " + res1.size() + " cells are compared to " + res2.size() + " cells");
}
}
Cell[] ourKVs = res1.rawCells();
Cell[] replicatedKVs = res2.rawCells();
for (int i = 0; i < res1.size(); i++) {
if (
!ourKVs[i].equals(replicatedKVs[i]) || !CellUtil.matchingValue(ourKVs[i], replicatedKVs[i])
|| !CellUtil.matchingTags(ourKVs[i], replicatedKVs[i])
) {
if (verbose) {
throw new Exception("This result was different: " + res1 + " compared to " + res2);
} else {
throw new Exception(
"This result was different: row=" + Bytes.toStringBinary(res1.getRow()));
}
}
}
} | 3.68 |
hbase_BackupManager_addIncrementalBackupTableSet | /**
* Adds set of tables to overall incremental backup table set
* @param tables tables
* @throws IOException exception
*/
public void addIncrementalBackupTableSet(Set<TableName> tables) throws IOException {
systemTable.addIncrementalBackupTableSet(tables, backupInfo.getBackupRootDir());
} | 3.68 |
hbase_ScannerContext_incrementBatchProgress | /**
* Progress towards the batch limit has been made. Increment internal tracking of batch progress
*/
void incrementBatchProgress(int batch) {
if (skippingRow) {
return;
}
int currentBatch = progress.getBatch();
progress.setBatch(currentBatch + batch);
} | 3.68 |
hadoop_CosNFileSystem_getScheme | /**
* Return the protocol scheme for the FileSystem.
*
* @return <code>cosn</code>
*/
@Override
public String getScheme() {
return CosNFileSystem.SCHEME;
} | 3.68 |
rocketmq-connect_WorkerErrorRecordReporter_report | /**
* report record
*
* @param record
* @param error
* @return
*/
@Override
public void report(ConnectRecord record, Throwable error) {
RecordPartition partition = record.getPosition().getPartition();
String topic = partition.getPartition().containsKey("topic") ? String.valueOf(partition.getPartition().get("topic")) : null;
Integer queueId = partition.getPartition().containsKey("queueId") ? (Integer) partition.getPartition().get("queueId") : null;
Long queueOffset = partition.getPartition().containsKey("queueOffset") ? (Long) partition.getPartition().get("queueOffset") : null;
String brokerName = partition.getPartition().containsKey("brokerName") ? String.valueOf(partition.getPartition().get("brokerName")) : null;
MessageExt consumerRecord = new MessageExt();
if (converter != null && converter instanceof RecordConverter) {
byte[] value = converter.fromConnectData(topic, record.getSchema(), record.getData());
consumerRecord.setBody(value);
consumerRecord.setBrokerName(brokerName);
consumerRecord.setQueueId(queueId);
consumerRecord.setQueueOffset(queueOffset);
} else {
byte[] messageBody = JSON.toJSONString(record).getBytes();
consumerRecord.setBody(messageBody);
}
// add extensions
record.getExtensions().keySet().forEach(key -> {
consumerRecord.putUserProperty(key, record.getExtensions().getString(key));
});
retryWithToleranceOperator.executeFailed(ErrorReporter.Stage.TASK_PUT, SinkTask.class, consumerRecord, error);
} | 3.68 |
hudi_HoodieWriteHandle_makeWriteToken | /**
* Generate a write token based on the currently running spark task and its place in the spark dag.
*/
private String makeWriteToken() {
return FSUtils.makeWriteToken(getPartitionId(), getStageId(), getAttemptId());
} | 3.68 |
hadoop_RouterAuditLogger_addRemoteIP | /**
* A helper api to add remote IP address.
*/
static void addRemoteIP(StringBuilder b) {
InetAddress ip = Server.getRemoteIp();
// ip address can be null for testcases
if (ip != null) {
add(Keys.IP, ip.getHostAddress(), b);
}
} | 3.68 |
hadoop_AclUtil_isMinimalAcl | /**
* Checks if the given entries represent a minimal ACL (contains exactly 3
* entries).
*
* @param entries List<AclEntry> entries to check
* @return boolean true if the entries represent a minimal ACL
*/
public static boolean isMinimalAcl(List<AclEntry> entries) {
return entries.size() == 3;
} | 3.68 |
pulsar_BinaryProtoLookupService_getBroker | /**
* Calls broker binaryProto-lookup api to find broker-service address which can serve a given topic.
*
* @param topicName
* topic-name
* @return broker-socket-address that serves given topic
*/
public CompletableFuture<Pair<InetSocketAddress, InetSocketAddress>> getBroker(TopicName topicName) {
final MutableObject<CompletableFuture> newFutureCreated = new MutableObject<>();
try {
return lookupInProgress.computeIfAbsent(topicName, tpName -> {
CompletableFuture<Pair<InetSocketAddress, InetSocketAddress>> newFuture =
findBroker(serviceNameResolver.resolveHost(), false, topicName, 0);
newFutureCreated.setValue(newFuture);
return newFuture;
});
} finally {
if (newFutureCreated.getValue() != null) {
newFutureCreated.getValue().whenComplete((v, ex) -> {
lookupInProgress.remove(topicName, newFutureCreated.getValue());
});
}
}
} | 3.68 |
hudi_AbstractTableFileSystemView_clear | /**
* Clear the resource.
*/
protected void clear() {
addedPartitions.clear();
resetViewState();
bootstrapIndex = null;
} | 3.68 |
hbase_JenkinsHash_hash | /**
* taken from hashlittle() -- hash a variable-length key into a 32-bit value
* @param hashKey the key to extract the bytes for hash algo
* @param initval can be any integer value
* @return a 32-bit value. Every bit of the key affects every bit of the return value. Two keys
* differing by one or two bits will have totally different hash values.
* <p>
* The best hash table sizes are powers of 2. There is no need to do mod a prime (mod is
* sooo slow!). If you need less than 32 bits, use a bitmask. For example, if you need
* only 10 bits, do <code>h = (h & hashmask(10));</code> In which case, the hash table
* should have hashsize(10) elements.
* <p>
* If you are hashing n strings byte[][] k, do it like this: for (int i = 0, h = 0; i <
* n; ++i) h = hash( k[i], h);
* <p>
* By Bob Jenkins, 2006. [email protected]. You may use this code any way you
* wish, private, educational, or commercial. It's free.
* <p>
* Use for hash table lookup, or anything where one collision in 2^^32 is acceptable. Do
* NOT use for cryptographic purposes.
*/
@SuppressWarnings({ "fallthrough", "MissingDefault" })
@Override
public <T> int hash(HashKey<T> hashKey, int initval) {
int length = hashKey.length();
int a, b, c;
a = b = c = 0xdeadbeef + length + initval;
int offset = 0;
for (; length > 12; offset += 12, length -= 12) {
a += (hashKey.get(offset) & BYTE_MASK);
a += ((hashKey.get(offset + 1) & BYTE_MASK) << 8);
a += ((hashKey.get(offset + 2) & BYTE_MASK) << 16);
a += ((hashKey.get(offset + 3) & BYTE_MASK) << 24);
b += (hashKey.get(offset + 4) & BYTE_MASK);
b += ((hashKey.get(offset + 5) & BYTE_MASK) << 8);
b += ((hashKey.get(offset + 6) & BYTE_MASK) << 16);
b += ((hashKey.get(offset + 7) & BYTE_MASK) << 24);
c += (hashKey.get(offset + 8) & BYTE_MASK);
c += ((hashKey.get(offset + 9) & BYTE_MASK) << 8);
c += ((hashKey.get(offset + 10) & BYTE_MASK) << 16);
c += ((hashKey.get(offset + 11) & BYTE_MASK) << 24);
/*
* mix -- mix 3 32-bit values reversibly. This is reversible, so any information in (a,b,c)
* before mix() is still in (a,b,c) after mix(). If four pairs of (a,b,c) inputs are run
* through mix(), or through mix() in reverse, there are at least 32 bits of the output that
* are sometimes the same for one pair and different for another pair. This was tested for: -
* pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or
* in any combination of bottom bits of (a,b,c). - "differ" is defined as +, -, ^, or ~^. For
* + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is
* commonly produced by subtraction) look like a single 1-bit difference. - the base values
* were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at
* zero. Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that satisfy this are 4 6
* 8 16 19 4 9 15 3 18 27 15 14 9 3 7 17 3 Well, "9 15 3 18 27 15" didn't quite get 32 bits
* diffing for "differ" defined as + with a one-bit base and a two-bit delta. I used
* http://burtleburtle.net/bob/hash/avalanche.html to choose the operations, constants, and
* arrangements of the variables. This does not achieve avalanche. There are input bits of
* (a,b,c) that fail to affect some output bits of (a,b,c), especially of a. The most
* thoroughly mixed value is c, but it doesn't really even achieve avalanche in c. This allows
* some parallelism. Read-after-writes are good at doubling the number of bits affected, so
* the goal of mixing pulls in the opposite direction as the goal of parallelism. I did what I
* could. Rotates seem to cost as much as shifts on every machine I could lay my hands on, and
* rotates are much kinder to the top and bottom bits, so I used rotates. #define mix(a,b,c) \
* { \ a -= c; a ^= rot(c, 4); c += b; \ b -= a; b ^= rot(a, 6); a += c; \ c -= b; c ^= rot(b,
* 8); b += a; \ a -= c; a ^= rot(c,16); c += b; \ b -= a; b ^= rot(a,19); a += c; \ c -= b; c
* ^= rot(b, 4); b += a; \ } mix(a,b,c);
*/
a -= c;
a ^= rotateLeft(c, 4);
c += b;
b -= a;
b ^= rotateLeft(a, 6);
a += c;
c -= b;
c ^= rotateLeft(b, 8);
b += a;
a -= c;
a ^= rotateLeft(c, 16);
c += b;
b -= a;
b ^= rotateLeft(a, 19);
a += c;
c -= b;
c ^= rotateLeft(b, 4);
b += a;
}
// -------------------------------- last block: affect all 32 bits of (c)
switch (length) { // all the case statements fall through
case 12:
c += ((hashKey.get(offset + 11) & BYTE_MASK) << 24);
case 11:
c += ((hashKey.get(offset + 10) & BYTE_MASK) << 16);
case 10:
c += ((hashKey.get(offset + 9) & BYTE_MASK) << 8);
case 9:
c += (hashKey.get(offset + 8) & BYTE_MASK);
case 8:
b += ((hashKey.get(offset + 7) & BYTE_MASK) << 24);
case 7:
b += ((hashKey.get(offset + 6) & BYTE_MASK) << 16);
case 6:
b += ((hashKey.get(offset + 5) & BYTE_MASK) << 8);
case 5:
b += (hashKey.get(offset + 4) & BYTE_MASK);
case 4:
a += ((hashKey.get(offset + 3) & BYTE_MASK) << 24);
case 3:
a += ((hashKey.get(offset + 2) & BYTE_MASK) << 16);
case 2:
a += ((hashKey.get(offset + 1) & BYTE_MASK) << 8);
case 1:
// noinspection PointlessArithmeticExpression
a += (hashKey.get(offset + 0) & BYTE_MASK);
break;
case 0:
return c;
}
/*
* final -- final mixing of 3 32-bit values (a,b,c) into c Pairs of (a,b,c) values differing in
* only a few bits will usually produce values of c that look totally different. This was tested
* for - pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c),
* or in any combination of bottom bits of (a,b,c). - "differ" is defined as +, -, ^, or ~^. For
* + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is
* commonly produced by subtraction) look like a single 1-bit difference. - the base values were
* pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. These
* constants passed: 14 11 25 16 4 14 24 12 14 25 16 4 14 24 and these came close: 4 8 15 26 3
* 22 24 10 8 15 26 3 22 24 11 8 15 26 3 22 24 #define final(a,b,c) \ { c ^= b; c -= rot(b,14);
* \ a ^= c; a -= rot(c,11); \ b ^= a; b -= rot(a,25); \ c ^= b; c -= rot(b,16); \ a ^= c; a -=
* rot(c,4); \ b ^= a; b -= rot(a,14); \ c ^= b; c -= rot(b,24); \ }
*/
c ^= b;
c -= rotateLeft(b, 14);
a ^= c;
a -= rotateLeft(c, 11);
b ^= a;
b -= rotateLeft(a, 25);
c ^= b;
c -= rotateLeft(b, 16);
a ^= c;
a -= rotateLeft(c, 4);
b ^= a;
b -= rotateLeft(a, 14);
c ^= b;
c -= rotateLeft(b, 24);
return c;
} | 3.68 |
morf_HumanReadableStatementProducer_versionCompare | /**
* Compare two version strings. This differs from natural ordering
* as a version of 5.3.27 is higher than 5.3.3.
* @param str1 One version string to compare
* @param str2 The other version string to compare
* @return a negative integer, zero, or a positive integer as the
* first argument is less than, equal to, or greater than the
* second. */
@VisibleForTesting
protected static Integer versionCompare(String str1, String str2) {
String[] vals1 = str1.split("\\.");
String[] vals2 = str2.split("\\.");
// set index to first non-equal ordinal or length of shortest version string
int i = 0;
while (i < vals1.length && i < vals2.length && vals1[i].equals(vals2[i])) {
i++;
}
// compare first non-equal ordinal number
if (i < vals1.length && i < vals2.length) {
try {
int diff = Integer.valueOf(vals1[i]).compareTo(Integer.valueOf(vals2[i]));
return Integer.signum(diff);
} catch (NumberFormatException e) {
return Integer.signum(vals1[i].compareTo(vals2[i]));
}
}
// the strings are equal or one string is a substring of the other
// e.g. "1.2.3" = "1.2.3" or "1.2.3" < "1.2.3.4"
else {
return Integer.signum(vals1.length - vals2.length);
}
} | 3.68 |
hbase_MobFileCache_openFile | /**
* Opens a mob file.
* @param fs The current file system.
* @param path The file path.
* @param cacheConf The current MobCacheConfig
* @return An opened mob file.
*/
public MobFile openFile(FileSystem fs, Path path, CacheConfig cacheConf) throws IOException {
if (!isCacheEnabled) {
MobFile mobFile = MobFile.create(fs, path, conf, cacheConf);
mobFile.open();
return mobFile;
} else {
String fileName = path.getName();
CachedMobFile cached = map.get(fileName);
IdLock.Entry lockEntry = keyLock.getLockEntry(hashFileName(fileName));
try {
if (cached == null) {
cached = map.get(fileName);
if (cached == null) {
if (map.size() > mobFileMaxCacheSize) {
evict();
}
cached = CachedMobFile.create(fs, path, conf, cacheConf);
cached.open();
map.put(fileName, cached);
miss.increment();
}
}
cached.open();
cached.access(count.incrementAndGet());
} finally {
keyLock.releaseLockEntry(lockEntry);
}
return cached;
}
} | 3.68 |
hudi_CachingPath_concatPathUnsafe | // TODO java-doc
public static CachingPath concatPathUnsafe(Path basePath, String relativePath) {
try {
URI baseURI = basePath.toUri();
// NOTE: {@code normalize} is going to be invoked by {@code Path} ctor, so there's no
// point in invoking it here
String resolvedPath = resolveRelativePath(baseURI.getPath(), relativePath);
URI resolvedURI = new URI(baseURI.getScheme(), baseURI.getAuthority(), resolvedPath,
baseURI.getQuery(), baseURI.getFragment());
return new CachingPath(resolvedURI);
} catch (URISyntaxException e) {
throw new HoodieException("Failed to instantiate relative path", e);
}
} | 3.68 |
flink_NFACompiler_createStartState | /**
* Creates the Start {@link State} of the resulting NFA graph.
*
* @param sinkState the state that Start state should point to (always first state of middle
* states)
* @return created state
*/
@SuppressWarnings("unchecked")
private State<T> createStartState(State<T> sinkState) {
final State<T> beginningState = convertPattern(sinkState);
beginningState.makeStart();
return beginningState;
} | 3.68 |
flink_Predicates_arePublicStaticOfType | /**
* Tests that the given field is {@code public static} and has the fully qualified type name of
* {@code fqClassName}.
*
* <p>Attention: changing the description will add a rule into the stored.rules.
*/
public static DescribedPredicate<JavaField> arePublicStaticOfType(String fqClassName) {
return areFieldOfType(fqClassName, JavaModifier.PUBLIC, JavaModifier.STATIC);
} | 3.68 |
flink_FlinkContainers_getJobManagerHost | /** Gets JobManager's hostname on the host machine. */
public String getJobManagerHost() {
return jobManager.getHost();
} | 3.68 |
hadoop_NamenodeStatusReport_getHighestPriorityLowRedundancyReplicatedBlocks | /**
* Gets the total number of replicated low redundancy blocks on the cluster
* with the highest risk of loss.
*
* @return the total number of low redundancy blocks on the cluster
* with the highest risk of loss.
*/
public long getHighestPriorityLowRedundancyReplicatedBlocks() {
return this.highestPriorityLowRedundancyReplicatedBlocks;
} | 3.68 |
hadoop_SinglePendingCommit_bindCommitData | /**
* Set the commit data.
* @param parts ordered list of etags.
* @throws ValidationFailure if the data is invalid
*/
public void bindCommitData(List<CompletedPart> parts) throws ValidationFailure {
etags = new ArrayList<>(parts.size());
int counter = 1;
for (CompletedPart part : parts) {
verify(part.partNumber() == counter,
"Expected part number %s but got %s", counter, part.partNumber());
etags.add(part.eTag());
counter++;
}
} | 3.68 |
rocketmq-connect_WorkerSinkTaskContext_configs | /**
* Get the configurations of current task.
*
* @return the configuration of current task.
*/
@Override
public KeyValue configs() {
return taskConfig;
} | 3.68 |
hadoop_AbstractRESTRequestInterceptor_getNextInterceptor | /**
* Gets the next {@link RESTRequestInterceptor} in the chain.
*/
@Override
public RESTRequestInterceptor getNextInterceptor() {
return this.nextInterceptor;
} | 3.68 |
flink_AllWindowedStream_process | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window. The output of the window function is interpreted as a regular
* non-windowed stream.
*
* <p>Note that this function requires that all data in the windows is buffered until the window
* is evaluated, as the function provides no means of incremental aggregation.
*
* @param function The process window function.
* @return The data stream that is the result of applying the window function to the window.
*/
@PublicEvolving
public <R> SingleOutputStreamOperator<R> process(
ProcessAllWindowFunction<T, R, W> function, TypeInformation<R> resultType) {
String callLocation = Utils.getCallLocationName();
function = input.getExecutionEnvironment().clean(function);
return apply(
new InternalIterableProcessAllWindowFunction<>(function), resultType, callLocation);
} | 3.68 |
flink_BuiltInFunctionDefinition_version | /**
* Specifies a version that will be persisted in the plan together with the function's name.
* The default version is 1 for non-internal functions.
*
* <p>Note: Internal functions don't need to specify a version as we enforce a unique name
* that includes a version (see {@link #name(String)}).
*/
public Builder version(int version) {
this.version = version;
return this;
} | 3.68 |
rocketmq-connect_ConnectorPluginsResource_listPlugins | /**
* list connector plugins
*
* @param context
* @return
*/
public void listPlugins(Context context) {
synchronized (this) {
context.json(new HttpResponse<>(context.status(), Collections.unmodifiableList(connectorPlugins)));
}
} | 3.68 |
hadoop_S3ClientFactory_getMultiPartThreshold | /**
* Get the threshold for multipart operations.
* @return multipart threshold
*/
public long getMultiPartThreshold() {
return multiPartThreshold;
} | 3.68 |
pulsar_ProtocolHandlerUtils_load | /**
* Load the protocol handler according to the handler definition.
*
* @param metadata the protocol handler definition.
* @return the protocol handler wrapped together with its NAR class loader
*/
static ProtocolHandlerWithClassLoader load(ProtocolHandlerMetadata metadata,
String narExtractionDirectory) throws IOException {
final File narFile = metadata.getArchivePath().toAbsolutePath().toFile();
NarClassLoader ncl = NarClassLoaderBuilder.builder()
.narFile(narFile)
.parentClassLoader(ProtocolHandler.class.getClassLoader())
.extractionDirectory(narExtractionDirectory)
.build();
ProtocolHandlerDefinition phDef = getProtocolHandlerDefinition(ncl);
if (StringUtils.isBlank(phDef.getHandlerClass())) {
throw new IOException("Protocol handler `" + phDef.getName() + "` does NOT provide a protocol"
+ " handler implementation");
}
try {
Class handlerClass = ncl.loadClass(phDef.getHandlerClass());
Object handler = handlerClass.getDeclaredConstructor().newInstance();
if (!(handler instanceof ProtocolHandler)) {
throw new IOException("Class " + phDef.getHandlerClass()
+ " does not implement protocol handler interface");
}
ProtocolHandler ph = (ProtocolHandler) handler;
return new ProtocolHandlerWithClassLoader(ph, ncl);
} catch (Throwable t) {
rethrowIOException(t);
return null;
}
} | 3.68 |
morf_DataSetConnectorMultiThreaded_run | /**
* Cryo-s the table with the name given in the constructor from the producer to
* the consumer.
*/
@Override
public void run() {
consumer.table(producer.getSchema().getTable(tableName), producer.records(tableName));
} | 3.68 |
framework_VScrollTable_getNavigationEndKey | /**
* Get the key the moves the selection to the end of the table. By default
* this is the End key but by overriding this you can change the key to
* whatever you want.
*
* @return the key code used to move the selection to the end of the table
*/
protected int getNavigationEndKey() {
return KeyCodes.KEY_END;
} | 3.68 |
hbase_MasterObserver_postRestoreSnapshot | /**
* Called after a snapshot restore operation has been requested. Called as part of restoreSnapshot
* RPC call.
* @param ctx the environment to interact with the framework and master
* @param snapshot the SnapshotDescriptor for the snapshot
* @param tableDescriptor the TableDescriptor of the table to restore
*/
default void postRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final TableDescriptor tableDescriptor) throws IOException {
} | 3.68 |
framework_AbstractMedia_addSource | /**
* Adds an alternative media file to the sources list. Which of the sources
* is used is selected by the browser depending on which file formats it
* supports. See
* <a href="http://en.wikipedia.org/wiki/HTML5_video#Table">wikipedia</a>
* for a table of formats supported by different browsers.
*
* @param source the media source to add
*/
public void addSource(Resource source) {
if (source != null) {
List<URLReference> sources = getState().sources;
sources.add(new ResourceReference(source, this,
Integer.toString(sources.size())));
getState().sourceTypes.add(source.getMIMEType());
}
} | 3.68 |
zxing_FinderPatternFinder_foundPatternCross | /**
* @param stateCount count of black/white/black/white/black pixels just read
* @return true iff the proportions of the counts are close enough to the 1/1/3/1/1 ratios
* used by finder patterns to be considered a match
*/
protected static boolean foundPatternCross(int[] stateCount) {
int totalModuleSize = 0;
for (int i = 0; i < 5; i++) {
int count = stateCount[i];
if (count == 0) {
return false;
}
totalModuleSize += count;
}
if (totalModuleSize < 7) {
return false;
}
float moduleSize = totalModuleSize / 7.0f;
float maxVariance = moduleSize / 2.0f;
// Allow less than 50% variance from 1-1-3-1-1 proportions
return
Math.abs(moduleSize - stateCount[0]) < maxVariance &&
Math.abs(moduleSize - stateCount[1]) < maxVariance &&
Math.abs(3.0f * moduleSize - stateCount[2]) < 3 * maxVariance &&
Math.abs(moduleSize - stateCount[3]) < maxVariance &&
Math.abs(moduleSize - stateCount[4]) < maxVariance;
} | 3.68 |
hbase_KeyValueUtil_previousKey | /**
* Decrement the timestamp. For tests only (currently wasteful). Remember that timestamps are
* sorted reverse chronologically.
* @return previous key
*/
public static KeyValue previousKey(final KeyValue in) {
return createFirstOnRow(CellUtil.cloneRow(in), CellUtil.cloneFamily(in),
CellUtil.cloneQualifier(in), in.getTimestamp() - 1);
} | 3.68 |
hbase_OrderedBytes_decodeNumericAsBigDecimal | /**
* Decode a {@link BigDecimal} value from the variable-length encoding.
* @throws IllegalArgumentException when the encoded value is not a Numeric.
* @see #encodeNumeric(PositionedByteRange, BigDecimal, Order)
*/
public static BigDecimal decodeNumericAsBigDecimal(PositionedByteRange src) {
if (isNull(src)) {
src.get();
return null;
}
if (!isNumeric(src)) throw unexpectedHeader(src.peek());
if (isNumericNaN(src)) throw unexpectedHeader(src.peek());
if (isNumericInfinite(src)) throw unexpectedHeader(src.peek());
return decodeNumericValue(src);
} | 3.68 |
framework_BootstrapFragmentResponse_getFragmentNodes | /**
* Gets the list of DOM nodes that will be used to generate the fragment
* HTML. Changes to the returned list will be reflected in the generated
* HTML.
*
* @return the current list of DOM nodes that makes up the application
* fragment
*/
public List<Node> getFragmentNodes() {
return fragmentNodes;
} | 3.68 |
hudi_BaseHoodieLogRecordReader_scanInternal | /**
* @param keySpecOpt specifies target set of keys to be scanned
* @param skipProcessingBlocks controls whether (delta) blocks actually have to be processed
*/
protected final void scanInternal(Option<KeySpec> keySpecOpt, boolean skipProcessingBlocks) {
synchronized (this) {
if (enableOptimizedLogBlocksScan) {
scanInternalV2(keySpecOpt, skipProcessingBlocks);
} else {
scanInternalV1(keySpecOpt);
}
}
} | 3.68 |
MagicPlugin_CompoundAction_addAction | // These are here for legacy spell support
// via programmatic action building
public void addAction(SpellAction action) {
addAction(action, null);
} | 3.68 |
flink_AfterMatchSkipStrategy_prune | /**
* Prunes matches/partial matches based on the chosen strategy.
*
* @param matchesToPrune current partial matches
* @param matchedResult already completed matches
* @param sharedBufferAccessor accessor to corresponding shared buffer
* @throws Exception thrown if could not access the state
*/
public void prune(
Collection<ComputationState> matchesToPrune,
Collection<Map<String, List<EventId>>> matchedResult,
SharedBufferAccessor<?> sharedBufferAccessor)
throws Exception {
if (!isSkipStrategy()) {
return;
}
EventId pruningId = getPruningId(matchedResult);
if (pruningId != null) {
List<ComputationState> discardStates = new ArrayList<>();
for (ComputationState computationState : matchesToPrune) {
if (computationState.getStartEventID() != null
&& shouldPrune(computationState.getStartEventID(), pruningId)) {
sharedBufferAccessor.releaseNode(
computationState.getPreviousBufferEntry(),
computationState.getVersion());
discardStates.add(computationState);
}
}
matchesToPrune.removeAll(discardStates);
}
} | 3.68 |
hbase_TableDescriptorBuilder_getColumnFamilyNames | /**
* Returns all the column family names of the current table. The map of TableDescriptor contains
* mapping of family name to ColumnFamilyDescriptor. This returns all the keys of the family map
* which represents the column family names of the table.
* @return Immutable sorted set of the keys of the families.
*/
@Override
public Set<byte[]> getColumnFamilyNames() {
return Collections.unmodifiableSet(this.families.keySet());
} | 3.68 |
druid_SQLASTOutputVisitor_visit | ///////////// for odps & hive
@Override
public boolean visit(SQLLateralViewTableSource x) {
SQLTableSource tableSource = x.getTableSource();
if (tableSource != null) {
tableSource.accept(this);
}
this.indentCount++;
println();
print0(ucase ? "LATERAL VIEW " : "lateral view ");
if (x.isOuter()) {
print0(ucase ? "OUTER " : "outer ");
}
x.getMethod().accept(this);
print(' ');
print0(x.getAlias());
if (x.getColumns() != null && x.getColumns().size() > 0) {
print0(ucase ? " AS " : " as ");
printAndAccept(x.getColumns(), ", ");
}
SQLExpr on = x.getOn();
if (on != null) {
println();
print0(ucase ? "ON " : "on ");
printExpr(on);
}
this.indentCount--;
return false;
} | 3.68 |
flink_ManagedTableListener_notifyTableCompaction | /** Notify compaction for managed table. */
public Map<String, String> notifyTableCompaction(
@Nullable Catalog catalog,
ObjectIdentifier identifier,
ResolvedCatalogBaseTable<?> table,
CatalogPartitionSpec partitionSpec,
boolean isTemporary) {
if (isManagedTable(catalog, table)) {
if (RuntimeExecutionMode.STREAMING.equals(config.get(ExecutionOptions.RUNTIME_MODE))) {
throw new ValidationException("Compact managed table only works under batch mode.");
}
return discoverManagedTableFactory(classLoader)
.onCompactTable(
createTableFactoryContext(
identifier, (ResolvedCatalogTable) table, isTemporary),
partitionSpec);
}
throw new ValidationException("Only managed table supports compaction");
} | 3.68 |
flink_Tuple20_equals | /**
* Deep equality for tuples by calling equals() on the tuple members.
*
* @param o the object checked for equality
* @return true if this is equal to o.
*/
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof Tuple20)) {
return false;
}
@SuppressWarnings("rawtypes")
Tuple20 tuple = (Tuple20) o;
if (f0 != null ? !f0.equals(tuple.f0) : tuple.f0 != null) {
return false;
}
if (f1 != null ? !f1.equals(tuple.f1) : tuple.f1 != null) {
return false;
}
if (f2 != null ? !f2.equals(tuple.f2) : tuple.f2 != null) {
return false;
}
if (f3 != null ? !f3.equals(tuple.f3) : tuple.f3 != null) {
return false;
}
if (f4 != null ? !f4.equals(tuple.f4) : tuple.f4 != null) {
return false;
}
if (f5 != null ? !f5.equals(tuple.f5) : tuple.f5 != null) {
return false;
}
if (f6 != null ? !f6.equals(tuple.f6) : tuple.f6 != null) {
return false;
}
if (f7 != null ? !f7.equals(tuple.f7) : tuple.f7 != null) {
return false;
}
if (f8 != null ? !f8.equals(tuple.f8) : tuple.f8 != null) {
return false;
}
if (f9 != null ? !f9.equals(tuple.f9) : tuple.f9 != null) {
return false;
}
if (f10 != null ? !f10.equals(tuple.f10) : tuple.f10 != null) {
return false;
}
if (f11 != null ? !f11.equals(tuple.f11) : tuple.f11 != null) {
return false;
}
if (f12 != null ? !f12.equals(tuple.f12) : tuple.f12 != null) {
return false;
}
if (f13 != null ? !f13.equals(tuple.f13) : tuple.f13 != null) {
return false;
}
if (f14 != null ? !f14.equals(tuple.f14) : tuple.f14 != null) {
return false;
}
if (f15 != null ? !f15.equals(tuple.f15) : tuple.f15 != null) {
return false;
}
if (f16 != null ? !f16.equals(tuple.f16) : tuple.f16 != null) {
return false;
}
if (f17 != null ? !f17.equals(tuple.f17) : tuple.f17 != null) {
return false;
}
if (f18 != null ? !f18.equals(tuple.f18) : tuple.f18 != null) {
return false;
}
if (f19 != null ? !f19.equals(tuple.f19) : tuple.f19 != null) {
return false;
}
return true;
} | 3.68 |
hudi_OptionsResolver_isConsistentLogicalTimestampEnabled | /**
* Returns whether a consistent value will be generated for a logical timestamp type column.
*/
public static boolean isConsistentLogicalTimestampEnabled(Configuration conf) {
return conf.getBoolean(KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.key(),
Boolean.parseBoolean(KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.defaultValue()));
} | 3.68 |
flink_PojoComparator_accessField | /** This method handles the IllegalAccessException thrown by Field.get(). */
public final Object accessField(Field field, Object object) {
try {
object = field.get(object);
} catch (NullPointerException npex) {
throw new NullKeyFieldException(
"Unable to access field " + field + " on object " + object);
} catch (IllegalAccessException iaex) {
throw new RuntimeException(
"This should not happen since we call setAccesssible(true) in the ctor."
+ " fields: "
+ field
+ " obj: "
+ object);
}
return object;
} | 3.68 |
hbase_Operation_toMap | /**
* Produces a Map containing a full summary of a query.
* @return a map containing parameters of a query (i.e. rows, columns...)
*/
public Map<String, Object> toMap() {
return toMap(DEFAULT_MAX_COLS);
} | 3.68 |
hudi_HoodieCLI_getTableMetaClient | /**
* Get tableMetadata, throw NullPointerException when it is null.
*
* @return tableMetadata which is instance of HoodieTableMetaClient
*/
public static HoodieTableMetaClient getTableMetaClient() {
if (tableMetadata == null) {
throw new NullPointerException("There is no hudi table. Please use connect command to set table first");
}
return tableMetadata;
} | 3.68 |
framework_TableElement_getRow | /**
* Return table row element by zero-based index.
*
* @return table row element by zero-based index
*/
public TableRowElement getRow(int row) {
TestBenchElement rowElem = wrapElement(
findElement(By.vaadin("#row[" + row + "]")),
getCommandExecutor());
return rowElem.wrap(TableRowElement.class);
} | 3.68 |
MagicPlugin_MagicController_hasPermission | // Note that this version doesn't work with mob permissions
@Override
@Deprecated
public boolean hasPermission(CommandSender sender, String pNode, boolean defaultValue) {
if (!(sender instanceof Player)) return true;
return hasPermission((Player) sender, pNode);
} | 3.68 |
hadoop_AzureNativeFileSystemStore_normalizeKey | /**
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param directory
* - adjust the key to this directory to a path relative to the root
* directory
*
* @return normKey
*/
private String normalizeKey(CloudBlobDirectoryWrapper directory) {
String dirKey = normalizeKey(directory.getUri());
// Strip the last delimiter
if (dirKey.endsWith(PATH_DELIMITER)) {
dirKey = dirKey.substring(0, dirKey.length() - 1);
}
return dirKey;
} | 3.68 |
querydsl_Expressions_currentDate | /**
* Create an expression representing the current date as a DateExpression instance
*
* @return current date
*/
public static DateExpression<Date> currentDate() {
return DateExpression.currentDate();
} | 3.68 |
framework_ListenerMethod_matches | /**
* Checks if the given object, event and method match with the ones stored
* in this listener.
*
* @param target
* the object to be matched against the object stored by this
* listener.
* @param eventType
* the type to be tested for equality against the type stored by
* this listener.
* @param method
* the method to be tested for equality against the method stored
* by this listener.
* @return <code>true</code> if <code>target</code> is the same object as
* the one stored in this object, <code>eventType</code> equals with
* the event type stored in this object and <code>method</code>
* equals with the method stored in this object.
*/
public boolean matches(Class<?> eventType, Object target, Method method) {
return (this.target == target) && (eventType.equals(this.eventType)
&& method.equals(this.method));
} | 3.68 |
hadoop_AzureNativeFileSystemStore_initialize | /**
* Method for the URI and configuration object necessary to create a storage
* session with an Azure session. It parses the scheme to ensure it matches
* the storage protocol supported by this file system.
*
* @param uri - URI for target storage blob.
* @param conf - reference to configuration object.
* @param instrumentation - the metrics source that will keep track of operations here.
*
* @throws IllegalArgumentException if URI or job object is null, or invalid scheme.
*/
@Override
public void initialize(URI uri, Configuration conf, AzureFileSystemInstrumentation instrumentation)
throws IllegalArgumentException, AzureException, IOException {
if (null == instrumentation) {
throw new IllegalArgumentException("Null instrumentation");
}
this.instrumentation = instrumentation;
// Check that URI exists.
//
if (null == uri) {
throw new IllegalArgumentException(
"Cannot initialize WASB file system, URI is null");
}
// Check that configuration object is non-null.
//
if (null == conf) {
throw new IllegalArgumentException(
"Cannot initialize WASB file system, conf is null");
}
if (!conf.getBoolean(
NativeAzureFileSystem.SKIP_AZURE_METRICS_PROPERTY_NAME, false)) {
//If not skip azure metrics, create bandwidthGaugeUpdater
this.bandwidthGaugeUpdater = new BandwidthGaugeUpdater(instrumentation);
}
// Incoming parameters validated. Capture the URI and the job configuration
// object.
//
sessionUri = uri;
sessionConfiguration = conf;
useSecureMode = conf.getBoolean(KEY_USE_SECURE_MODE,
DEFAULT_USE_SECURE_MODE);
useLocalSasKeyMode = conf.getBoolean(KEY_USE_LOCAL_SAS_KEY_MODE,
DEFAULT_USE_LOCAL_SAS_KEY_MODE);
if (null == this.storageInteractionLayer) {
if (!useSecureMode) {
this.storageInteractionLayer = new StorageInterfaceImpl();
} else {
this.storageInteractionLayer = new SecureStorageInterfaceImpl(
useLocalSasKeyMode, conf);
}
}
// Configure Azure storage session.
configureAzureStorageSession();
// Start an Azure storage session.
//
createAzureStorageSession();
// Extract the directories that should contain page blobs
pageBlobDirs = getDirectorySet(KEY_PAGE_BLOB_DIRECTORIES);
LOG.debug("Page blob directories: {}", setToString(pageBlobDirs));
// User-agent
userAgentId = conf.get(USER_AGENT_ID_KEY, USER_AGENT_ID_DEFAULT);
// Extract the directories that should contain block blobs with compaction
blockBlobWithCompationDirs = getDirectorySet(
KEY_BLOCK_BLOB_WITH_COMPACTION_DIRECTORIES);
LOG.debug("Block blobs with compaction directories: {}",
setToString(blockBlobWithCompationDirs));
// Extract directories that should have atomic rename applied.
atomicRenameDirs = getDirectorySet(KEY_ATOMIC_RENAME_DIRECTORIES);
String hbaseRoot;
try {
// Add to this the hbase root directory, or /hbase is that is not set.
hbaseRoot = verifyAndConvertToStandardFormat(
sessionConfiguration.get("hbase.rootdir", "hbase"));
if (hbaseRoot != null) {
atomicRenameDirs.add(hbaseRoot);
}
} catch (URISyntaxException e) {
LOG.warn("Unable to initialize HBase root as an atomic rename directory.");
}
LOG.debug("Atomic rename directories: {} ", setToString(atomicRenameDirs));
metadataKeyCaseSensitive = conf
.getBoolean(KEY_BLOB_METADATA_KEY_CASE_SENSITIVE, true);
if (!metadataKeyCaseSensitive) {
LOG.info("{} configured as false. Blob metadata will be treated case insensitive.",
KEY_BLOB_METADATA_KEY_CASE_SENSITIVE);
}
} | 3.68 |
flink_TaskStateStats_getStateSize | /** @return Total checkpoint state size over all subtasks. */
public long getStateSize() {
return summaryStats.getStateSizeStats().getSum();
} | 3.68 |
flink_FlinkContainersSettings_getDefaultCheckpointPath | /**
* Gets default checkpoint path.
*
* @return The default checkpoint path.
*/
public static String getDefaultCheckpointPath() {
return DEFAULT_CHECKPOINT_PATH;
} | 3.68 |
hbase_MultiByteBuff_toBytes | /**
* Copy the content from this MBB to a byte[] based on the given offset (the position from where
* the copy should start) and length (the number of bytes to copy).
* @return byte[] with the copied contents from this MBB.
*/
@Override
public byte[] toBytes(int offset, int length) {
checkRefCount();
byte[] output = new byte[length];
this.get(offset, output, 0, length);
return output;
} | 3.68 |
hbase_MasterObserver_postGrant | /**
* Called after granting user permissions.
* @param ctx the coprocessor instance's environment
* @param userPermission the user and permissions
* @param mergeExistingPermissions True if merge with previous granted permissions
*/
default void postGrant(ObserverContext<MasterCoprocessorEnvironment> ctx,
UserPermission userPermission, boolean mergeExistingPermissions) throws IOException {
} | 3.68 |
hadoop_Error_message | /**
**/
public Error message(String message) {
this.message = message;
return this;
} | 3.68 |
pulsar_ResourceUsageTopicTransportManager_registerResourceUsageConsumer | /*
* Register a resource owner (resource-group, tenant, namespace, topic etc).
*
* @param resource usage consumer
*/
public void registerResourceUsageConsumer(ResourceUsageConsumer r) {
consumerMap.put(r.getID(), r);
} | 3.68 |
hbase_ProcedureStoreTracker_setDeletedIfDeletedByThem | /**
* For the global tracker, we will use this method to build the holdingCleanupTracker, as the
* modified flags will be cleared after rolling so we only need to test the deleted flags.
* @see #setDeletedIfModifiedInBoth(ProcedureStoreTracker)
*/
public void setDeletedIfDeletedByThem(ProcedureStoreTracker tracker) {
setDeleteIf(tracker, (node, procId) -> node == null || !node.contains(procId)
|| node.isDeleted(procId) == DeleteState.YES);
} | 3.68 |
morf_ConnectionResources_setFetchSizeForBulkSelects | /**
* Sets the JDBC Fetch Size to use when performing bulk select operations, intended to replace the default in {@link SqlDialect#fetchSizeForBulkSelects()}.
* The default behaviour for this method is interpreted as not setting the value.
* @param fetchSizeForBulkSelects the JDBC fetch size to use.
*/
public default void setFetchSizeForBulkSelects(Integer fetchSizeForBulkSelects){
} | 3.68 |
querydsl_AbstractLuceneQuery_load | /**
* Load only the fields of the given paths
*
* @param paths fields to load
* @return the current object
*/
@SuppressWarnings("unchecked")
public Q load(Path<?>... paths) {
List<String> fields = new ArrayList<String>(paths.length);
for (Path<?> path : paths) {
fields.add(serializer.toField(path));
}
this.fieldSelector = new MapFieldSelector(fields);
return (Q) this;
} | 3.68 |
hadoop_FsAction_or | /**
* OR operation.
* @param that FsAction that.
* @return FsAction.
*/
public FsAction or(FsAction that) {
return vals[ordinal() | that.ordinal()];
} | 3.68 |
hbase_BulkLoadHFilesTool_getRegionIndex | /**
* @param startEndKeys the start/end keys of regions belonging to this table, the list in ascending
* order by start key
* @param key the key whose containing region needs to be found
* @return region index
*/
private int getRegionIndex(List<Pair<byte[], byte[]>> startEndKeys, byte[] key) {
int idx = Collections.binarySearch(startEndKeys, Pair.newPair(key, HConstants.EMPTY_END_ROW),
(p1, p2) -> Bytes.compareTo(p1.getFirst(), p2.getFirst()));
if (idx < 0) {
// not on boundary, returns -(insertion index). Calculate region it
// would be in.
idx = -(idx + 1) - 1;
}
return idx;
} | 3.68 |
flink_OperatorCoordinator_notifyCheckpointAborted | /**
* We override the method here to remove the checked exception. Please check the Java docs of
* {@link CheckpointListener#notifyCheckpointAborted(long)} for more detail semantic of the
* method.
*/
@Override
default void notifyCheckpointAborted(long checkpointId) {} | 3.68 |
flink_RocksDBMemoryConfiguration_getHighPriorityPoolRatio | /**
* Gets the fraction of the total memory to be used for high priority blocks like indexes,
* dictionaries, etc. This only has an effect if either {@link #setUseManagedMemory(boolean)} or
* {@link #setFixedMemoryPerSlot(MemorySize)} is set.
*
* <p>See {@link RocksDBOptions#HIGH_PRIORITY_POOL_RATIO} for details.
*/
public double getHighPriorityPoolRatio() {
return highPriorityPoolRatio != null
? highPriorityPoolRatio
: RocksDBOptions.HIGH_PRIORITY_POOL_RATIO.defaultValue();
} | 3.68 |
druid_StatViewServlet_initJmxConn | /**
* Initialize the JMX connection.
*
* @throws IOException
*/
private void initJmxConn() throws IOException {
if (jmxUrl != null) {
JMXServiceURL url = new JMXServiceURL(jmxUrl);
Map<String, String[]> env = null;
if (jmxUsername != null) {
env = new HashMap<String, String[]>();
String[] credentials = new String[]{jmxUsername, jmxPassword};
env.put(JMXConnector.CREDENTIALS, credentials);
}
JMXConnector jmxc = JMXConnectorFactory.connect(url, env);
conn = jmxc.getMBeanServerConnection();
}
} | 3.68 |
flink_AsynchronousBlockWriterWithCallback_writeBlock | /**
* Issues a asynchronous write request to the writer.
*
* @param segment The segment to be written.
* @throws IOException Thrown, when the writer encounters an I/O error. Due to the asynchronous
* nature of the writer, the exception thrown here may have been caused by an earlier write
* request.
*/
@Override
public void writeBlock(MemorySegment segment) throws IOException {
addRequest(new SegmentWriteRequest(this, segment));
} | 3.68 |
hudi_HoodieTableMetadataUtil_getPartitionLatestMergedFileSlices | /**
* Get the latest file slices for a Metadata Table partition. If a file slice results from a
* pending compaction instant, then merge it with the file slice just before the compaction
* instant time. The list of file slices returned is
* sorted in the correct order of file group name.
*
* @param metaClient Instance of {@link HoodieTableMetaClient}.
* @param fsView Metadata table filesystem view.
* @param partition The name of the partition whose file groups are to be loaded.
* @return List of latest file slices for all file groups in a given partition.
*/
public static List<FileSlice> getPartitionLatestMergedFileSlices(
HoodieTableMetaClient metaClient, HoodieTableFileSystemView fsView, String partition) {
LOG.info("Loading latest merged file slices for metadata table partition " + partition);
return getPartitionFileSlices(metaClient, Option.of(fsView), partition, true);
} | 3.68 |
hadoop_AbfsOutputStream_failureWhileSubmit | /**
* A method to set the lastError if an exception is caught.
* @param ex Exception caught.
* @throws IOException Throws the lastError.
*/
private void failureWhileSubmit(Exception ex) throws IOException {
if (ex instanceof AbfsRestOperationException) {
if (((AbfsRestOperationException) ex).getStatusCode()
== HttpURLConnection.HTTP_NOT_FOUND) {
throw new FileNotFoundException(ex.getMessage());
}
}
if (ex instanceof IOException) {
lastError = (IOException) ex;
} else {
lastError = new IOException(ex);
}
throw lastError;
} | 3.68 |
hadoop_AzureBlobFileSystem_incrementStatistic | /**
* Method for incrementing AbfsStatistic by a long value.
*
* @param statistic the Statistic to be incremented.
*/
private void incrementStatistic(AbfsStatistic statistic) {
if (abfsCounters != null) {
abfsCounters.incrementCounter(statistic, 1);
}
} | 3.68 |