Columns: name (string, length 12-178), code_snippet (string, length 8-36.5k), score (float64, range 3.26-3.68)
hbase_CompactingMemStore_size
/** * @return Total memory occupied by this MemStore. This won't include any size occupied by the * snapshot. We assume the snapshot will get cleared soon. This is not thread safe and the * memstore may be changed while computing its size. It is the responsibility of the * caller to make sure this doesn't happen. */ @Override public MemStoreSize size() { MemStoreSizing memstoreSizing = new NonThreadSafeMemStoreSizing(); memstoreSizing.incMemStoreSize(getActive().getMemStoreSize()); for (Segment item : pipeline.getSegments()) { memstoreSizing.incMemStoreSize(item.getMemStoreSize()); } return memstoreSizing.getMemStoreSize(); }
3.68
hbase_ResponseConverter_getResults
/** * Create Results from the cells using the cells meta data. */ public static Result[] getResults(CellScanner cellScanner, ScanResponse response) throws IOException { if (response == null) return null; // If cellscanner, then the number of Results to return is the count of elements in the // cellsPerResult list. Otherwise, it is how many results are embedded inside the response. int noOfResults = cellScanner != null ? response.getCellsPerResultCount() : response.getResultsCount(); Result[] results = new Result[noOfResults]; for (int i = 0; i < noOfResults; i++) { if (cellScanner != null) { // Cells are out in cellblocks. Group them up again as Results. How many to read at a // time will be found in getCellsLength -- length here is how many Cells in the i'th Result int noOfCells = response.getCellsPerResult(i); boolean isPartial = response.getPartialFlagPerResultCount() > i ? response.getPartialFlagPerResult(i) : false; List<Cell> cells = new ArrayList<>(noOfCells); for (int j = 0; j < noOfCells; j++) { try { if (cellScanner.advance() == false) { // We are not able to retrieve the exact number of cells which ResultCellMeta tells us. // We have to scan for the same results again. Throwing DNRIOE as a client retry on // the // same scanner will result in OutOfOrderScannerNextException String msg = "Results sent from server=" + noOfResults + ". But only got " + i + " results completely at client. Resetting the scanner to scan again."; LOG.error(msg); throw new DoNotRetryIOException(msg); } } catch (IOException ioe) { // We are getting IOE while retrieving the cells for Results. // We have to scan for the same results again. Throwing DNRIOE as a client retry on the // same scanner will result in OutOfOrderScannerNextException LOG.error( "Exception while reading cells from result." + "Resetting the scanner to scan again.", ioe); throw new DoNotRetryIOException("Resetting the scanner.", ioe); } cells.add(cellScanner.current()); } results[i] = Result.create(cells, null, response.getStale(), isPartial); } else { // Result is pure pb. results[i] = ProtobufUtil.toResult(response.getResults(i)); } } return results; }
3.68
hbase_ColumnFamilyDescriptorBuilder_setVersionsWithTimeToLive
/** * Retain all versions for a given TTL (retentionInterval), and then only a specific number of * versions (versionAfterInterval) after that interval elapses. * @param retentionInterval Retain all versions for this interval * @param versionAfterInterval Number of versions to retain after retentionInterval elapses * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive(final int retentionInterval, final int versionAfterInterval) { ModifyableColumnFamilyDescriptor modifyableColumnFamilyDescriptor = setVersions(versionAfterInterval, Integer.MAX_VALUE); modifyableColumnFamilyDescriptor.setTimeToLive(retentionInterval); modifyableColumnFamilyDescriptor.setKeepDeletedCells(KeepDeletedCells.TTL); return modifyableColumnFamilyDescriptor; }
3.68
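A minimal usage sketch of the method above, assuming the enclosing ColumnFamilyDescriptorBuilder exposes the same setVersionsWithTimeToLive and that the interval is given in seconds (as with setTimeToLive); the family name is illustrative:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
    .setVersionsWithTimeToLive(86400, 2) // keep every version for one day, then only the latest 2
    .build();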
framework_VTabsheet_focusPreviousTab
/** * Left arrow key focus move. Selection won't change until the selection * key is pressed, but the target tab must be selectable. If no * selectable tabs are found before currently focused tab, focus isn't * moved. */ private void focusPreviousTab() { int newTabIndex = focusedTabIndex; // Find the previous visible and enabled tab if any. do { newTabIndex--; } while (newTabIndex >= 0 && !canSelectTab(newTabIndex)); if (newTabIndex >= 0) { keyFocusTab(newTabIndex); } }
3.68
hadoop_AbfsOutputStream_write
/** * Writes length bytes from the specified byte array starting at off to * this output stream. * * @param data the byte array to write. * @param off the start offset in the data. * @param length the number of bytes to write. * @throws IOException if an I/O error occurs. In particular, an IOException may be * thrown if the output stream has been closed. */ @Override public synchronized void write(final byte[] data, final int off, final int length) throws IOException { // validate that data is not null and the indices are in bounds. DataBlocks.validateWriteArgs(data, off, length); maybeThrowLastError(); if (off < 0 || length < 0 || length > data.length - off) { throw new IndexOutOfBoundsException(); } if (hasLease() && isLeaseFreed()) { throw new PathIOException(path, ERR_WRITE_WITHOUT_LEASE); } DataBlocks.DataBlock block = createBlockIfNeeded(); int written = block.write(data, off, length); int remainingCapacity = block.remainingCapacity(); if (written < length) { // Number of bytes to write is more than the data block capacity, // trigger an upload and then write on the next block. LOG.debug("writing more data than block capacity - triggering upload"); uploadCurrentBlock(); // tail recursion is mildly expensive, but given buffer sizes must be MB, // it's unlikely to recurse very deeply. this.write(data, off + written, length - written); } else { if (remainingCapacity == 0) { // the whole buffer is done, trigger an upload uploadCurrentBlock(); } } incrementWriteOps(); }
3.68
hbase_ProcedureUtil_createRetryCounter
/** * Get a retry counter for getting the backoff time. We will use the * {@link ExponentialBackoffPolicyWithLimit} policy, and the base unit is 1 second, max sleep time * is 10 minutes by default. * <p/> * For UTs, you can set the {@link #PROCEDURE_RETRY_SLEEP_INTERVAL_MS} and * {@link #PROCEDURE_RETRY_MAX_SLEEP_TIME_MS} to make more frequent retry so your UT will not * timeout. */ public static RetryCounter createRetryCounter(Configuration conf) { long sleepIntervalMs = conf.getLong(PROCEDURE_RETRY_SLEEP_INTERVAL_MS, DEFAULT_PROCEDURE_RETRY_SLEEP_INTERVAL_MS); long maxSleepTimeMs = conf.getLong(PROCEDURE_RETRY_MAX_SLEEP_TIME_MS, DEFAULT_PROCEDURE_RETRY_MAX_SLEEP_TIME_MS); RetryConfig retryConfig = new RetryConfig().setSleepInterval(sleepIntervalMs) .setMaxSleepTime(maxSleepTimeMs).setBackoffPolicy(new ExponentialBackoffPolicyWithLimit()); return new RetryCounter(retryConfig); }
3.68
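A brief sketch of how the returned RetryCounter is typically driven; tryStoreUpdate() is a hypothetical transiently-failing operation, and the configuration key constant is the one named in the javadoc above (assumed to be publicly accessible):

Configuration conf = HBaseConfiguration.create();
conf.setLong(ProcedureUtil.PROCEDURE_RETRY_SLEEP_INTERVAL_MS, 100L); // shorter base backoff, e.g. for tests
RetryCounter retryCounter = ProcedureUtil.createRetryCounter(conf);
while (!tryStoreUpdate()) {
  retryCounter.sleepUntilNextRetry(); // exponential backoff, capped at the configured max sleep time; may throw InterruptedException
}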
hbase_QuotaState_setQuotas
/** * Setup the global quota information. (This operation is part of the QuotaState setup) */ public synchronized void setQuotas(final Quotas quotas) { if (quotas.hasThrottle()) { globalLimiter = QuotaLimiterFactory.fromThrottle(quotas.getThrottle()); } else { globalLimiter = NoopQuotaLimiter.get(); } }
3.68
flink_CatalogManager_getCatalog
/** * Gets a {@link Catalog} instance by name. * * <p>If the catalog has already been initialized, the initialized instance will be returned * directly. Otherwise, the {@link CatalogDescriptor} will be obtained from the {@link * CatalogStore}, and the catalog instance will be initialized. * * @param catalogName name of the catalog to retrieve * @return the requested catalog or empty if it does not exist */ public Optional<Catalog> getCatalog(String catalogName) { // Get catalog from the initialized catalogs. if (catalogs.containsKey(catalogName)) { return Optional.of(catalogs.get(catalogName)); } // Get catalog from the CatalogStore. Optional<CatalogDescriptor> optionalDescriptor = catalogStoreHolder.catalogStore().getCatalog(catalogName); return optionalDescriptor.map( descriptor -> { Catalog catalog = initCatalog(catalogName, descriptor); catalog.open(); catalogs.put(catalogName, catalog); return catalog; }); }
3.68
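A short usage sketch; catalogManager is assumed to be an initialized CatalogManager and "my_hive" an illustrative catalog name:

Optional<Catalog> maybeCatalog = catalogManager.getCatalog("my_hive");
maybeCatalog.ifPresent(catalog -> catalog.listDatabases().forEach(System.out::println));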
morf_SqlDialect_appendFrom
/** * appends from clause to the result * * @param result from clause will be appended here * @param stmt statement with from clause * @param <T> The type of AbstractSelectStatement */ protected <T extends AbstractSelectStatement<T>> void appendFrom(StringBuilder result, AbstractSelectStatement<T> stmt) { if (stmt.getTable() != null) { result.append(" FROM "); result.append(tableNameWithSchemaName(stmt.getTable())); // Add a table alias if necessary if (!stmt.getTable().getAlias().equals("")) { result.append(" "); result.append(stmt.getTable().getAlias()); } } else if (!stmt.getFromSelects().isEmpty()) { result.append(" FROM "); boolean first = true; for (SelectStatement innerSelect : stmt.getFromSelects()) { checkSelectStatementHasNoHints(innerSelect, "Hints not currently permitted on subqueries"); if (!first) { result.append(", "); } first = false; result.append(String.format("(%s)", getSqlFrom(innerSelect))); if (StringUtils.isNotBlank(innerSelect.getAlias())) { result.append(String.format(" %s", innerSelect.getAlias())); } } } else { result.append(getFromDummyTable()); } }
3.68
framework_ComponentLocator_getClient
/** * Returns the {@link ApplicationConnection} used by this locator. * <p> * This method is primarily for internal use by the framework. * * @return the application connection */ public ApplicationConnection getClient() { return client; }
3.68
framework_UIInitHandler_getPushIdUIDL
/** * Gets the push connection identifier as UIDL. * * @param session * the vaadin session to which the security key belongs * @return the push identifier UIDL */ private static String getPushIdUIDL(VaadinSession session) { return "\"" + ApplicationConstants.UIDL_PUSH_ID + "\":\"" + session.getPushId() + "\","; }
3.68
hudi_JenkinsHash_hash
/** * taken from hashlittle() -- hash a variable-length key into a 32-bit value * * @param key the key (the unaligned variable-length array of bytes) * @param nbytes number of bytes to include in hash * @param initval can be any integer value * @return a 32-bit value. Every bit of the key affects every bit of the * return value. Two keys differing by one or two bits will have totally * different hash values. * * <p>The best hash table sizes are powers of 2. There is no need to do mod * a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask. * For example, if you need only 10 bits, do * <code>h = (h & hashmask(10));</code> * In which case, the hash table should have hashsize(10) elements. * * <p>If you are hashing n strings byte[][] k, do it like this: * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h); * * <p>By Bob Jenkins, 2006. [email protected]. You may use this * code any way you wish, private, educational, or commercial. It's free. * * <p>Use for hash table lookup, or anything where one collision in 2^^32 is * acceptable. Do NOT use for cryptographic purposes. */ @Override @SuppressWarnings("fallthrough") public int hash(byte[] key, int nbytes, int initval) { int length = nbytes; long a; long b; long c; // We use longs because we don't have unsigned ints a = b = c = (0x00000000deadbeefL + length + initval) & INT_MASK; int offset = 0; for (; length > 12; offset += 12, length -= 12) { a = (a + (key[offset + 0] & BYTE_MASK)) & INT_MASK; a = (a + (((key[offset + 1] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; a = (a + (((key[offset + 2] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; a = (a + (((key[offset + 3] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; b = (b + (key[offset + 4] & BYTE_MASK)) & INT_MASK; b = (b + (((key[offset + 5] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; b = (b + (((key[offset + 6] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; b = (b + (((key[offset + 7] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; c = (c + (key[offset + 8] & BYTE_MASK)) & INT_MASK; c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; /* * mix -- mix 3 32-bit values reversibly. * This is reversible, so any information in (a,b,c) before mix() is * still in (a,b,c) after mix(). * * If four pairs of (a,b,c) inputs are run through mix(), or through * mix() in reverse, there are at least 32 bits of the output that * are sometimes the same for one pair and different for another pair. * * This was tested for: * - pairs that differed by one bit, by two bits, in any combination * of top bits of (a,b,c), or in any combination of bottom bits of * (a,b,c). * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as * is commonly produced by subtraction) look like a single 1-bit * difference. * - the base values were pseudorandom, all zero but one bit set, or * all zero plus a counter that starts at zero. * * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that * satisfy this are * 4 6 8 16 19 4 * 9 15 3 18 27 15 * 14 9 3 7 17 3 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for * "differ" defined as + with a one-bit base and a two-bit delta. I * used http://burtleburtle.net/bob/hash/avalanche.html to choose * the operations, constants, and arrangements of the variables. * * This does not achieve avalanche. There are input bits of (a,b,c) * that fail to affect some output bits of (a,b,c), especially of a. * The most thoroughly mixed value is c, but it doesn't really even * achieve avalanche in c. * * This allows some parallelism. Read-after-writes are good at doubling * the number of bits affected, so the goal of mixing pulls in the * opposite direction as the goal of parallelism. I did what I could. * Rotates seem to cost as much as shifts on every machine I could lay * my hands on, and rotates are much kinder to the top and bottom bits, * so I used rotates. * * #define mix(a,b,c) \ * { \ * a -= c; a ^= rot(c, 4); c += b; \ * b -= a; b ^= rot(a, 6); a += c; \ * c -= b; c ^= rot(b, 8); b += a; \ * a -= c; a ^= rot(c,16); c += b; \ * b -= a; b ^= rot(a,19); a += c; \ * c -= b; c ^= rot(b, 4); b += a; \ * } * * mix(a,b,c); */ a = (a - c) & INT_MASK; a ^= rot(c, 4); c = (c + b) & INT_MASK; b = (b - a) & INT_MASK; b ^= rot(a, 6); a = (a + c) & INT_MASK; c = (c - b) & INT_MASK; c ^= rot(b, 8); b = (b + a) & INT_MASK; a = (a - c) & INT_MASK; a ^= rot(c, 16); c = (c + b) & INT_MASK; b = (b - a) & INT_MASK; b ^= rot(a, 19); a = (a + c) & INT_MASK; c = (c - b) & INT_MASK; c ^= rot(b, 4); b = (b + a) & INT_MASK; } //-------------------------------- last block: affect all 32 bits of (c) // all the case statements fall through switch (length) { case 12: c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; case 11: c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; case 10: c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; case 9: c = (c + (key[offset + 8] & BYTE_MASK)) & INT_MASK; case 8: b = (b + (((key[offset + 7] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; case 7: b = (b + (((key[offset + 6] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; case 6: b = (b + (((key[offset + 5] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; case 5: b = (b + (key[offset + 4] & BYTE_MASK)) & INT_MASK; case 4: a = (a + (((key[offset + 3] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; case 3: a = (a + (((key[offset + 2] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; case 2: a = (a + (((key[offset + 1] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; case 1: a = (a + (key[offset + 0] & BYTE_MASK)) & INT_MASK; break; case 0: return (int) (c & INT_MASK); default: } /* * final -- final mixing of 3 32-bit values (a,b,c) into c * * Pairs of (a,b,c) values differing in only a few bits will usually * produce values of c that look totally different. This was tested for * - pairs that differed by one bit, by two bits, in any combination * of top bits of (a,b,c), or in any combination of bottom bits of * (a,b,c). * * - "differ" is defined as +, -, ^, or ~^. For + and -, I transformed * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as * is commonly produced by subtraction) look like a single 1-bit * difference. * * - the base values were pseudorandom, all zero but one bit set, or * all zero plus a counter that starts at zero. * * These constants passed: * 14 11 25 16 4 14 24 * 12 14 25 16 4 14 24 * and these came close: * 4 8 15 26 3 22 24 * 10 8 15 26 3 22 24 * 11 8 15 26 3 22 24 * * #define final(a,b,c) \ * { * c ^= b; c -= rot(b,14); \ * a ^= c; a -= rot(c,11); \ * b ^= a; b -= rot(a,25); \ * c ^= b; c -= rot(b,16); \ * a ^= c; a -= rot(c,4); \ * b ^= a; b -= rot(a,14); \ * c ^= b; c -= rot(b,24); \ * } * */ c ^= b; c = (c - rot(b, 14)) & INT_MASK; a ^= c; a = (a - rot(c, 11)) & INT_MASK; b ^= a; b = (b - rot(a, 25)) & INT_MASK; c ^= b; c = (c - rot(b, 16)) & INT_MASK; a ^= c; a = (a - rot(c, 4)) & INT_MASK; b ^= a; b = (b - rot(a, 14)) & INT_MASK; c ^= b; c = (c - rot(b, 24)) & INT_MASK; return (int) (c & INT_MASK); }
3.68
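A hedged usage sketch; it assumes the Hudi port keeps an accessible constructor (a getInstance() factory may exist instead) and follows the incremental-hashing pattern from the javadoc:

byte[] key = "row-0001".getBytes(StandardCharsets.UTF_8);
JenkinsHash jenkins = new JenkinsHash();
int h = jenkins.hash(key, key.length, 0);
int bucket = (h & Integer.MAX_VALUE) % 1024; // map the 32-bit value onto 1024 slots
// hashing several arrays incrementally, as the javadoc suggests: h = jenkins.hash(k[i], k[i].length, h);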
hudi_AbstractTableFileSystemView_buildFileGroups
/** * Build FileGroups from passed in file-status. */ protected List<HoodieFileGroup> buildFileGroups(FileStatus[] statuses, HoodieTimeline timeline, boolean addPendingCompactionFileSlice) { return buildFileGroups(convertFileStatusesToBaseFiles(statuses), convertFileStatusesToLogFiles(statuses), timeline, addPendingCompactionFileSlice); }
3.68
hbase_Server_getFileSystem
/** Returns the FileSystem object used (can return null!). */ // TODO: Distinguish between "dataFs" and "walFs". default FileSystem getFileSystem() { // This default is pretty dodgy! Configuration c = getConfiguration(); FileSystem fs = null; try { if (c != null) { fs = FileSystem.get(c); } } catch (IOException e) { // If an exception, just return null } return fs; }
3.68
dubbo_NetUtils_isValidAddress
/** * Tells whether the address to test is a valid address. * * @implNote Pattern matching only. * @param address address to test * @return true if valid */ public static boolean isValidAddress(String address) { return ADDRESS_PATTERN.matcher(address).matches(); }
3.68
hbase_MasterObserver_postAddReplicationPeer
/** * Called after adding a replication peer * @param ctx the environment to interact with the framework and master * @param peerId a short name that identifies the peer * @param peerConfig configuration for the replication peer */ default void postAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId, ReplicationPeerConfig peerConfig) throws IOException { }
3.68
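A hedged sketch of a coprocessor that overrides this hook; the class name and logging are illustrative, and it assumes the HBase 2.x MasterCoprocessor/MasterObserver split:

public class PeerAuditObserver implements MasterCoprocessor, MasterObserver {
  private static final Logger LOG = LoggerFactory.getLogger(PeerAuditObserver.class);

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postAddReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
      String peerId, ReplicationPeerConfig peerConfig) throws IOException {
    // purely illustrative: audit-log every replication peer added through the Master
    LOG.info("Replication peer {} added with cluster key {}", peerId, peerConfig.getClusterKey());
  }
}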
flink_ExecutionConfig_canEqual
/** * This method simply checks whether the object is an {@link ExecutionConfig} instance. * * @deprecated It is not intended to be used by users. */ @Deprecated public boolean canEqual(Object obj) { return obj instanceof ExecutionConfig; }
3.68
flink_CliFrontend_getDefaultParallelism
/** * Get default parallelism from command line via effective configuration. * * @param effectiveConfiguration Flink effective configuration. * @return default parallelism. */ private int getDefaultParallelism(Configuration effectiveConfiguration) { return effectiveConfiguration.get(CoreOptions.DEFAULT_PARALLELISM); }
3.68
framework_Profiler_isImplEnabled
/** * Overridden in {@link EnabledProfiler} to make {@link #isEnabled()} return * true if GWT.create returns that class. * * @return <code>true</code> if the profiling is enabled, else * <code>false</code> */ protected boolean isImplEnabled() { return false; }
3.68
hbase_SnapshotScannerHDFSAclHelper_grantAcl
/** * Set ACL when granting a user permission * @param userPermission the user and permission * @param skipNamespaces the namespaces to skip because their ACLs are already set * @param skipTables the tables to skip because their ACLs are already set * @return false if an error occurred, otherwise true */ public boolean grantAcl(UserPermission userPermission, Set<String> skipNamespaces, Set<TableName> skipTables) { try { long start = EnvironmentEdgeManager.currentTime(); handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces, skipTables); LOG.info("Set HDFS acl when grant {}, skipNamespaces: {}, skipTables: {}, cost {} ms", userPermission, skipNamespaces, skipTables, EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Set HDFS acl error when grant: {}, skipNamespaces: {}, skipTables: {}", userPermission, skipNamespaces, skipTables, e); return false; } }
3.68
framework_VLayoutSlot_positionHorizontally
/** * Position the slot horizontally and set the width and the right margin. * * @param currentLocation * the left position for this slot * @param allocatedSpace * how much horizontal space is available for this slot * @param marginRight * the right margin this slot should have (removed if negative) */ public void positionHorizontally(double currentLocation, double allocatedSpace, double marginRight) { Style style = wrapper.getStyle(); double availableWidth = allocatedSpace; VCaption caption = getCaption(); Style captionStyle = caption != null ? caption.getElement().getStyle() : null; int captionWidth = getCaptionWidth(); boolean captionAboveComponent; if (caption == null) { captionAboveComponent = false; } else { captionAboveComponent = !caption.shouldBePlacedAfterComponent(); if (!captionAboveComponent) { availableWidth -= captionWidth; if (availableWidth < 0) { availableWidth = 0; } style.setPaddingRight(captionWidth, Unit.PX); widget.getElement().getStyle().setPosition(Position.RELATIVE); } else { captionStyle.setLeft(0, Unit.PX); } } if (marginRight > 0) { style.setMarginRight(marginRight, Unit.PX); } else { style.clearMarginRight(); } style.setPropertyPx("width", (int) availableWidth); double allocatedContentWidth = 0; if (isRelativeWidth()) { String percentWidth = getWidget().getElement().getStyle() .getWidth(); double percentage = parsePercent(percentWidth); allocatedContentWidth = availableWidth * (percentage / 100); reportActualRelativeWidth( Math.round((float) allocatedContentWidth)); } double usedWidth; // widget width in px if (isRelativeWidth()) { usedWidth = allocatedContentWidth; } else { usedWidth = getWidgetWidth(); } style.setLeft(Math.round(currentLocation), Unit.PX); AlignmentInfo alignment = getAlignment(); if (!alignment.isLeft()) { double padding = availableWidth - usedWidth; if (alignment.isHorizontalCenter()) { padding = padding / 2; } long roundedPadding = Math.round(padding); if (captionStyle != null) { captionStyle.setLeft(captionAboveComponent ? roundedPadding : roundedPadding + usedWidth, Unit.PX); } widget.getElement().getStyle().setLeft(roundedPadding, Unit.PX); } else { if (captionStyle != null) { captionStyle.setLeft(captionAboveComponent ? 0 : usedWidth, Unit.PX); } // Reset left when changing back to align left widget.getElement().getStyle().clearLeft(); } }
3.68
pulsar_AuthorizationProvider_getSubscriptionPermissionsAsync
/** * Get authorization-action permissions on a topic. * @param namespaceName * @return CompletableFuture<Map<String, Set<String>>> */ default CompletableFuture<Map<String, Set<String>>> getSubscriptionPermissionsAsync(NamespaceName namespaceName) { return FutureUtil.failedFuture(new IllegalStateException( String.format("getSubscriptionPermissionsAsync on namespace %s is not supported by the Authorization", namespaceName))); }
3.68
hadoop_FederationRegistryClient_removeAppFromRegistry
/** * Remove an application from registry. * * @param appId application id * @param ignoreMemoryState whether to ignore the memory data in terms of * known application */ public synchronized void removeAppFromRegistry(ApplicationId appId, boolean ignoreMemoryState) { Map<String, Token<AMRMTokenIdentifier>> subClusterTokenMap = this.appSubClusterTokenMap.get(appId); if (!ignoreMemoryState) { if (MapUtils.isEmpty(subClusterTokenMap)) { return; } } LOG.info("Removing all registry entries for {}.", appId); // Lastly remove the application directory String key = getRegistryKey(appId, null); try { removeKeyRegistry(this.registry, this.user, key, true, true); if (subClusterTokenMap != null) { subClusterTokenMap.clear(); } } catch (YarnException e) { LOG.error("Failed removing registry directory key {}.", key, e); } }
3.68
hadoop_HsSingleCounterPage_preHead
/* * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); setActiveNavColumnForTask(); set(DATATABLES_ID, "singleCounter"); set(initID(DATATABLES, "singleCounter"), counterTableInit()); setTableStyles(html, "singleCounter"); }
3.68
morf_FieldReference_build
/** * Builds the {@link FieldReference}. * * @return The field reference. */ @Override public FieldReference build() { return new FieldReference(alias, table, name, direction, nullValueHandling); }
3.68
hudi_BaseHoodieTableServiceClient_inlineCompaction
/** * Performs a compaction operation on a table, serially before or after an insert/upsert action. * Scheduling and execution is done inline. */ protected Option<String> inlineCompaction(Option<Map<String, String>> extraMetadata) { Option<String> compactionInstantTimeOpt = inlineScheduleCompaction(extraMetadata); compactionInstantTimeOpt.ifPresent(compactInstantTime -> { // inline compaction should auto commit as the user is never given control compact(compactInstantTime, true); }); return compactionInstantTimeOpt; }
3.68
framework_Page_getHeight
/** * Gets the new browser window height. * * @return an integer with the new pixel height of the browser window */ public int getHeight() { return height; }
3.68
hbase_CreateStoreFileWriterParams_includesTag
/** * Whether to include tags or not */ public CreateStoreFileWriterParams includesTag(boolean includesTag) { this.includesTag = includesTag; return this; }
3.68
framework_VTree_getNavigationPageUpKey
/** * Get the key that moves the selection one page up in the table. By default * this is the Page Up key but by overriding this you can change the key to * whatever you want. * * @return the key code that moves the selection one page up */ protected int getNavigationPageUpKey() { return KeyCodes.KEY_PAGEUP; }
3.68
framework_VAccordion_setHeight
/** * Sets the height for this stack item's contents. * * @param height * the height to set (in pixels), or {@code -1} to remove * height */ public void setHeight(int height) { if (height == -1) { super.setHeight(""); content.getStyle().setHeight(0, Unit.PX); } else { super.setHeight((height + getCaptionHeight()) + "px"); content.getStyle().setHeight(height, Unit.PX); content.getStyle().setTop(getCaptionHeight(), Unit.PX); } }
3.68
pulsar_TimeAverageBrokerData_reset
/** * Reuse this TimeAverageBrokerData using new data. * * @param bundles * The bundles belonging to the broker. * @param data * Map from bundle names to the data for that bundle. * @param defaultStats * The stats to use when a bundle belonging to this broker is not found in the bundle data map. */ public void reset(final Set<String> bundles, final Map<String, BundleData> data, final NamespaceBundleStats defaultStats) { shortTermMsgThroughputIn = 0; shortTermMsgThroughputOut = 0; shortTermMsgRateIn = 0; shortTermMsgRateOut = 0; longTermMsgThroughputIn = 0; longTermMsgThroughputOut = 0; longTermMsgRateIn = 0; longTermMsgRateOut = 0; for (String bundle : bundles) { final BundleData bundleData = data.get(bundle); if (bundleData == null) { shortTermMsgThroughputIn += defaultStats.msgThroughputIn; shortTermMsgThroughputOut += defaultStats.msgThroughputOut; shortTermMsgRateIn += defaultStats.msgRateIn; shortTermMsgRateOut += defaultStats.msgRateOut; longTermMsgThroughputIn += defaultStats.msgThroughputIn; longTermMsgThroughputOut += defaultStats.msgThroughputOut; longTermMsgRateIn += defaultStats.msgRateIn; longTermMsgRateOut += defaultStats.msgRateOut; } else { final TimeAverageMessageData shortTermData = bundleData.getShortTermData(); final TimeAverageMessageData longTermData = bundleData.getLongTermData(); shortTermMsgThroughputIn += shortTermData.getMsgThroughputIn(); shortTermMsgThroughputOut += shortTermData.getMsgThroughputOut(); shortTermMsgRateIn += shortTermData.getMsgRateIn(); shortTermMsgRateOut += shortTermData.getMsgRateOut(); longTermMsgThroughputIn += longTermData.getMsgThroughputIn(); longTermMsgThroughputOut += longTermData.getMsgThroughputOut(); longTermMsgRateIn += longTermData.getMsgRateIn(); longTermMsgRateOut += longTermData.getMsgRateOut(); } } }
3.68
hadoop_TaskAttemptContextImpl_getTaskAttemptID
/** * Get the unique name for this task attempt. */ public TaskAttemptID getTaskAttemptID() { return taskId; }
3.68
hbase_HFileCorruptionChecker_getMobFilesChecked
/** Returns number of mob files checked in the last HFileCorruptionChecker run */ public int getMobFilesChecked() { return mobFilesChecked.get(); }
3.68
hbase_RegionCoprocessorHost_postScannerFilterRow
/** * This will be called by the scan flow when the current scanned row is being filtered out by the * filter. * @param s the scanner * @param curRowCell The cell in the current row which got filtered out * @return whether more rows are available for the scanner or not */ public boolean postScannerFilterRow(final InternalScanner s, final Cell curRowCell) throws IOException { // short circuit for performance boolean defaultResult = true; if (!hasCustomPostScannerFilterRow) { return defaultResult; } if (this.coprocEnvironments.isEmpty()) { return defaultResult; } return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Boolean>( regionObserverGetter, defaultResult) { @Override public Boolean call(RegionObserver observer) throws IOException { return observer.postScannerFilterRow(this, s, curRowCell, getResult()); } }); }
3.68
hbase_ExecutorType_getExecutorName
/** Returns Conflation of the executor type and the passed {@code serverName}. */ String getExecutorName(String serverName) { return this.toString() + "-" + serverName.replace("%", "%%"); }
3.68
zxing_DataMatrixWriter_convertByteMatrixToBitMatrix
/** * Convert the ByteMatrix to BitMatrix. * * @param reqHeight The requested height of the image (in pixels) with the Datamatrix code * @param reqWidth The requested width of the image (in pixels) with the Datamatrix code * @param matrix The input matrix. * @return The output matrix. */ private static BitMatrix convertByteMatrixToBitMatrix(ByteMatrix matrix, int reqWidth, int reqHeight) { int matrixWidth = matrix.getWidth(); int matrixHeight = matrix.getHeight(); int outputWidth = Math.max(reqWidth, matrixWidth); int outputHeight = Math.max(reqHeight, matrixHeight); int multiple = Math.min(outputWidth / matrixWidth, outputHeight / matrixHeight); int leftPadding = (outputWidth - (matrixWidth * multiple)) / 2 ; int topPadding = (outputHeight - (matrixHeight * multiple)) / 2 ; BitMatrix output; // remove padding if requested width and height are too small if (reqHeight < matrixHeight || reqWidth < matrixWidth) { leftPadding = 0; topPadding = 0; output = new BitMatrix(matrixWidth, matrixHeight); } else { output = new BitMatrix(reqWidth, reqHeight); } output.clear(); for (int inputY = 0, outputY = topPadding; inputY < matrixHeight; inputY++, outputY += multiple) { // Write the contents of this row of the bytematrix for (int inputX = 0, outputX = leftPadding; inputX < matrixWidth; inputX++, outputX += multiple) { if (matrix.get(inputX, inputY) == 1) { output.setRegion(outputX, outputY, multiple, multiple); } } } return output; }
3.68
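The converter above is private; a hedged sketch of the public entry point that ends up calling it (contents and dimensions are illustrative):

BitMatrix bits = new DataMatrixWriter().encode("HELLO WORLD", BarcodeFormat.DATA_MATRIX, 200, 200);
// the symbol is centered with padding unless the requested size is smaller than the symbol itself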
hbase_SnapshotScannerHDFSAclHelper_snapshotAcl
/** * Set ACL when taking a snapshot * @param snapshot the snapshot descriptor * @return false if an error occurred, otherwise true */ public boolean snapshotAcl(SnapshotDescription snapshot) { try { long start = EnvironmentEdgeManager.currentTime(); TableName tableName = snapshot.getTableName(); // global user permission can be inherited from default acl automatically Set<String> userSet = getUsersWithTableReadAction(tableName, true, false); if (userSet.size() > 0) { Path path = pathHelper.getSnapshotDir(snapshot.getName()); handleHDFSAcl(new HDFSAclOperation(fs, path, userSet, HDFSAclOperation.OperationType.MODIFY, true, HDFSAclOperation.AclType.DEFAULT_ADN_ACCESS)).get(); } LOG.info("Set HDFS acl when snapshot {}, cost {} ms", snapshot.getName(), EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Set HDFS acl error when snapshot {}", snapshot, e); return false; } }
3.68
hadoop_MoveStep_setVolumeSetID
/** * Sets volume id. * * @param volumeSetID - volume ID */ public void setVolumeSetID(String volumeSetID) { this.volumeSetID = volumeSetID; }
3.68
hadoop_PoolAlignmentContext_updateResponseState
/** * Client side implementation only receives state alignment info. * It does not provide state alignment info therefore this does nothing. */ @Override public void updateResponseState(RpcHeaderProtos.RpcResponseHeaderProto.Builder header) { // Do nothing. }
3.68
flink_ResultPartitionMetrics_refreshAndGetAvg
/** * Iterates over all sub-partitions and collects the average number of queued buffers in a * sub-partition in a best-effort way. * * @return average number of queued buffers per sub-partition */ float refreshAndGetAvg() { return partition.getNumberOfQueuedBuffers() / (float) partition.getNumberOfSubpartitions(); }
3.68
hbase_OrderedBytes_decodeNumericAsLong
/** * Decode a primitive {@code long} value from the Numeric encoding. Numeric encoding is based on * {@link BigDecimal}; in the event the encoded value is larger than can be represented in a * {@code long}, this method performs an implicit narrowing conversion as described in * {@link BigDecimal#doubleValue()}. * @throws NullPointerException when the encoded value is {@code NULL}. * @throws IllegalArgumentException when the encoded value is not a Numeric. * @see #encodeNumeric(PositionedByteRange, long, Order) * @see BigDecimal#longValue() */ public static long decodeNumericAsLong(PositionedByteRange src) { // TODO: should an encoded NULL value throw unexpectedHeader() instead? if (isNull(src)) throw new NullPointerException(); if (!isNumeric(src)) throw unexpectedHeader(src.peek()); if (isNumericNaN(src)) throw unexpectedHeader(src.peek()); if (isNumericInfinite(src)) throw unexpectedHeader(src.peek()); if (isNumericZero(src)) { src.get(); return Long.valueOf(0); } return decodeNumericValue(src).longValue(); }
3.68
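A hedged round-trip sketch using the companion encodeNumeric referenced in the javadoc (the buffer size is chosen generously):

PositionedByteRange buf = new SimplePositionedMutableByteRange(32);
OrderedBytes.encodeNumeric(buf, 42L, Order.ASCENDING);
buf.setPosition(0);
long decoded = OrderedBytes.decodeNumericAsLong(buf); // decoded == 42L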
framework_Tree_getChildren
/** * Gets the IDs of all Items that are children of the specified Item. * * @see Container.Hierarchical#getChildren(Object) */ @Override public Collection<?> getChildren(Object itemId) { return ((Container.Hierarchical) items).getChildren(itemId); }
3.68
querydsl_AbstractFetchableMongodbQuery_fetchFirst
/** * Fetch first with the specific fields * * @param paths fields to return * @return first result */ public K fetchFirst(Path<?>... paths) { getQueryMixin().setProjection(paths); return fetchFirst(); }
3.68
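A hedged usage sketch; query is assumed to be a concrete fetchable MongoDB query instance (e.g. Morphia-backed) and QUser a hypothetical generated query type:

QUser user = QUser.user;
User first = query.where(user.age.gt(18))
                  .fetchFirst(user.firstName, user.lastName); // only these two fields are projected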
hmily_HashedWheelTimer_addTimeout
/** * Add {@link HashedWheelTimeout} to this bucket. */ public void addTimeout(final HashedWheelTimeout timeout) { assert timeout.bucket == null; timeout.bucket = this; if (head == null) { head = tail = timeout; } else { tail.next = timeout; timeout.prev = tail; tail = timeout; } }
3.68
morf_SqlScriptExecutorProvider_get
/** * Gets an instance of a {@link SqlScriptExecutor} with the provided visitor * set. * * @param visitor the visitor. * @return an instance of an {@link SqlScriptExecutor}. */ public SqlScriptExecutor get(SqlScriptVisitor visitor) { if (connectionResources != null) { return new SqlScriptExecutor(defaultVisitor(visitor), dataSource, sqlDialect.get(), connectionResources); } else { return new SqlScriptExecutor(defaultVisitor(visitor), dataSource, sqlDialect.get()); } }
3.68
flink_DefaultLookupCache_build
/** Creates the cache. */ public DefaultLookupCache build() { return new DefaultLookupCache( expireAfterAccessDuration, expireAfterWriteDuration, maximumSize, cacheMissingKey); }
3.68
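A hedged sketch of the builder chain that ends in this build() call; the newBuilder() factory and setter names are assumed from Flink's lookup-cache API:

DefaultLookupCache cache = DefaultLookupCache.newBuilder()
    .expireAfterWrite(Duration.ofMinutes(10))
    .maximumSize(10_000L)
    .build();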
flink_BinaryArrayWriter_setNullAt
/** * @deprecated Use {@link #createNullSetter(LogicalType)} for avoiding logical types during * runtime. */ @Deprecated public void setNullAt(int pos, LogicalType type) { switch (type.getTypeRoot()) { case BOOLEAN: setNullBoolean(pos); break; case TINYINT: setNullByte(pos); break; case SMALLINT: setNullShort(pos); break; case INTEGER: case DATE: case TIME_WITHOUT_TIME_ZONE: case INTERVAL_YEAR_MONTH: setNullInt(pos); break; case BIGINT: case TIMESTAMP_WITHOUT_TIME_ZONE: case TIMESTAMP_WITH_LOCAL_TIME_ZONE: case INTERVAL_DAY_TIME: setNullLong(pos); break; case FLOAT: setNullFloat(pos); break; case DOUBLE: setNullDouble(pos); break; default: setNullAt(pos); } }
3.68
framework_Upload_setImmediateMode
/** * Sets the immediate mode of the upload. * <p> * If the upload is in immediate mode, the file upload is started * immediately after the user has selected the file. * <p> * If the upload is not in immediate mode, after selecting the file the user * must click another button to start the upload. * <p> * The default mode of an Upload component is immediate. * * @param immediateMode * {@code true} for immediate mode, {@code false} for not * @since 8.0 */ public void setImmediateMode(boolean immediateMode) { getState().immediateMode = immediateMode; }
3.68
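A brief usage sketch; the receiver lambda body is a placeholder:

Upload upload = new Upload("Data file", (filename, mimeType) -> openOutputStream(filename)); // openOutputStream is hypothetical
upload.setImmediateMode(false); // the user must press the upload button after choosing a file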
hadoop_FsGetter_getNewInstance
/** * Gets new file system instance of given uri. * @param uri uri. * @param conf configuration. * @throws IOException raised on errors performing I/O. * @return file system. */ public FileSystem getNewInstance(URI uri, Configuration conf) throws IOException { return FileSystem.newInstance(uri, conf); }
3.68
Activiti_TreeValueExpression_dump
/** * Print the parse tree. * @param writer the writer to print to */ public void dump(PrintWriter writer) { NodePrinter.dump(writer, node); }
3.68
flink_JobGraph_getJobConfiguration
/** * Returns the configuration object for this job. Job-wide parameters should be set into that * configuration object. * * @return The configuration object for this job. */ public Configuration getJobConfiguration() { return this.jobConfiguration; }
3.68
druid_WallConfig_setSelelctAllow
/** * @deprecated use setSelectAllow */ public void setSelelctAllow(boolean selelctAllow) { this.setSelectAllow(selelctAllow); }
3.68
hbase_HRegion_replayWALFlushStartMarker
/** * Replay the flush marker from primary region by creating a corresponding snapshot of the store * memstores, only if the memstores do not have a higher seqId from an earlier wal edit (because * the events may be coming out of order). * @deprecated Since 3.0.0, will be removed in 4.0.0. Only for keep compatibility for old region * replica implementation. */ @Deprecated PrepareFlushResult replayWALFlushStartMarker(FlushDescriptor flush) throws IOException { long flushSeqId = flush.getFlushSequenceNumber(); Collection<HStore> storesToFlush = getStoresToFlush(flush); MonitoredTask status = TaskMonitor.get().createStatus("Preparing flush " + this); // we will use writestate as a coarse-grain lock for all the replay events // (flush, compaction, region open etc) synchronized (writestate) { try { if (flush.getFlushSequenceNumber() < lastReplayedOpenRegionSeqId) { LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying flush event :" + TextFormat.shortDebugString(flush) + " because its sequence id is smaller than this regions lastReplayedOpenRegionSeqId " + " of " + lastReplayedOpenRegionSeqId); return null; } if (numMutationsWithoutWAL.sum() > 0) { numMutationsWithoutWAL.reset(); dataInMemoryWithoutWAL.reset(); } if (!writestate.flushing) { // we do not have an active snapshot and corresponding this.prepareResult. This means // we can just snapshot our memstores and continue as normal. // invoke prepareFlushCache. Send null as wal since we do not want the flush events in wal PrepareFlushResult prepareResult = internalPrepareFlushCache(null, flushSeqId, storesToFlush, status, false, FlushLifeCycleTracker.DUMMY); if (prepareResult.result == null) { // save the PrepareFlushResult so that we can use it later from commit flush this.writestate.flushing = true; this.prepareFlushResult = prepareResult; status.markComplete("Flush prepare successful"); if (LOG.isDebugEnabled()) { LOG.debug(getRegionInfo().getEncodedName() + " : " + " Prepared flush with seqId:" + flush.getFlushSequenceNumber()); } } else { // special case empty memstore. We will still save the flush result in this case, since // our memstore is empty, but the primary is still flushing if ( prepareResult.getResult().getResult() == FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY ) { this.writestate.flushing = true; this.prepareFlushResult = prepareResult; if (LOG.isDebugEnabled()) { LOG.debug(getRegionInfo().getEncodedName() + " : " + " Prepared empty flush with seqId:" + flush.getFlushSequenceNumber()); } } status.abort("Flush prepare failed with " + prepareResult.result); // nothing much to do. prepare flush failed because of some reason. } return prepareResult; } else { // we already have an active snapshot. if (flush.getFlushSequenceNumber() == this.prepareFlushResult.flushOpSeqId) { // They define the same flush. Log and continue. LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush prepare marker with the same seqId: " + +flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + prepareFlushResult.flushOpSeqId + ". Ignoring"); // ignore } else if (flush.getFlushSequenceNumber() < this.prepareFlushResult.flushOpSeqId) { // We received a flush with a smaller seqNum than what we have prepared. We can only // ignore this prepare flush request. LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush prepare marker with a smaller seqId: " + +flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + prepareFlushResult.flushOpSeqId + ". Ignoring"); // ignore } else { // We received a flush with a larger seqNum than what we have prepared LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a flush prepare marker with a larger seqId: " + +flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + prepareFlushResult.flushOpSeqId + ". Ignoring"); // We do not have multiple active snapshots in the memstore or a way to merge current // memstore snapshot with the contents and resnapshot for now. We cannot take // another snapshot and drop the previous one because that will cause temporary // data loss in the secondary. So we ignore this for now, deferring the resolution // to happen when we see the corresponding flush commit marker. If we have a memstore // snapshot with x, and later received another prepare snapshot with y (where x < y), // when we see flush commit for y, we will drop snapshot for x, and can also drop all // the memstore edits if everything in memstore is < y. This is the usual case for // RS crash + recovery where we might see consecutive prepare flush wal markers. // Otherwise, this will cause more memory to be used in secondary replica until a // further prepare + commit flush is seen and replayed. } } } finally { status.cleanup(); writestate.notifyAll(); } } return null; }
3.68
flink_PipelinedSubpartition_getChannelStateFuture
/** for testing only. */ // suppress this warning as it is only for testing. @SuppressWarnings("FieldAccessNotGuarded") @VisibleForTesting CompletableFuture<List<Buffer>> getChannelStateFuture() { return channelStateFuture; }
3.68
AreaShop_ResellingRegionEvent_getBuyer
/** * Get the player that is trying to buy the region. * @return The player that is trying to buy the region */ public OfflinePlayer getBuyer() { return player; }
3.68
flink_VersionedIOReadableWritable_getReadVersion
/** * Returns the found serialization version. If this instance was not read from serialized bytes * but simply instantiated, then the current version is returned. * * @return the read serialization version, or the current version if the instance was not read * from bytes. */ public int getReadVersion() { return (readVersion == Integer.MIN_VALUE) ? getVersion() : readVersion; }
3.68
hadoop_AbstractTask_getTaskId
/** * Get TaskId for a Task. * @return the TaskId of this Task */ @Override public final TaskId getTaskId() { return taskID; }
3.68
hbase_HFileWriterImpl_createOutputStream
/** A helper method to create HFile output streams in constructors */ protected static FSDataOutputStream createOutputStream(Configuration conf, FileSystem fs, Path path, InetSocketAddress[] favoredNodes) throws IOException { FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); return FSUtils.create(conf, fs, path, perms, favoredNodes); }
3.68
hibernate-validator_MetaDataBuilder_add
/** * Adds the given element to this builder. It must be checked with * {@link #accepts(ConstrainedElement)} before, whether this is allowed or * not. * * @param constrainedElement The element to add. */ public void add(ConstrainedElement constrainedElement) { directConstraints.addAll( adaptConstraints( constrainedElement, constrainedElement.getConstraints() ) ); containerElementsConstraints.addAll( adaptConstraints( constrainedElement, constrainedElement.getTypeArgumentConstraints() ) ); isCascading = isCascading || constrainedElement.getCascadingMetaDataBuilder().isMarkedForCascadingOnAnnotatedObjectOrContainerElements(); }
3.68
framework_OnStateChangeMethod_invoke
/** * Invokes the listener method for a state change. * * @param stateChangeEvent * the state change event */ public void invoke(StateChangeEvent stateChangeEvent) { ServerConnector connector = (ServerConnector) stateChangeEvent .getSource(); Class<?> declaringClass = this.declaringClass; if (declaringClass == null) { declaringClass = connector.getClass(); } Type declaringType = TypeDataStore.getType(declaringClass); try { declaringType.getMethod(methodName).invoke(connector); } catch (NoDataException e) { throw new RuntimeException( "Couldn't invoke @OnStateChange method " + declaringType.getSignature() + "." + methodName, e); } }
3.68
framework_UIConnector_forceStateChangeRecursively
/** * Force a full recursive re-check of every connector's state variables. * * @see #forceStateChange() * * @since 7.3 * * @param connector * the connector which should get recursive forced state change */ protected static void forceStateChangeRecursively( AbstractConnector connector) { connector.forceStateChange(); for (ServerConnector child : connector.getChildren()) { if (child instanceof AbstractConnector) { forceStateChangeRecursively((AbstractConnector) child); } else { getLogger().warning( "Could not force state change for unknown connector type: " + child.getClass().getName()); } } }
3.68
hadoop_IOStatisticsBinding_trackJavaFunctionDuration
/** * Given a java function/lambda expression, * return a new one which wraps the inner and tracks * the duration of the operation, including whether * it passes/fails. * @param factory factory of duration trackers * @param statistic statistic key * @param inputFn input function * @param <A> type of argument to the input function. * @param <B> return type. * @return a new function which tracks duration and failure. */ public static <A, B> Function<A, B> trackJavaFunctionDuration( @Nullable DurationTrackerFactory factory, String statistic, Function<A, B> inputFn) { return (x) -> { // create the tracker outside try-with-resources so // that failures can be set in the catcher. DurationTracker tracker = createTracker(factory, statistic); try { // exec the input function and return its value return inputFn.apply(x); } catch (RuntimeException e) { // input function failed: note it tracker.failed(); // and rethrow throw e; } finally { // update the tracker. // this is called after the catch() call would have // set the failed flag. tracker.close(); } }; }
3.68
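A hedged usage sketch; trackerFactory is assumed to be some DurationTrackerFactory already in scope, and the statistic name is illustrative:

Function<String, Integer> timedParse = IOStatisticsBinding.trackJavaFunctionDuration(
    trackerFactory,
    "op_parse",
    Integer::parseInt);
int n = timedParse.apply("42"); // duration recorded; a NumberFormatException would be counted as a failure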
hadoop_ResourceUsageMetrics_getVirtualMemoryUsage
/** * Get the virtual memory usage. */ public long getVirtualMemoryUsage() { return virtualMemoryUsage; }
3.68
framework_AbstractSelect_getItemIdOver
/** * If the drag operation is currently over an {@link Item}, this method * returns the identifier of that {@link Item}. * */ public Object getItemIdOver() { return idOver; }
3.68
pulsar_LegacyHierarchicalLedgerRangeIterator_getEndLedgerIdByLevel
/** * Get the largest ledger id in a specified node /level1/level2. * * @param level1 * 1st level node name * @param level2 * 2nd level node name * @return the largest ledger id */ private long getEndLedgerIdByLevel(String level1, String level2) throws IOException { return StringUtils.stringToHierarchicalLedgerId(level1, level2, MAX_ID_SUFFIX); }
3.68
hudi_SparkRDDWriteClient_bootstrap
/** * Main API to run bootstrap to hudi. */ @Override public void bootstrap(Option<Map<String, String>> extraMetadata) { initTable(WriteOperationType.UPSERT, Option.ofNullable(HoodieTimeline.METADATA_BOOTSTRAP_INSTANT_TS)).bootstrap(context, extraMetadata); }
3.68
hbase_MetricSampleQuantiles_snapshot
/** * Get a snapshot of the current values of all the tracked quantiles. * @return snapshot of the tracked quantiles * @throws IOException if no items have been added to the estimator */ synchronized public Map<MetricQuantile, Long> snapshot() throws IOException { // flush the buffer first for best results insertBatch(); Map<MetricQuantile, Long> values = new HashMap<>(quantiles.length); for (int i = 0; i < quantiles.length; i++) { values.put(quantiles[i], query(quantiles[i].quantile)); } return values; }
3.68
hbase_ZKNodeTracker_blockUntilAvailable
/** * Gets the data of the node, blocking until the node is available or the specified timeout has * elapsed. * @param timeout maximum time to wait for the node data to be available, in milliseconds. Pass 0 * for no timeout. * @return data of the node * @throws InterruptedException if the waiting thread is interrupted */ public synchronized byte[] blockUntilAvailable(long timeout, boolean refresh) throws InterruptedException { if (timeout < 0) { throw new IllegalArgumentException(); } boolean notimeout = timeout == 0; long startTime = EnvironmentEdgeManager.currentTime(); long remaining = timeout; if (refresh) { try { // This does not create a watch if the node does not exist this.data = ZKUtil.getDataAndWatch(watcher, node); } catch (KeeperException e) { // We used to abort here, but in some cases the abort is ignored // (empty Abortable), so it's better to log... LOG.warn("Unexpected exception handling blockUntilAvailable", e); abortable.abort("Unexpected exception handling blockUntilAvailable", e); } } boolean nodeExistsChecked = (!refresh || data != null); while (!this.stopped && (notimeout || remaining > 0) && this.data == null) { if (!nodeExistsChecked) { try { nodeExistsChecked = (ZKUtil.checkExists(watcher, node) != -1); } catch (KeeperException e) { LOG.warn("Got exception while trying to check existence in ZooKeeper" + " of the node: " + node + ", retrying if timeout not reached", e); } // It did not exist, and now it does. if (nodeExistsChecked) { LOG.debug("Node {} now exists, resetting a watcher", node); try { // This does not create a watch if the node does not exist this.data = ZKUtil.getDataAndWatch(watcher, node); } catch (KeeperException e) { LOG.warn("Unexpected exception handling blockUntilAvailable", e); abortable.abort("Unexpected exception handling blockUntilAvailable", e); } } } // We expect a notification, but we wait with a // timeout to lower the impact of a race condition if any wait(100); remaining = timeout - (EnvironmentEdgeManager.currentTime() - startTime); } return this.data; }
3.68
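A brief usage sketch; tracker is assumed to be an already-started ZKNodeTracker subclass:

byte[] data = tracker.blockUntilAvailable(30_000, true); // wait up to 30s, re-reading the node first
if (data == null) {
  // timed out, the tracker was stopped, or the node still does not exist
}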
framework_VScrollTable_isScrollPositionVisible
/** For internal use only. May be removed or replaced in the future. */ public boolean isScrollPositionVisible() { return scrollPositionElement != null && !scrollPositionElement .getStyle().getDisplay().equals(Display.NONE.toString()); }
3.68
morf_GraphBasedUpgradeBuilder_traverseAndLog
/** * Recursive method logging the nodes. * * @param node to be logged * @param visited set of already logged nodes which should not be logged again */ private void traverseAndLog(GraphBasedUpgradeNode node, Set<GraphBasedUpgradeNode> visited) { if (!visited.contains(node)) { LOG.debug(node.toString()); visited.add(node); for (GraphBasedUpgradeNode child : node.getChildren()) { traverseAndLog(child, visited); } } }
3.68
hudi_StreamerUtil_readConfig
/** * Read config from properties file (`--props` option) and cmd line (`--hoodie-conf` option). */ public static DFSPropertiesConfiguration readConfig(org.apache.hadoop.conf.Configuration hadoopConfig, Path cfgPath, List<String> overriddenProps) { DFSPropertiesConfiguration conf = new DFSPropertiesConfiguration(hadoopConfig, cfgPath); try { if (!overriddenProps.isEmpty()) { LOG.info("Adding overridden properties to file properties."); conf.addPropsFromStream(new BufferedReader(new StringReader(String.join("\n", overriddenProps))), cfgPath); } } catch (IOException ioe) { throw new HoodieIOException("Unexpected error adding config overrides", ioe); } return conf; }
3.68
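A hedged usage sketch; the properties path is hypothetical and the override string mirrors a --hoodie-conf argument:

DFSPropertiesConfiguration props = StreamerUtil.readConfig(
    new org.apache.hadoop.conf.Configuration(),
    new Path("file:///tmp/hoodie/streamer.properties"),
    Collections.singletonList("hoodie.datasource.write.recordkey.field=uuid"));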
pulsar_AdditionalServletUtils_searchForServlets
/** * Search and load the available additional servlets. * * @param additionalServletDirectory the directory where all the additional servlets are stored * @return a collection of additional servlet definitions * @throws IOException when fail to load the available additional servlets from the provided directory. */ public AdditionalServletDefinitions searchForServlets(String additionalServletDirectory, String narExtractionDirectory) throws IOException { Path path = Paths.get(additionalServletDirectory).toAbsolutePath(); log.info("Searching for additional servlets in {}", path); AdditionalServletDefinitions servletDefinitions = new AdditionalServletDefinitions(); if (!path.toFile().exists()) { log.warn("Pulsar additional servlets directory not found"); return servletDefinitions; } try (DirectoryStream<Path> stream = Files.newDirectoryStream(path, "*.nar")) { for (Path archive : stream) { try { AdditionalServletDefinition def = AdditionalServletUtils.getAdditionalServletDefinition( archive.toString(), narExtractionDirectory); log.info("Found additional servlet from {} : {}", archive, def); checkArgument(StringUtils.isNotBlank(def.getName())); checkArgument(StringUtils.isNotBlank(def.getAdditionalServletClass())); AdditionalServletMetadata metadata = new AdditionalServletMetadata(); metadata.setDefinition(def); metadata.setArchivePath(archive); servletDefinitions.servlets().put(def.getName(), metadata); } catch (Throwable t) { log.warn("Failed to load additional servlet from {}." + " It is OK however if you want to use this additional servlet," + " please make sure you put the correct additional servlet NAR" + " package in the additional servlets directory.", archive, t); } } } return servletDefinitions; }
3.68
flink_TableColumn_getName
/** Returns the name of this column. */ public String getName() { return name; }
3.68
morf_InsertStatement_avoidDirectPath
/** * If supported by the dialect, hints to the database that the {@code APPEND} (direct path) query hint should not be used in the insert statement. * * <p>In general, as with all query plan modification, <strong>do not use this unless you know * exactly what you are doing</strong>.</p> * * <p>These directives are applied in the SQL in the order they are called on {@link InsertStatement}. This usually * affects their precedence or relative importance, depending on the platform.</p> * * @return a new insert statement with the change applied. */ public InsertStatement avoidDirectPath() { return copyOnWriteOrMutate( InsertStatementBuilder::avoidDirectPath, () -> this.hints.add(NoDirectPathQueryHint.INSTANCE) ); }
3.68
hbase_MetricsSink_incrementFailedBatches
/** * Convenience method to update metrics when batch of operations has failed. */ public void incrementFailedBatches() { mss.incrFailedBatches(); }
3.68
streampipes_AdapterHealthCheck_checkAndRestoreAdapters
/** * Checks which adapters are currently running, then calls all workers to validate that each * adapter instance is still running as expected. If an adapter is no longer running, a new worker instance is invoked. */ public void checkAndRestoreAdapters() { // Get all adapters Map<String, AdapterDescription> allRunningInstancesAdapterDescriptions = this.getAllRunningInstancesAdapterDescriptions(); // Get all worker containers that run adapters Map<String, List<AdapterDescription>> groupByWorker = this.getAllWorkersWithAdapters(allRunningInstancesAdapterDescriptions); // Get adapters that are not running anymore Map<String, AdapterDescription> allAdaptersToRecover = this.getAdaptersToRecover(groupByWorker, allRunningInstancesAdapterDescriptions); // Recover Adapters this.recoverAdapters(allAdaptersToRecover); }
3.68
hmily_ZkPassiveConfig_fileName
/** * File name string. * * @return the string */ public String fileName() { return path + "." + fileExtension; }
3.68
hadoop_OBSFileSystem_initCannedAcls
/**
   * Initialize the bucket ACL for upload and write operations.
   *
   * @param conf the configuration to use for the FS.
   */
  private void initCannedAcls(final Configuration conf) {
    // No canned acl in obs
    String cannedACLName = conf.get(OBSConstants.CANNED_ACL, OBSConstants.DEFAULT_CANNED_ACL);

    if (!cannedACLName.isEmpty()) {
      switch (cannedACLName) {
        case "Private":
        case "PublicRead":
        case "PublicReadWrite":
        case "AuthenticatedRead":
        case "LogDeliveryWrite":
        case "BucketOwnerRead":
        case "BucketOwnerFullControl":
          cannedACL = new AccessControlList();
          break;
        default:
          cannedACL = null;
      }
    } else {
      cannedACL = null;
    }
  }
3.68
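The switch above is essentially an allow-list check on a configuration string. A tiny sketch of that idea with invented names; the set of supported values is copied from the snippet, but the helper itself is not part of the OBS connector.

import java.util.Set;

/** Hypothetical config check: accept a canned-ACL name only if it is on the allow-list. */
final class CannedAclSketch {

    private static final Set<String> SUPPORTED = Set.of(
        "Private", "PublicRead", "PublicReadWrite", "AuthenticatedRead",
        "LogDeliveryWrite", "BucketOwnerRead", "BucketOwnerFullControl");

    /** Returns the validated name, or null when the value is empty or unknown. */
    static String resolveCannedAcl(String configuredName) {
        if (configuredName == null || configuredName.isEmpty()) {
            return null;                       // nothing configured
        }
        return SUPPORTED.contains(configuredName) ? configuredName : null;
    }

    public static void main(String[] args) {
        System.out.println(resolveCannedAcl("PublicRead"));   // PublicRead
        System.out.println(resolveCannedAcl("Unknown"));      // null
    }
}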
framework_VaadinService_findVaadinSession
/**
   * Attempts to find a Vaadin service session associated with this request.
   * <p>
   * Handles locking of the session internally to avoid creation of duplicate
   * sessions by two threads simultaneously.
   * </p>
   *
   * @param request
   *            the request to get a vaadin service session for.
   *
   * @see VaadinSession
   *
   * @return the vaadin service session for the request, or <code>null</code>
   *         if no session is found and this is a request for which a new
   *         session shouldn't be created.
   */
  public VaadinSession findVaadinSession(VaadinRequest request)
      throws ServiceException, SessionExpiredException {
    VaadinSession vaadinSession = findOrCreateVaadinSession(request);
    if (vaadinSession == null) {
      return null;
    }

    VaadinSession.setCurrent(vaadinSession);
    request.setAttribute(VaadinSession.class.getName(), vaadinSession);

    return vaadinSession;
  }
3.68
framework_DropTargetExtension_onDrop
/**
   * Invoked when a <code>drop</code> has been received from client side.
   * Fires the {@link DropEvent}.
   *
   * @param types
   *            List of data types from {@code DataTransfer.types} object.
   * @param data
   *            Map containing all types and corresponding data from the
   *            {@code DataTransfer} object.
   * @param dropEffect
   *            the drop effect
   * @param mouseEventDetails
   *            mouse event details object containing information about the
   *            drop event
   */
  protected void onDrop(List<String> types, Map<String, String> data,
      DropEffect dropEffect, MouseEventDetails mouseEventDetails) {

    // Create a linked map that preserves the order of types
    Map<String, String> dataPreserveOrder = new LinkedHashMap<>();
    types.forEach(type -> dataPreserveOrder.put(type, data.get(type)));

    DropEvent<T> event = new DropEvent<>(getParent(), dataPreserveOrder,
        dropEffect, getUI().getActiveDragSource(), mouseEventDetails);

    fireEvent(event);
  }
3.68
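A tiny demonstration of why a LinkedHashMap is chosen above: it iterates in insertion order, so the client-side type order survives, whereas a plain HashMap gives no ordering guarantee. The MIME types and values are made up for illustration.

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PreserveTypeOrderSketch {
    public static void main(String[] args) {
        List<String> types = List.of("text/uri-list", "text/plain", "text/html");
        Map<String, String> data = Map.of(
            "text/plain", "hello",
            "text/html", "<b>hello</b>",
            "text/uri-list", "https://example.com");

        // LinkedHashMap iterates in insertion order, so the type order from the list is kept.
        Map<String, String> ordered = new LinkedHashMap<>();
        types.forEach(type -> ordered.put(type, data.get(type)));
        System.out.println(ordered.keySet());   // [text/uri-list, text/plain, text/html]

        // A HashMap gives no such guarantee; the key order here is unspecified.
        System.out.println(new HashMap<>(ordered).keySet());
    }
}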
pulsar_KubernetesServiceAccountTokenAuthProvider_cacheAuthData
/**
   * No need to cache anything. Kubernetes generates the token used for authentication.
   */
  @Override
  public Optional<FunctionAuthData> cacheAuthData(Function.FunctionDetails funcDetails,
      AuthenticationDataSource authenticationDataSource) throws Exception {
    return Optional.empty();
  }
3.68
hbase_ProcedureCoordinator_rpcConnectionFailure
/**
   * The connection to the rest of the procedure group (members and coordinator) has been
   * broken/lost/failed. This should fail any interested procedures, but not attempt to notify other
   * members since we cannot reach them anymore.
   * @param message description of the error
   * @param cause the actual cause of the failure
   */
  void rpcConnectionFailure(final String message, final IOException cause) {
    Collection<Procedure> toNotify = procedures.values();
    boolean isTraceEnabled = LOG.isTraceEnabled();
    LOG.debug("received connection failure: " + message, cause);
    for (Procedure proc : toNotify) {
      if (proc == null) {
        continue;
      }
      // notify the elements, if they aren't null
      if (isTraceEnabled) {
        LOG.trace("connection failure - notify procedure: " + proc.getName());
      }
      proc.receive(new ForeignException(proc.getName(), cause));
    }
  }
3.68
hbase_ProcedureExecutor_submitProcedures
/**
   * Add a set of new root-procedures to the executor.
   * @param procs the new procedures to execute.
   */
  // TODO: Do we need to take nonces here?
  public void submitProcedures(Procedure<TEnvironment>[] procs) {
    Preconditions.checkArgument(lastProcId.get() >= 0);
    if (procs == null || procs.length <= 0) {
      return;
    }

    // Prepare procedure
    for (int i = 0; i < procs.length; ++i) {
      prepareProcedure(procs[i]).setProcId(nextProcId());
    }

    // Commit the transaction
    store.insert(procs);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Stored " + Arrays.toString(procs));
    }

    // Add the procedure to the executor
    for (int i = 0; i < procs.length; ++i) {
      pushProcedure(procs[i]);
    }
  }
3.68
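A standalone sketch of the submission order the method above relies on: assign ids and prepare first, persist the whole batch, and only then make the work visible to the run queue, so nothing becomes runnable before it is stored. The Task type and the in-memory store are invented stand-ins, not HBase's procedure machinery.

import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;

/** Hypothetical executor: tasks become runnable only after the whole batch is persisted. */
class BatchSubmitSketch {

    record Task(long id, String name) { }

    private final AtomicLong nextId = new AtomicLong(1);
    private final List<Task> store = new ArrayList<>();          // stand-in for the durable store
    private final Queue<Task> runQueue = new ConcurrentLinkedQueue<>();

    void submit(List<String> names) {
        // 1. Prepare: assign an id to every task before anything is stored or scheduled.
        List<Task> batch = names.stream()
            .map(n -> new Task(nextId.getAndIncrement(), n))
            .toList();

        // 2. Persist the whole batch ("commit the transaction").
        store.addAll(batch);

        // 3. Only now make the tasks visible to the workers.
        runQueue.addAll(batch);
    }
}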
flink_Bucket_restore
/**
   * Restores a {@code Bucket} from the state included in the provided {@link BucketState}.
   *
   * @param subtaskIndex the index of the subtask creating the bucket.
   * @param initialPartCounter the initial counter for the part files of the bucket.
   * @param bucketWriter the {@link BucketWriter} used to write part files in the bucket.
   * @param rollingPolicy the policy based on which a bucket rolls its currently open part file
   *     and opens a new one.
   * @param bucketState the initial state of the restored bucket.
   * @param fileListener the listener about the status of file.
   * @param <IN> the type of input elements to the sink.
   * @param <BucketID> the type of the identifier of the bucket, as returned by the {@link
   *     BucketAssigner}
   * @param outputFileConfig the part file configuration.
   * @return The restored Bucket.
   */
  static <IN, BucketID> Bucket<IN, BucketID> restore(
      final int subtaskIndex,
      final long initialPartCounter,
      final BucketWriter<IN, BucketID> bucketWriter,
      final RollingPolicy<IN, BucketID> rollingPolicy,
      final BucketState<BucketID> bucketState,
      @Nullable final FileLifeCycleListener<BucketID> fileListener,
      final OutputFileConfig outputFileConfig)
      throws IOException {
    return new Bucket<>(
        subtaskIndex,
        initialPartCounter,
        bucketWriter,
        rollingPolicy,
        bucketState,
        fileListener,
        outputFileConfig);
  }
3.68
graphhopper_MatrixResponse_hasProblems
/**
   * @return true if there are invalid or disconnected points (which both do not yield an error in case we do not fail fast).
   * @see GHMRequest#setFailFast(boolean)
   */
  public boolean hasProblems() {
    return !disconnectedPoints.isEmpty()
        || !invalidFromPoints.isEmpty()
        || !invalidToPoints.isEmpty();
  }
3.68
hbase_AsyncConnection_getTable
/**
   * Retrieve an {@link AsyncTable} implementation for accessing a table.
   * <p>
   * This method no longer checks table existence; if the table does not exist, an exception will
   * be thrown only when the first operation is attempted.
   * @param tableName the name of the table
   * @param pool the thread pool to use for executing callback
   * @return an AsyncTable to use for interactions with this table
   */
  default AsyncTable<ScanResultConsumer> getTable(TableName tableName, ExecutorService pool) {
    return getTableBuilder(tableName, pool).build();
  }
3.68
hadoop_RollingFileSystemSink_checkIfPropertyExists
/**
   * Throw a {@link MetricsException} if the given property is not set.
   *
   * @param key the key to validate
   */
  private void checkIfPropertyExists(String key) {
    if (!properties.containsKey(key)) {
      throw new MetricsException("Metrics2 configuration is missing " + key
          + " property");
    }
  }
3.68
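A minimal fail-fast variant of the same check against a plain java.util.Properties object; the property names and the exception type are illustrative, not Hadoop's.

import java.util.Properties;

/** Hypothetical fail-fast check: missing required keys abort configuration loading early. */
final class RequiredPropertySketch {

    static void requireProperty(Properties properties, String key) {
        if (!properties.containsKey(key)) {
            throw new IllegalStateException("Configuration is missing required property: " + key);
        }
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("sink.path", "/var/metrics");

        requireProperty(props, "sink.path");     // passes
        requireProperty(props, "sink.basename"); // throws IllegalStateException
    }
}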
pulsar_HeapHistogramUtil_callDiagnosticCommand
/**
   * Calls a diagnostic command.
   * The available operations are similar to what the jcmd commandline tool has,
   * however the naming of the operations is different. The "help" operation can be used
   * to find out the available operations. For example, the jcmd command "Thread.print" maps
   * to the "threadPrint" operation name.
   */
  static String callDiagnosticCommand(String operationName, String... args)
      throws JMException {
    return (String) ManagementFactory.getPlatformMBeanServer()
        .invoke(new ObjectName("com.sun.management:type=DiagnosticCommand"),
            operationName,
            new Object[] {args},
            new String[] {String[].class.getName()});
  }
3.68
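A hedged usage example of the same DiagnosticCommand MBean, invoked directly to print a class histogram. The operation name gcClassHistogram corresponds to jcmd's GC.class_histogram on HotSpot JVMs; availability of the MBean and of individual operations depends on the JVM in use.

import java.lang.management.ManagementFactory;
import javax.management.JMException;
import javax.management.ObjectName;

public class DiagnosticCommandExample {
    public static void main(String[] args) throws JMException {
        // Invoke the DiagnosticCommand MBean the same way the helper above does.
        String histogram = (String) ManagementFactory.getPlatformMBeanServer().invoke(
                new ObjectName("com.sun.management:type=DiagnosticCommand"),
                "gcClassHistogram",                       // jcmd: GC.class_histogram
                new Object[] {new String[0]},             // no extra arguments
                new String[] {String[].class.getName()});
        System.out.println(histogram);
    }
}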
hbase_AsyncRegionLocationCache_cleanProblematicOverlappedRegions
/**
   * When caching a location, the region may have been the result of a merge. Check to see if the
   * region's boundaries overlap any other cached locations in a problematic way. Those would have
   * been merge parents which no longer exist. We need to proactively clear them out to avoid a case
   * where a merged region which receives no requests never gets cleared. This causes requests to
   * other merged regions after it to see the wrong cached location.
   * <p>
   * For example, if we have Start_New < Start_Old < End_Old < End_New, and we only access rows
   * within the range [End_Old, End_New], the cache will always return the old region; we then find
   * out the row is not in its range, try to get the new region, get [Start_New, End_New), and still
   * fall into the same situation.
   * <p>
   * If Start_Old is less than Start_New, even if we have overlap, it is not a problem, as when the
   * row is greater than Start_New, we will locate to the new region, and if the row is less than
   * Start_New, it will fall into the old region's range and we will try to access the region and
   * get a NotServing exception, and then we will clean the cache.
   * <p>
   * See HBASE-27650
   * @param locations the new location that was just cached
   */
  private void cleanProblematicOverlappedRegions(RegionLocations locations) {
    RegionInfo region = locations.getRegionLocation().getRegion();

    boolean isLast = isEmptyStopRow(region.getEndKey());

    while (true) {
      Map.Entry<byte[], RegionLocations> overlap =
        isLast ? cache.lastEntry() : cache.lowerEntry(region.getEndKey());
      if (
        overlap == null || overlap.getValue() == locations
          || Bytes.equals(overlap.getKey(), region.getStartKey())
      ) {
        break;
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug(
          "Removing cached location {} (endKey={}) because it overlaps with "
            + "new location {} (endKey={})",
          overlap.getValue(),
          Bytes.toStringBinary(overlap.getValue().getRegionLocation().getRegion().getEndKey()),
          locations, Bytes.toStringBinary(locations.getRegionLocation().getRegion().getEndKey()));
      }

      cache.remove(overlap.getKey());
    }
  }
3.68
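A standalone sketch of the eviction loop above on a plain TreeMap keyed by range start: after inserting a new range, walk backwards from its end key and drop every cached entry that starts strictly inside it. The integer ranges and the OverlapCleanupSketch type are invented for illustration, and the last-region special case is omitted.

import java.util.Map;
import java.util.TreeMap;

/** Hypothetical range cache: evict stale entries that start inside a newly cached range. */
class OverlapCleanupSketch {

    record Range(int start, int end) { }           // [start, end)

    private final TreeMap<Integer, Range> cacheByStart = new TreeMap<>();

    void put(Range newRange) {
        // Drop every older entry whose start key falls strictly inside the new range.
        while (true) {
            Map.Entry<Integer, Range> overlap = cacheByStart.lowerEntry(newRange.end());
            if (overlap == null || overlap.getKey().intValue() == newRange.start()) {
                break;                              // nothing left that overlaps problematically
            }
            cacheByStart.remove(overlap.getKey());
        }
        cacheByStart.put(newRange.start(), newRange);
    }

    public static void main(String[] args) {
        OverlapCleanupSketch cache = new OverlapCleanupSketch();
        cache.put(new Range(10, 20));
        cache.put(new Range(20, 30));
        cache.put(new Range(10, 30));               // merged range: evicts the [20, 30) parent
        System.out.println(cache.cacheByStart);     // {10=Range[start=10, end=30]}
    }
}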
flink_DefaultConfigurableOptionsFactory_configure
/**
   * Creates a {@link DefaultConfigurableOptionsFactory} instance from a {@link ReadableConfig}.
   *
   * <p>If no options within {@link RocksDBConfigurableOptions} have ever been configured, the
   * created RocksDBOptionsFactory would not override anything defined in {@link
   * PredefinedOptions}.
   *
   * @param configuration Configuration to be used for the ConfigurableRocksDBOptionsFactory
   *     creation
   * @return A ConfigurableRocksDBOptionsFactory created from the given configuration
   */
  @Override
  public DefaultConfigurableOptionsFactory configure(ReadableConfig configuration) {
    for (ConfigOption<?> option : CANDIDATE_CONFIGS) {
      Optional<?> newValue = configuration.getOptional(option);

      if (newValue.isPresent()) {
        checkArgumentValid(option, newValue.get());
        this.configuredOptions.put(option.key(), newValue.get().toString());
      }
    }
    return this;
  }
3.68
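A standalone sketch of the same configure idea in plain Java: walk a fixed list of candidate keys, copy only the values the user actually set, and validate each one before storing it. The key names are illustrative and the blank-value check is a stand-in for checkArgumentValid.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;

/** Hypothetical options factory: only explicitly set candidate keys override the defaults. */
class ConfigureSketch {

    private static final List<String> CANDIDATE_KEYS =
        List.of("state.backend.rocksdb.block.cache-size", "state.backend.rocksdb.thread.num");

    private final Map<String, String> configuredOptions = new HashMap<>();

    ConfigureSketch configure(Map<String, String> userConfig) {
        for (String key : CANDIDATE_KEYS) {
            Optional<String> newValue = Optional.ofNullable(userConfig.get(key));
            newValue.ifPresent(value -> {
                if (value.isBlank()) {                      // stand-in for checkArgumentValid
                    throw new IllegalArgumentException("Empty value for " + key);
                }
                configuredOptions.put(key, value);          // defaults stay untouched otherwise
            });
        }
        return this;
    }

    Map<String, String> configuredOptions() { return configuredOptions; }
}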
morf_FilteredDataSetProducerAdapter_tableNames
/**
   * If multiple calls to this are expected, consider caching the list of table names.
   * @see org.alfasoftware.morf.dataset.SchemaAdapter#tableNames()
   */
  @Override
  public Collection<String> tableNames() {
    return Collections2.filter(delegate.tableNames(), includingPredicate);
  }
3.68
morf_ExistingViewStateLoader_getViewsToDeploy
/**
   * @return the views which need to be deployed.
   */
  public Collection<View> getViewsToDeploy() {
    return viewsToDeploy;
  }
3.68
hbase_MetaTableAccessor_getAllRegions
/**
   * Lists all of the regions currently in META.
   * @param connection to connect with
   * @param excludeOfflinedSplitParents true to leave out offlined split-parent regions from the
   *          returned list, false to include them
   * @return List of all user-space regions.
   */
  public static List<RegionInfo> getAllRegions(Connection connection,
    boolean excludeOfflinedSplitParents) throws IOException {
    List<Pair<RegionInfo, ServerName>> result;

    result = getTableRegionsAndLocations(connection, null, excludeOfflinedSplitParents);

    return getListOfRegionInfos(result);
  }
3.68
hadoop_SkylineStoreValidator_validate
/**
   * Check if pipelineId is <em>null</em> or resourceOverTime is <em>null</em>.
   *
   * @param pipelineId the id of the recurring pipeline.
   * @param resourceOverTime predicted {@code Resource} allocation to be added.
   * @throws SkylineStoreException if input parameters are invalid.
   */
  public final void validate(final String pipelineId,
      final RLESparseResourceAllocation resourceOverTime)
      throws SkylineStoreException {
    validate(pipelineId);
    if (resourceOverTime == null) {
      StringBuilder sb = new StringBuilder();
      sb.append("Resource allocation for " + pipelineId + " is null.");
      LOGGER.error(sb.toString());
      throw new NullRLESparseResourceAllocationException(sb.toString());
    }
  }
3.68
querydsl_GeometryExpressions_ymax
/**
   * Returns Y maxima of a bounding box 2d or 3d or a geometry.
   *
   * @param expr geometry
   * @return y maxima
   */
  public static NumberExpression<Double> ymax(GeometryExpression<?> expr) {
    return Expressions.numberOperation(Double.class, SpatialOps.YMAX, expr);
  }
3.68
flink_BlobServerConnection_writeErrorToStream
/**
   * Writes to the output stream the error return code, and the given exception in serialized
   * form.
   *
   * @param out The output stream to write to.
   * @param t The exception to send.
   * @throws IOException Thrown, if the output stream could not be written to.
   */
  private static void writeErrorToStream(OutputStream out, Throwable t) throws IOException {
    byte[] bytes = InstantiationUtil.serializeObject(t);
    out.write(RETURN_ERROR);
    writeLength(bytes.length, out);
    out.write(bytes);
  }
3.68
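A standalone sketch of the same wire layout used above — a one-byte return code, a length prefix, then the serialized payload — together with the matching read side. The constants, the string payload, and the helper names are invented; Flink's real implementation serializes the Throwable itself.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

/** Hypothetical error frame: [1-byte return code][4-byte length][payload bytes]. */
final class ErrorFrameSketch {

    static final int RETURN_OKAY = 0;
    static final int RETURN_ERROR = 1;

    static void writeError(DataOutputStream out, String message) throws IOException {
        byte[] payload = message.getBytes(StandardCharsets.UTF_8);
        out.writeByte(RETURN_ERROR);     // status byte first
        out.writeInt(payload.length);    // then the length prefix
        out.write(payload);              // then the payload itself
    }

    static String readError(DataInputStream in) throws IOException {
        int status = in.readByte();
        if (status != RETURN_ERROR) {
            throw new IOException("Unexpected status byte: " + status);
        }
        byte[] payload = new byte[in.readInt()];
        in.readFully(payload);
        return new String(payload, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        writeError(new DataOutputStream(buffer), "table not found");
        System.out.println(readError(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()))));
    }
}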
zxing_CalendarParsedResult_getEndTimestamp
/**
   * @return event end timestamp in milliseconds, or -1 if the event has no duration
   * @see #getStartTimestamp()
   */
  public long getEndTimestamp() {
    return end;
  }
3.68
hbase_ImmutableSegment_getSnapshotScanners
/**
   * We create a new {@link SnapshotSegmentScanner} to increase the reference count of
   * {@link MemStoreLABImpl} used by this segment.
   */
  List<KeyValueScanner> getSnapshotScanners() {
    return Collections.singletonList(new SnapshotSegmentScanner(this));
  }
3.68
framework_BasicWeekClickHandler_setDates
/**
   * Set the start and end dates for the event.
   *
   * @param event
   *            The event whose start and end dates should be set
   * @param start
   *            The start date
   * @param end
   *            The end date
   */
  protected void setDates(WeekClick event, Date start, Date end) {
    event.getComponent().setStartDate(start);
    event.getComponent().setEndDate(end);
  }
3.68
framework_TreeGrid_addExpandListener
/**
   * Adds an ExpandListener to this TreeGrid.
   *
   * @see ExpandEvent
   *
   * @param listener
   *            the listener to add
   * @return a registration for the listener
   */
  public Registration addExpandListener(ExpandListener<T> listener) {
    return addListener(ExpandEvent.class, listener,
        ExpandListener.EXPAND_METHOD);
  }
3.68