name: stringlengths (12 to 178)
code_snippet: stringlengths (8 to 36.5k)
score: float64 (3.26 to 3.68)
flink_KeyedStream_inEventTime
/** Sets the time characteristic to event time. */ public IntervalJoin<T1, T2, KEY> inEventTime() { timeBehaviour = TimeBehaviour.EventTime; return this; }
3.68
hbase_MobFileCache_hashFileName
/** * Use murmurhash to reduce the conflicts of hashed file names. Note that hash * conflicts may cause deadlocks when opening mob files while evicting some other files, as * described in HBASE-28047. */ private long hashFileName(String fileName) { return Hashing.murmur3_128().hashString(fileName, java.nio.charset.StandardCharsets.UTF_8) .asLong(); }
3.68
hadoop_ExtensionHelper_getUserAgentSuffix
/** * Invoke {@link BoundDTExtension#getUserAgentSuffix()} or * return the default value. * @param extension extension to invoke * @param def default if the class is of the wrong type. * @return a user agent suffix */ public static String getUserAgentSuffix(Object extension, String def) { return ifBoundDTExtension(extension, BoundDTExtension::getUserAgentSuffix) .orElse(def); }
3.68
hbase_SnapshotManifest_load
/** * Load the information in the SnapshotManifest. Called by SnapshotManifest.open() If the format * is v2 and there is no data-manifest, means that we are loading an in-progress snapshot. Since * we support rolling-upgrades, we loook for v1 and v2 regions format. */ private void load() throws IOException { switch (getSnapshotFormat(desc)) { case SnapshotManifestV1.DESCRIPTOR_VERSION: { this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir); ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); try { this.regionManifests = SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc); } finally { tpool.shutdown(); } break; } case SnapshotManifestV2.DESCRIPTOR_VERSION: { SnapshotDataManifest dataManifest = readDataManifest(); if (dataManifest != null) { htd = ProtobufUtil.toTableDescriptor(dataManifest.getTableSchema()); regionManifests = dataManifest.getRegionManifestsList(); } else { // Compatibility, load the v1 regions // This happens only when the snapshot is in-progress and the cache wants to refresh. List<SnapshotRegionManifest> v1Regions, v2Regions; ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); try { v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc); v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs, workingDir, desc, manifestSizeLimit); } catch (InvalidProtocolBufferException e) { throw new CorruptedSnapshotException( "unable to parse region manifest " + e.getMessage(), e); } finally { tpool.shutdown(); } if (v1Regions != null && v2Regions != null) { regionManifests = new ArrayList<>(v1Regions.size() + v2Regions.size()); regionManifests.addAll(v1Regions); regionManifests.addAll(v2Regions); } else if (v1Regions != null) { regionManifests = v1Regions; } else /* if (v2Regions != null) */ { regionManifests = v2Regions; } } break; } default: throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(), ProtobufUtil.createSnapshotDesc(desc)); } }
3.68
hadoop_AdminACLsManager_isAdmin
/** * Returns whether the specified user/group is an administrator * * @param callerUGI user/group to check * @return <tt>true</tt> if the UserGroupInformation specified * is a member of the access control list for administrators */ public boolean isAdmin(UserGroupInformation callerUGI) { return adminAcl.isUserAllowed(callerUGI); }
3.68
hadoop_AllocateResponse_availableResources
/** * Set the <code>availableResources</code> of the response. * @see AllocateResponse#setAvailableResources(Resource) * @param availableResources * <code>availableResources</code> of the response * @return {@link AllocateResponseBuilder} */ @Private @Unstable public AllocateResponseBuilder availableResources( Resource availableResources) { allocateResponse.setAvailableResources(availableResources); return this; }
3.68
flink_FlinkContainersSettings_setConfigOption
/** * Sets a single Flink configuration parameter (the options for flink-conf.yaml) and returns * a reference to this Builder enabling method chaining. * * @param <T> The type parameter. * @param option The option. * @param value The value. * @return A reference to this Builder. */ public <T> Builder setConfigOption(ConfigOption<T> option, T value) { this.flinkConfiguration.set(option, value); return this; }
3.68
framework_Tree_addCollapseListener
/** * Adds the collapse listener. * * @param listener * the Listener to be added. */ public void addCollapseListener(CollapseListener listener) { addListener(CollapseEvent.class, listener, CollapseListener.COLLAPSE_METHOD); }
3.68
framework_Slot_setWidget
/* * (non-Javadoc) * * @see com.google.gwt.user.client.ui.SimplePanel#setWidget(com.google.gwt * .user.client.ui.Widget) */ @Override public void setWidget(Widget w) { detachListeners(); super.setWidget(w); attachListeners(); }
3.68
flink_AggregatorWithName_getName
/** * Gets the name that the aggregator is registered under. * * @return The name that the aggregator is registered under. */ public String getName() { return name; }
3.68
flink_Operator_name
/** * Sets the name of this operator. This overrides the default name, which is either a generated * description of the operation (such as "Aggregate(1:SUM, 2:MIN)") or the name of the * user-defined function or input/output format executed by the operator. * * @param newName The name for this operator. * @return The operator with a new name. */ public O name(String newName) { this.name = newName; @SuppressWarnings("unchecked") O returnType = (O) this; return returnType; }
3.68
hbase_AccessControlUtil_buildGrantRequest
/** * Create a request to grant user global permissions. * @param username the short user name to grant permissions to * @param mergeExistingPermissions whether to merge the granted permissions with the user's existing permissions * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ public static AccessControlProtos.GrantRequest buildGrantRequest(String username, boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.GlobalPermission.Builder permissionBuilder = AccessControlProtos.GlobalPermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } ret.setType(AccessControlProtos.Permission.Type.Global).setGlobalPermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() .setUserPermission(AccessControlProtos.UserPermission.newBuilder() .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) .setMergeExistingPermissions(mergeExistingPermissions).build(); }
3.68
flink_Broker_remove
/** Blocking retrieval and removal of the object to share. */ public void remove(String key) { mediations.remove(key); }
3.68
flink_JobEdge_isForward
/** Gets whether the edge is a forward edge. */ public boolean isForward() { return isForward; }
3.68
flink_OperatorChain_closeAllOperators
/** * Execute {@link StreamOperator#close()} of each operator in the chain of this {@link * StreamTask}. Closing happens from <b>tail to head</b> operator in the chain. */ public void closeAllOperators() throws Exception { isClosed = true; }
3.68
flink_RouteResult_pathParams
/** Returns all params embedded in the request path. */ public Map<String, String> pathParams() { return pathParams; }
3.68
flink_ChangelogKeyedStateBackend_handleMaterializationResult
/** * This method is not thread safe. It should be called either under a lock or through task * mailbox executor. */ @Override public void handleMaterializationResult( SnapshotResult<KeyedStateHandle> materializedSnapshot, long materializationID, SequenceNumber upTo) { LOG.info( "Task {} finishes materialization, updates the snapshotState upTo {} : {}", subtaskName, upTo, materializedSnapshot); changelogSnapshotState = materializedSnapshot.getTaskLocalSnapshot() == null ? new ChangelogSnapshotState( getMaterializedResult(materializedSnapshot), Collections.emptyList(), upTo, materializationID) : new ChangelogSnapshotState( getMaterializedResult(materializedSnapshot), getLocalMaterializedResult(materializedSnapshot), Collections.emptyList(), Collections.emptyList(), upTo, materializationID); changelogTruncateHelper.materialized(upTo); }
3.68
hbase_HMaster_listTableNames
/** * Returns the list of table names that match the specified request * @param regex The regular expression to match against, or null if querying for all * @param namespace the namespace to query, or null if querying for all * @param includeSysTables False to match only against userspace tables * @return the list of table names */ public List<TableName> listTableNames(final String namespace, final String regex, final boolean includeSysTables) throws IOException { List<TableDescriptor> htds = new ArrayList<>(); if (cpHost != null) { cpHost.preGetTableNames(htds, regex); } htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables); if (cpHost != null) { cpHost.postGetTableNames(htds, regex); } List<TableName> result = new ArrayList<>(htds.size()); for (TableDescriptor htd : htds) result.add(htd.getTableName()); return result; }
3.68
flink_CrossOperator_projectTuple18
/** * Projects a pair of crossed elements to a {@link Tuple} with the previously selected * fields. * * @return The projected data set. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17> ProjectCross< I1, I2, Tuple18< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> projectTuple18() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo< Tuple18< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>> tType = new TupleTypeInfo< Tuple18< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>(fTypes); return new ProjectCross< I1, I2, Tuple18< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17>>( this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint); }
3.68
hadoop_AbfsOutputStream_flush
/** * Flushes this output stream and forces any buffered output bytes to be * written out. If any data remains in the payload it is committed to the * service. Data is queued for writing and forced out to the service * before the call returns. */ @Override public void flush() throws IOException { if (!disableOutputStreamFlush) { flushInternalAsync(); } }
3.68
hadoop_AzureNativeFileSystemStore_isAtomicRenameKey
/** * Checks if the given key in Azure storage should have synchronized * atomic folder rename createNonRecursive implemented. */ @Override public boolean isAtomicRenameKey(String key) { return isKeyForDirectorySet(key, atomicRenameDirs); }
3.68
dubbo_LruCache_get
/** * API to return stored value using a key against the calling thread specific store. * @param key Unique identifier for cache lookup * @return Return stored object against key */ @Override public Object get(Object key) { return store.get(key); }
3.68
framework_StringToLongConverter_getModelType
/* * (non-Javadoc) * * @see com.vaadin.data.util.converter.Converter#getModelType() */ @Override public Class<Long> getModelType() { return Long.class; }
3.68
framework_FilesystemContainer_addContainerProperty
/* * (non-Javadoc) * * @see com.vaadin.data.Container#addContainerProperty(java.lang.Object, * java.lang.Class, java.lang.Object) */ @Override public boolean addContainerProperty(Object propertyId, Class<?> type, Object defaultValue) throws UnsupportedOperationException { throw new UnsupportedOperationException( "File system container does not support this operation"); }
3.68
flink_CheckpointStatsCounts_incrementInProgressCheckpoints
/** Increments the number of total and in progress checkpoints. */ void incrementInProgressCheckpoints() { numInProgressCheckpoints++; numTotalCheckpoints++; }
3.68
dubbo_RegistryDirectory_refreshInvoker
/** * Convert the invokerURL list to the Invoker Map. The rules of the conversion are as follows: * <ol> * <li> If URL has been converted to invoker, it is no longer re-referenced and obtained directly from the cache, * and notice that any parameter changes in the URL will be re-referenced.</li> * <li>If the incoming invoker list is not empty, it means that it is the latest invoker list.</li> * <li>If the list of incoming invokerUrl is empty, It means that the rule is only a override rule or a route * rule, which needs to be re-contrasted to decide whether to re-reference.</li> * </ol> * * @param invokerUrls this parameter can't be null */ private void refreshInvoker(List<URL> invokerUrls) { Assert.notNull(invokerUrls, "invokerUrls should not be null"); if (invokerUrls.size() == 1 && invokerUrls.get(0) != null && EMPTY_PROTOCOL.equals(invokerUrls.get(0).getProtocol())) { refreshRouter( BitList.emptyList(), () -> this.forbidden = true // Forbid to access ); destroyAllInvokers(); // Close all invokers } else { this.forbidden = false; // Allow to access if (invokerUrls == Collections.<URL>emptyList()) { invokerUrls = new ArrayList<>(); } // use local reference to avoid NPE as this.cachedInvokerUrls will be set null by destroyAllInvokers(). Set<URL> localCachedInvokerUrls = this.cachedInvokerUrls; if (invokerUrls.isEmpty()) { if (CollectionUtils.isNotEmpty(localCachedInvokerUrls)) { // 1-4 Empty address. logger.warn( REGISTRY_EMPTY_ADDRESS, "configuration ", "", "Service" + serviceKey + " received empty address list with no EMPTY protocol set, trigger empty protection."); invokerUrls.addAll(localCachedInvokerUrls); } } else { localCachedInvokerUrls = new HashSet<>(); localCachedInvokerUrls.addAll(invokerUrls); // Cached invoker urls, convenient for comparison this.cachedInvokerUrls = localCachedInvokerUrls; } if (invokerUrls.isEmpty()) { return; } // use local reference to avoid NPE as this.urlInvokerMap will be set null concurrently at // destroyAllInvokers(). Map<URL, Invoker<T>> localUrlInvokerMap = this.urlInvokerMap; // can't use local reference as oldUrlInvokerMap's mappings might be removed directly at toInvokers(). Map<URL, Invoker<T>> oldUrlInvokerMap = null; if (localUrlInvokerMap != null) { // the initial capacity should be set greater than the maximum number of entries divided by the load // factor to avoid resizing. oldUrlInvokerMap = new LinkedHashMap<>(Math.round(1 + localUrlInvokerMap.size() / DEFAULT_HASHMAP_LOAD_FACTOR)); localUrlInvokerMap.forEach(oldUrlInvokerMap::put); } Map<URL, Invoker<T>> newUrlInvokerMap = toInvokers(oldUrlInvokerMap, invokerUrls); // Translate url list to Invoker map /* * If the calculation is wrong, it is not processed. * * 1. The protocol configured by the client is inconsistent with the protocol of the server. * eg: consumer protocol = dubbo, provider only has other protocol services(rest). * 2. The registration center is not robust and pushes illegal specification data. * */ if (CollectionUtils.isEmptyMap(newUrlInvokerMap)) { // 3-1 - Failed to convert the URL address into Invokers. logger.error( PROXY_FAILED_CONVERT_URL, "inconsistency between the client protocol and the protocol of the server", "", "urls to invokers error", new IllegalStateException("urls to invokers error. invokerUrls.size :" + invokerUrls.size() + ", invoker.size :0. urls :" + invokerUrls.toString())); return; } List<Invoker<T>> newInvokers = Collections.unmodifiableList(new ArrayList<>(newUrlInvokerMap.values())); BitList<Invoker<T>> finalInvokers = multiGroup ? 
new BitList<>(toMergeInvokerList(newInvokers)) : new BitList<>(newInvokers); // pre-route and build cache refreshRouter(finalInvokers.clone(), () -> this.setInvokers(finalInvokers)); this.urlInvokerMap = newUrlInvokerMap; try { destroyUnusedInvokers(oldUrlInvokerMap, newUrlInvokerMap); // Close the unused Invoker } catch (Exception e) { logger.warn(REGISTRY_FAILED_DESTROY_SERVICE, "", "", "destroyUnusedInvokers error. ", e); } // notify invokers refreshed this.invokersChanged(); } logger.info("Received invokers changed event from registry. " + "Registry type: interface. " + "Service Key: " + getConsumerUrl().getServiceKey() + ". " + "Urls Size : " + invokerUrls.size() + ". " + "Invokers Size : " + getInvokers().size() + ". " + "Available Size: " + getValidInvokers().size() + ". " + "Available Invokers : " + joinValidInvokerAddresses()); }
3.68
hbase_CellArrayMap_createSubCellFlatMap
/* To be used by base class only to create a sub-CellFlatMap */ @Override protected CellFlatMap createSubCellFlatMap(int min, int max, boolean descending) { return new CellArrayMap(comparator(), this.block, min, max, descending); }
3.68
hadoop_MappingRuleActionBase_setFallbackDefaultPlacement
/** * Sets the fallback method to default placement: if the action cannot be * executed, the application will be placed into the default queue; if the * default queue does not exist, the application will be rejected. * @return MappingRuleAction The same object for method chaining. */ public MappingRuleAction setFallbackDefaultPlacement() { fallback = MappingRuleResult.createDefaultPlacementResult(); return this; }
3.68
dubbo_StringUtils_splitToSet
/** * Split the specified value to be a {@link Set} * * @param value the content to be split * @param separatorChar a char to separate * @param trimElements require to trim the elements or not * @return non-null read-only {@link Set} * @since 2.7.8 */ public static Set<String> splitToSet(String value, char separatorChar, boolean trimElements) { List<String> values = splitToList(value, separatorChar); int size = values.size(); if (size < 1) { // empty condition return emptySet(); } if (!trimElements) { // Do not require to trim the elements return new LinkedHashSet(values); } return unmodifiableSet(values.stream().map(String::trim).collect(LinkedHashSet::new, Set::add, Set::addAll)); }
3.68
framework_LayoutDependencyTree_markAsVerticallyLayouted
/** * Marks the managed layout as layouted vertically and propagates the need * of vertical measuring for any components that might have got their size * changed as a result. If there are blockers, nothing is done. * * @param layout * the managed layout whose vertical layouting has been done, * should not be {@code null} */ public void markAsVerticallyLayouted(ManagedLayout layout) { LayoutDependency dependency = getDependency(layout.getConnectorId(), VERTICAL); dependency.markAsLayouted(); }
3.68
framework_AbsoluteLayout_removeComponent
/* * (non-Javadoc) * * @see * com.vaadin.ui.AbstractComponentContainer#removeComponent(com.vaadin.ui * .Component) */ @Override public void removeComponent(Component c) { internalRemoveComponent(c); super.removeComponent(c); }
3.68
hbase_DefaultMobStoreCompactor_performCompaction
/** * Performs compaction on a column family with the mob flag enabled. This works only when MOB * compaction is explicitly requested (by User), or by Master There are two modes of a MOB * compaction:<br> * <p> * <ul> * <li>1. Full mode - when all MOB data for a region is compacted into a single MOB file. * <li>2. I/O optimized mode - for use cases with no or infrequent updates/deletes of a <br> * MOB data. The main idea behind i/o optimized compaction is to limit maximum size of a MOB file * produced during compaction and to limit I/O write/read amplification. * </ul> * The basic algorithm of compaction is the following: <br> * 1. If the Put cell has a mob reference tag, the cell's value is the path of the mob file. * <ol> * <li>If the value size of a cell is larger than the threshold, this cell is regarded as a mob, * directly copy the (with mob tag) cell into the new store file.</li> * <li>Otherwise, retrieve the mob cell from the mob file, and writes a copy of the cell into the * new store file.</li> * </ol> * 2. If the Put cell doesn't have a reference tag. * <ol> * <li>If the value size of a cell is larger than the threshold, this cell is regarded as a mob, * write this cell to a mob file, and write the path of this mob file to the store file.</li> * <li>Otherwise, directly write this cell into the store file.</li> * </ol> * @param fd File details * @param scanner Where to read from. * @param writer Where to write to. * @param smallestReadPoint Smallest read point. * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= * smallestReadPoint * @param throughputController The compaction throughput controller. * @param request compaction request. * @param progress Progress reporter. * @return Whether compaction ended; false if it was interrupted for any reason. */ @Override protected boolean performCompaction(FileDetails fd, InternalScanner scanner, CellSink writer, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, CompactionRequestImpl request, CompactionProgress progress) throws IOException { long bytesWrittenProgressForLog = 0; long bytesWrittenProgressForShippedCall = 0; // Clear old mob references mobRefSet.get().clear(); boolean isUserRequest = userRequest.get(); boolean major = request.isAllFiles(); boolean compactMOBs = major && isUserRequest; boolean discardMobMiss = conf.getBoolean(MobConstants.MOB_UNSAFE_DISCARD_MISS_KEY, MobConstants.DEFAULT_MOB_DISCARD_MISS); if (discardMobMiss) { LOG.warn("{}=true. This is unsafe setting recommended only when first upgrading to a version" + " with the distributed mob compaction feature on a cluster that has experienced MOB data " + "corruption.", MobConstants.MOB_UNSAFE_DISCARD_MISS_KEY); } long maxMobFileSize = conf.getLong(MobConstants.MOB_COMPACTION_MAX_FILE_SIZE_KEY, MobConstants.DEFAULT_MOB_COMPACTION_MAX_FILE_SIZE); boolean ioOptimizedMode = this.ioOptimizedMode && !disableIO.get(); LOG.info( "Compact MOB={} optimized configured={} optimized enabled={} maximum MOB file size={}" + " major={} store={}", compactMOBs, this.ioOptimizedMode, ioOptimizedMode, maxMobFileSize, major, getStoreInfo()); // Since scanner.next() can return 'false' but still be delivering data, // we have to use a do/while loop. 
List<Cell> cells = new ArrayList<>(); // Limit to "hbase.hstore.compaction.kv.max" (default 10) to avoid OOME long currentTime = EnvironmentEdgeManager.currentTime(); long lastMillis = 0; if (LOG.isDebugEnabled()) { lastMillis = currentTime; } CloseChecker closeChecker = new CloseChecker(conf, currentTime); String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction"); long now = 0; boolean hasMore; byte[] fileName = null; StoreFileWriter mobFileWriter = null; /* * mobCells are used only to decide if we need to commit or abort current MOB output file. */ long mobCells = 0; long cellsCountCompactedToMob = 0, cellsCountCompactedFromMob = 0; long cellsSizeCompactedToMob = 0, cellsSizeCompactedFromMob = 0; boolean finished = false; ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax) .setSizeLimit(ScannerContext.LimitScope.BETWEEN_CELLS, Long.MAX_VALUE, Long.MAX_VALUE, compactScannerSizeLimit) .build(); throughputController.start(compactionName); KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null; long shippedCallSizeLimit = (long) request.getFiles().size() * this.store.getColumnFamilyDescriptor().getBlocksize(); Cell mobCell = null; List<String> committedMobWriterFileNames = new ArrayList<>(); try { mobFileWriter = newMobWriter(fd, major, request.getWriterCreationTracker()); fileName = Bytes.toBytes(mobFileWriter.getPath().getName()); do { hasMore = scanner.next(cells, scannerContext); currentTime = EnvironmentEdgeManager.currentTime(); if (LOG.isDebugEnabled()) { now = currentTime; } if (closeChecker.isTimeLimit(store, currentTime)) { progress.cancel(); return false; } for (Cell c : cells) { if (compactMOBs) { if (MobUtils.isMobReferenceCell(c)) { String fName = MobUtils.getMobFileName(c); // Added to support migration try { mobCell = mobStore.resolve(c, true, false).getCell(); } catch (DoNotRetryIOException e) { if ( discardMobMiss && e.getCause() != null && e.getCause() instanceof FileNotFoundException ) { LOG.error("Missing MOB cell: file={} not found cell={}", fName, c); continue; } else { throw e; } } if (discardMobMiss && mobCell.getValueLength() == 0) { LOG.error("Missing MOB cell value: file={} mob cell={} cell={}", fName, mobCell, c); continue; } else if (mobCell.getValueLength() == 0) { String errMsg = String.format("Found 0 length MOB cell in a file=%s mob cell=%s " + " cell=%s", fName, mobCell, c); throw new IOException(errMsg); } if (mobCell.getValueLength() > mobSizeThreshold) { // put the mob data back to the MOB store file PrivateCellUtil.setSequenceId(mobCell, c.getSequenceId()); if (!ioOptimizedMode) { mobFileWriter.append(mobCell); mobCells++; writer.append( MobUtils.createMobRefCell(mobCell, fileName, this.mobStore.getRefCellTags())); } else { // I/O optimized mode // Check if MOB cell origin file size is // greater than threshold Long size = mobLengthMap.get().get(fName); if (size == null) { // FATAL error (we should never get here though), abort compaction // This error means that meta section of store file does not contain // MOB file, which has references in at least one cell from this store file String msg = String.format( "Found an unexpected MOB file during compaction %s, aborting compaction %s", fName, getStoreInfo()); throw new IOException(msg); } // Can not be null if (size < maxMobFileSize) { // If MOB cell origin file is below threshold // it is get compacted mobFileWriter.append(mobCell); // Update number of mobCells in a current mob writer mobCells++; 
writer.append( MobUtils.createMobRefCell(mobCell, fileName, this.mobStore.getRefCellTags())); // Update total size of the output (we do not take into account // file compression yet) long len = mobFileWriter.getPos(); if (len > maxMobFileSize) { LOG.debug("Closing output MOB File, length={} file={}, store={}", len, mobFileWriter.getPath().getName(), getStoreInfo()); mobFileWriter = switchToNewMobWriter(mobFileWriter, fd, mobCells, major, request, committedMobWriterFileNames); fileName = Bytes.toBytes(mobFileWriter.getPath().getName()); mobCells = 0; } } else { // We leave large MOB file as is (is not compacted), // then we update set of MOB file references // and append mob cell directly to the store's writer Optional<TableName> refTable = MobUtils.getTableName(c); if (refTable.isPresent()) { mobRefSet.get().put(refTable.get(), fName); writer.append(c); } else { throw new IOException(String.format("MOB cell did not contain a tablename " + "tag. should not be possible. see ref guide on mob troubleshooting. " + "store=%s cell=%s", getStoreInfo(), c)); } } } } else { // If MOB value is less than threshold, append it directly to a store file PrivateCellUtil.setSequenceId(mobCell, c.getSequenceId()); writer.append(mobCell); cellsCountCompactedFromMob++; cellsSizeCompactedFromMob += mobCell.getValueLength(); } } else { // Not a MOB reference cell int size = c.getValueLength(); if (size > mobSizeThreshold) { // This MOB cell comes from a regular store file // therefore we store it into original mob output mobFileWriter.append(c); writer .append(MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags())); mobCells++; cellsCountCompactedToMob++; cellsSizeCompactedToMob += c.getValueLength(); if (ioOptimizedMode) { // Update total size of the output (we do not take into account // file compression yet) long len = mobFileWriter.getPos(); if (len > maxMobFileSize) { mobFileWriter = switchToNewMobWriter(mobFileWriter, fd, mobCells, major, request, committedMobWriterFileNames); fileName = Bytes.toBytes(mobFileWriter.getPath().getName()); mobCells = 0; } } } else { // Not a MOB cell, write it directly to a store file writer.append(c); } } } else if (c.getTypeByte() != KeyValue.Type.Put.getCode()) { // Not a major compaction or major with MOB disabled // If the kv type is not put, directly write the cell // to the store file. writer.append(c); } else if (MobUtils.isMobReferenceCell(c)) { // Not a major MOB compaction, Put MOB reference if (MobUtils.hasValidMobRefCellValue(c)) { // We do not check mobSizeThreshold during normal compaction, // leaving it to a MOB compaction run Optional<TableName> refTable = MobUtils.getTableName(c); if (refTable.isPresent()) { mobRefSet.get().put(refTable.get(), MobUtils.getMobFileName(c)); writer.append(c); } else { throw new IOException(String.format("MOB cell did not contain a tablename " + "tag. should not be possible. see ref guide on mob troubleshooting. " + "store=%s cell=%s", getStoreInfo(), c)); } } else { String errMsg = String.format("Corrupted MOB reference: %s", c.toString()); throw new IOException(errMsg); } } else if (c.getValueLength() <= mobSizeThreshold) { // If the value size of a cell is not larger than the threshold, directly write it to // the store file. writer.append(c); } else { // If the value size of a cell is larger than the threshold, it's regarded as a mob, // write this cell to a mob file, and write the path to the store file. mobCells++; // append the original keyValue in the mob file. 
mobFileWriter.append(c); Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()); // write the cell whose value is the path of a mob file to the store file. writer.append(reference); cellsCountCompactedToMob++; cellsSizeCompactedToMob += c.getValueLength(); if (ioOptimizedMode) { long len = mobFileWriter.getPos(); if (len > maxMobFileSize) { mobFileWriter = switchToNewMobWriter(mobFileWriter, fd, mobCells, major, request, committedMobWriterFileNames); fileName = Bytes.toBytes(mobFileWriter.getPath().getName()); mobCells = 0; } } } int len = c.getSerializedSize(); ++progress.currentCompactedKVs; progress.totalCompactedSize += len; bytesWrittenProgressForShippedCall += len; if (LOG.isDebugEnabled()) { bytesWrittenProgressForLog += len; } throughputController.control(compactionName, len); if (closeChecker.isSizeLimit(store, len)) { progress.cancel(); return false; } if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) { ((ShipperListener) writer).beforeShipped(); kvs.shipped(); bytesWrittenProgressForShippedCall = 0; } } // Log the progress of long running compactions every minute if // logging at DEBUG level if (LOG.isDebugEnabled()) { if ((now - lastMillis) >= COMPACTION_PROGRESS_LOG_INTERVAL) { String rate = String.format("%.2f", (bytesWrittenProgressForLog / 1024.0) / ((now - lastMillis) / 1000.0)); LOG.debug("Compaction progress: {} {}, rate={} KB/sec, throughputController is {}", compactionName, progress, rate, throughputController); lastMillis = now; bytesWrittenProgressForLog = 0; } } cells.clear(); } while (hasMore); // Commit last MOB writer commitOrAbortMobWriter(mobFileWriter, fd.maxSeqId, mobCells, major); finished = true; } catch (InterruptedException e) { progress.cancel(); throw new InterruptedIOException( "Interrupted while control throughput of compacting " + compactionName); } catch (IOException t) { String msg = "Mob compaction failed for region: " + store.getRegionInfo().getEncodedName(); throw new IOException(msg, t); } finally { // Clone last cell in the final because writer will append last cell when committing. If // don't clone here and once the scanner get closed, then the memory of last cell will be // released. (HBASE-22582) ((ShipperListener) writer).beforeShipped(); throughputController.finish(compactionName); if (!finished && mobFileWriter != null) { // Remove all MOB references because compaction failed clearThreadLocals(); // Abort writer LOG.debug("Aborting writer for {} because of a compaction failure, Store {}", mobFileWriter.getPath(), getStoreInfo()); abortWriter(mobFileWriter); deleteCommittedMobFiles(committedMobWriterFileNames); } } mobStore.updateCellsCountCompactedFromMob(cellsCountCompactedFromMob); mobStore.updateCellsCountCompactedToMob(cellsCountCompactedToMob); mobStore.updateCellsSizeCompactedFromMob(cellsSizeCompactedFromMob); mobStore.updateCellsSizeCompactedToMob(cellsSizeCompactedToMob); progress.complete(); return true; }
3.68
hibernate-validator_MetaDataBuilder_adaptOriginsAndImplicitGroups
/** * Adapts the given constraints to the given bean type. In case a constraint * is defined locally at the bean class the original constraint will be * returned without any modifications. If a constraint is defined in the * hierarchy (interface or super class) a new constraint will be returned * with an origin of {@link org.hibernate.validator.internal.metadata.core.ConstraintOrigin#DEFINED_IN_HIERARCHY}. If a * constraint is defined on an interface, the interface type will * additionally be part of the constraint's groups (implicit grouping). * * @param constraints The constraints that shall be adapted. The constraints themselves * will not be altered. * * @return A constraint adapted to the given bean type. */ protected Set<MetaConstraint<?>> adaptOriginsAndImplicitGroups(Set<MetaConstraint<?>> constraints) { Set<MetaConstraint<?>> adaptedConstraints = newHashSet(); for ( MetaConstraint<?> oneConstraint : constraints ) { adaptedConstraints.add( adaptOriginAndImplicitGroup( oneConstraint ) ); } return adaptedConstraints; }
3.68
hadoop_NMClient_setNMTokenCache
/** * Set the NM Token cache of the <code>NMClient</code>. This cache must be * shared with the {@link AMRMClient} that requested the containers managed * by this <code>NMClient</code> * <p> * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} * singleton instance will be used. * * @param nmTokenCache the NM token cache to use. */ public void setNMTokenCache(NMTokenCache nmTokenCache) { this.nmTokenCache = nmTokenCache; }
3.68
hbase_Procedure_toStringDetails
/** * Extend the toString() information with more procedure details */ public String toStringDetails() { final StringBuilder sb = toStringSimpleSB(); sb.append(" submittedTime="); sb.append(getSubmittedTime()); sb.append(", lastUpdate="); sb.append(getLastUpdate()); final int[] stackIndices = getStackIndexes(); if (stackIndices != null) { sb.append("\n"); sb.append("stackIndexes="); sb.append(Arrays.toString(stackIndices)); } return sb.toString(); }
3.68
framework_DragSourceExtensionConnector_addDragListeners
/** * Adds dragstart and dragend event listeners to the given DOM element. * * @param element * DOM element to attach event listeners to. */ protected void addDragListeners(Element element) { EventTarget target = element.cast(); target.addEventListener(Event.DRAGSTART, dragStartListener); target.addEventListener(Event.DRAGEND, dragEndListener); }
3.68
hadoop_SysInfoWindows_getVirtualMemorySize
/** {@inheritDoc} */ @Override public long getVirtualMemorySize() { refreshIfNeeded(); return vmemSize; }
3.68
dubbo_RpcContextAttachment_startAsync
/** * @return the started {@link AsyncContext} bound to the current server-side context * @throws IllegalStateException */ @SuppressWarnings("unchecked") public static AsyncContext startAsync() throws IllegalStateException { RpcContextAttachment currentContext = getServerAttachment(); if (currentContext.asyncContext == null) { currentContext.asyncContext = new AsyncContextImpl(); } currentContext.asyncContext.start(); return currentContext.asyncContext; }
3.68
hbase_Mutation_setTimestamp
/** * Set the timestamp of the mutation. */ public Mutation setTimestamp(long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } this.ts = timestamp; return this; }
3.68
hbase_HBaseCluster_getServerHoldingMeta
/** * Get the ServerName of region server serving the first hbase:meta region */ public ServerName getServerHoldingMeta() throws IOException { return getServerHoldingRegion(TableName.META_TABLE_NAME, RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); }
3.68
morf_AbstractSqlDialectTest_expectedCreateViewStatements
/** * @return The expected SQL statements for creating the test database view. */ protected List<String> expectedCreateViewStatements() { return Arrays.asList("CREATE VIEW " + tableName("TestView") + " AS (SELECT stringField FROM " + tableName(TEST_TABLE) + " WHERE (stringField = " + stringLiteralPrefix() + "'blah'))"); }
3.68
framework_VCalendarPanel_handleKeyPress
/** * Handles the keypress from both the onKeyPress event and the onKeyDown * event * * @param event * The keydown/keypress event */ private void handleKeyPress(DomEvent<?> event) { // Special handling for events from time ListBoxes. if (time != null && time.getElement().isOrHasChild( (Node) event.getNativeEvent().getEventTarget().cast())) { int nativeKeyCode = event.getNativeEvent().getKeyCode(); if (nativeKeyCode == getSelectKey()) { onSubmit(); // submit if enter key hit down on listboxes event.preventDefault(); event.stopPropagation(); } if (nativeKeyCode == getCloseKey()) { onCancel(); // cancel if ESC key hit down on listboxes event.preventDefault(); event.stopPropagation(); } return; } // Check tabs int keycode = event.getNativeEvent().getKeyCode(); if (keycode == KeyCodes.KEY_TAB && event.getNativeEvent().getShiftKey()) { if (onTabOut(event)) { return; } } // Handle the navigation if (handleNavigation(keycode, event.getNativeEvent().getCtrlKey() || event.getNativeEvent().getMetaKey(), event.getNativeEvent().getShiftKey())) { event.preventDefault(); } }
3.68
hbase_MultiTableInputFormatBase_setScans
/** * Allows subclasses to set the list of {@link Scan} objects. * @param scans The list of {@link Scan} used to define the input */ protected void setScans(List<Scan> scans) { this.scans = scans; }
3.68
hbase_SpaceQuotaRefresherChore_getPeriod
/** * Extracts the period for the chore from the configuration. * @param conf The configuration object. * @return The configured chore period or the default value. */ static int getPeriod(Configuration conf) { return conf.getInt(POLICY_REFRESHER_CHORE_PERIOD_KEY, POLICY_REFRESHER_CHORE_PERIOD_DEFAULT); }
3.68
hadoop_AbfsHttpOperation_openConnection
/** * Open the HTTP connection. * * @throws IOException if an error occurs. */ private HttpURLConnection openConnection() throws IOException { long start = System.nanoTime(); try { return (HttpURLConnection) url.openConnection(); } finally { connectionTimeMs = elapsedTimeMs(start); } }
3.68
morf_WindowFunction_drive
/** * @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser) */ @Override public void drive(ObjectTreeTraverser traverser) { traverser .dispatch(getFunction()) .dispatch(getOrderBys()) .dispatch(getPartitionBys()); }
3.68
hadoop_AzureBlobFileSystemStore_getUser
/** * @return local user name. * */ public String getUser() { return this.userName; }
3.68
zxing_BitMatrix_set
/** * <p>Sets the given bit to true.</p> * * @param x The horizontal component (i.e. which column) * @param y The vertical component (i.e. which row) */ public void set(int x, int y) { int offset = y * rowSize + (x / 32); bits[offset] |= 1 << (x & 0x1f); }
3.68
hbase_TableDescriptorBuilder_removeValue
/** * Remove metadata represented by the key from the {@link #values} map * @param key Key of the entry to remove from the TableDescriptor parameters. * @return the modifyable TD */ public ModifyableTableDescriptor removeValue(final byte[] key) { return removeValue(new Bytes(key)); }
3.68
dubbo_Bytes_bytes2base64
/** * to base64 string. * * @param bs byte array. * @param off offset. * @param len length. * @param code base64 code(0-63 is base64 char,64 is pad char). * @return base64 string. */ public static String bytes2base64(final byte[] bs, final int off, final int len, final char[] code) { if (off < 0) { throw new IndexOutOfBoundsException("bytes2base64: offset < 0, offset is " + off); } if (len < 0) { throw new IndexOutOfBoundsException("bytes2base64: length < 0, length is " + len); } if (off + len > bs.length) { throw new IndexOutOfBoundsException("bytes2base64: offset + length > array length."); } if (code.length < 64) { throw new IllegalArgumentException("Base64 code length < 64."); } boolean pad = code.length > 64; // has pad char. int num = len / 3, rem = len % 3, r = off, w = 0; char[] cs = new char[num * 4 + (rem == 0 ? 0 : pad ? 4 : rem + 1)]; for (int i = 0; i < num; i++) { int b1 = bs[r++] & MASK8, b2 = bs[r++] & MASK8, b3 = bs[r++] & MASK8; cs[w++] = code[b1 >> 2]; cs[w++] = code[(b1 << 4) & MASK6 | (b2 >> 4)]; cs[w++] = code[(b2 << 2) & MASK6 | (b3 >> 6)]; cs[w++] = code[b3 & MASK6]; } if (rem == 1) { int b1 = bs[r++] & MASK8; cs[w++] = code[b1 >> 2]; cs[w++] = code[(b1 << 4) & MASK6]; if (pad) { cs[w++] = code[64]; cs[w++] = code[64]; } } else if (rem == 2) { int b1 = bs[r++] & MASK8, b2 = bs[r++] & MASK8; cs[w++] = code[b1 >> 2]; cs[w++] = code[(b1 << 4) & MASK6 | (b2 >> 4)]; cs[w++] = code[(b2 << 2) & MASK6]; if (pad) { cs[w++] = code[64]; } } return new String(cs); }
3.68
flink_TypeExtractor_validateIfWritable
// visible for testing static void validateIfWritable(TypeInformation<?> typeInfo, Type type) { try { // try to load the writable type info Class<?> writableTypeInfoClass = Class.forName( HADOOP_WRITABLE_TYPEINFO_CLASS, false, typeInfo.getClass().getClassLoader()); if (writableTypeInfoClass.isAssignableFrom(typeInfo.getClass())) { // this is actually a writable type info // check if the type is a writable if (!(type instanceof Class && isHadoopWritable((Class<?>) type))) { throw new InvalidTypesException(HADOOP_WRITABLE_CLASS + " type expected."); } // check writable type contents Class<?> clazz = (Class<?>) type; if (typeInfo.getTypeClass() != clazz) { throw new InvalidTypesException( "Writable type '" + typeInfo.getTypeClass().getCanonicalName() + "' expected but was '" + clazz.getCanonicalName() + "'."); } } } catch (ClassNotFoundException e) { // class not present at all, so cannot be that type info // ignore } }
3.68
framework_Profiler_logTimings
/** * Outputs the gathered profiling data to the debug console. */ public static void logTimings() { if (!isEnabled()) { getLogger().warning( "Profiler is not enabled, no data has been collected."); return; } LinkedList<Node> stack = new LinkedList<>(); Node rootNode = new Node(null); stack.add(rootNode); JsArray<GwtStatsEvent> gwtStatsEvents = getGwtStatsEvents(); if (gwtStatsEvents.length() == 0) { getLogger().warning( "No profiling events recorded, this might happen if another __gwtStatsEvent handler is installed."); return; } Set<Node> extendedTimeNodes = new HashSet<>(); for (int i = 0; i < gwtStatsEvents.length(); i++) { GwtStatsEvent gwtStatsEvent = gwtStatsEvents.get(i); if (!EVT_GROUP.equals(gwtStatsEvent.getEvtGroup())) { // Only log our own events to avoid problems with events which // are not of type start+end continue; } String eventName = gwtStatsEvent.getEventName(); String type = gwtStatsEvent.getType(); boolean isExtendedEvent = gwtStatsEvent.isExtendedEvent(); boolean isBeginEvent = "begin".equals(type); Node stackTop = stack.getLast(); boolean inEvent = eventName.equals(stackTop.getName()) && !isBeginEvent; if (!inEvent && stack.size() >= 2 && eventName.equals(stack.get(stack.size() - 2).getName()) && !isBeginEvent) { // back out of sub event if (extendedTimeNodes.contains(stackTop) && isExtendedEvent) { stackTop.leave(gwtStatsEvent.getRelativeMillis()); } else { stackTop.leave(gwtStatsEvent.getMillis()); } stack.removeLast(); stackTop = stack.getLast(); inEvent = true; } if (type.equals("end")) { if (!inEvent) { getLogger().severe("Got end event for " + eventName + " but is currently in " + stackTop.getName()); return; } Node previousStackTop = stack.removeLast(); if (extendedTimeNodes.contains(previousStackTop)) { previousStackTop.leave(gwtStatsEvent.getRelativeMillis()); } else { previousStackTop.leave(gwtStatsEvent.getMillis()); } } else { double millis = isExtendedEvent ? gwtStatsEvent.getRelativeMillis() : gwtStatsEvent.getMillis(); if (!inEvent) { stackTop = stackTop.enterChild(eventName, millis); stack.add(stackTop); if (isExtendedEvent) { extendedTimeNodes.add(stackTop); } } if (!isBeginEvent) { // Create sub event Node subNode = stackTop.enterChild(eventName + "." + type, millis); if (isExtendedEvent) { extendedTimeNodes.add(subNode); } stack.add(subNode); } } } if (stack.size() != 1) { getLogger().warning("Not all nodes are left, the last node is " + stack.getLast().getName()); return; } Map<String, Node> totals = new HashMap<>(); rootNode.sumUpTotals(totals); List<Node> totalList = new ArrayList<>(totals.values()); Collections.sort(totalList, (o1, o2) -> (int) (o2.getTimeSpent() - o1.getTimeSpent())); if (getConsumer() != null) { getConsumer().addProfilerData(stack.getFirst(), totalList); } }
3.68
hadoop_FileSystemMultipartUploader_innerComplete
/** * The upload complete operation. * @param multipartUploadId the ID of the upload * @param filePath path * @param handleMap map of handles * @return the path handle * @throws IOException failure */ private PathHandle innerComplete( UploadHandle multipartUploadId, Path filePath, Map<Integer, PartHandle> handleMap) throws IOException { checkPath(filePath); checkUploadId(multipartUploadId.toByteArray()); checkPartHandles(handleMap); List<Map.Entry<Integer, PartHandle>> handles = new ArrayList<>(handleMap.entrySet()); handles.sort(Comparator.comparingInt(Map.Entry::getKey)); List<Path> partHandles = handles .stream() .map(pair -> { byte[] byteArray = pair.getValue().toByteArray(); return new Path(new String(byteArray, 0, byteArray.length, StandardCharsets.UTF_8)); }) .collect(Collectors.toList()); int count = partHandles.size(); // built up to identify duplicates -if the size of this set is // below that of the number of parts, then there's a duplicate entry. Set<Path> values = new HashSet<>(count); values.addAll(partHandles); Preconditions.checkArgument(values.size() == count, "Duplicate PartHandles"); byte[] uploadIdByteArray = multipartUploadId.toByteArray(); Path collectorPath = new Path(new String(uploadIdByteArray, 0, uploadIdByteArray.length, StandardCharsets.UTF_8)); boolean emptyFile = totalPartsLen(partHandles) == 0; if (emptyFile) { fs.create(filePath).close(); } else { Path filePathInsideCollector = mergePaths(collectorPath, new Path(Path.SEPARATOR + filePath.getName())); fs.create(filePathInsideCollector).close(); fs.concat(filePathInsideCollector, partHandles.toArray(new Path[handles.size()])); new InternalOperations() .rename(fs, filePathInsideCollector, filePath, Options.Rename.OVERWRITE); } fs.delete(collectorPath, true); return getPathHandle(filePath); }
3.68
hadoop_BlockGrouper_getSchema
/** * Get EC schema. * @return ECSchema. */ protected ECSchema getSchema() { return schema; }
3.68
flink_TableSinkBase_configure
/** * Returns a copy of this {@link TableSink} configured with the field names and types of the * table to emit. * * @param fieldNames The field names of the table to emit. * @param fieldTypes The field types of the table to emit. * @return A copy of this {@link TableSink} configured with the field names and types of the * table to emit. */ @Override public final TableSink<T> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) { final TableSinkBase<T> configuredSink = this.copy(); configuredSink.fieldNames = Optional.of(fieldNames); configuredSink.fieldTypes = Optional.of(fieldTypes); return configuredSink; }
3.68
hbase_RegionServerSnapshotManager_stop
/** * Abruptly shutdown the thread pool. Call when exiting a region server. */ void stop() { if (this.stopped) return; this.stopped = true; this.executor.shutdown(); }
3.68
dubbo_GovernanceRuleRepository_removeListener
/** * {@link #removeListener(String, String, ConfigurationListener)} * * @param key the key to represent a configuration * @param listener configuration listener */ default void removeListener(String key, ConfigurationListener listener) { removeListener(key, DEFAULT_GROUP, listener); }
3.68
hbase_WALEntryStream_getCurrentPath
/** Returns the {@link Path} of the current WAL */ public Path getCurrentPath() { return currentPath; }
3.68
framework_VScrollTable_isBefore
/** * Makes a check based on indexes whether the row is before the * compared row. * * @param row1 the row to compare against * @return true if this row's index is smaller than row1's index */ public boolean isBefore(VScrollTableRow row1) { return getIndex() < row1.getIndex(); }
3.68
hbase_IndexOnlyLruBlockCache_cacheBlock
/** * Cache only index block with the specified name and buffer * @param cacheKey block's cache key * @param buf block buffer * @param inMemory if block is in-memory */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { if (isMetaBlock(buf.getBlockType())) { super.cacheBlock(cacheKey, buf, inMemory); } }
3.68
hadoop_OBSBlockOutputStream_waitForAllPartUploads
/** * Block awaiting all outstanding uploads to complete. * * @return list of results * @throws IOException IO Problems */ private List<PartEtag> waitForAllPartUploads() throws IOException { LOG.debug("Waiting for {} uploads to complete", partETagsFutures.size()); try { return Futures.allAsList(partETagsFutures).get(); } catch (InterruptedException ie) { LOG.warn("Interrupted partUpload", ie); LOG.debug("Cancelling futures"); for (ListenableFuture<PartEtag> future : partETagsFutures) { future.cancel(true); } // abort multipartupload this.abort(); throw new IOException( "Interrupted multi-part upload with id '" + uploadId + "' to " + key); } catch (ExecutionException ee) { // there is no way of recovering so abort // cancel all partUploads LOG.debug("While waiting for upload completion", ee); LOG.debug("Cancelling futures"); for (ListenableFuture<PartEtag> future : partETagsFutures) { future.cancel(true); } // abort multipartupload this.abort(); throw OBSCommonUtils.extractException( "Multi-part upload with id '" + uploadId + "' to " + key, key, ee); } }
3.68
flink_MergingWindowSet_getStateWindow
/** * Returns the state window for the given in-flight {@code Window}. The state window is the * {@code Window} in which we keep the actual state of a given in-flight window. Windows might * expand but we keep the original state window for keeping the elements of the window to avoid * costly state juggling. * * @param window The window for which to get the state window. */ public W getStateWindow(W window) { return mapping.get(window); }
3.68
hbase_MetricsAssignmentManager_getReopenProcMetrics
/** Returns Set of common metrics for reopen procedure */ public ProcedureMetrics getReopenProcMetrics() { return reopenProcMetrics; }
3.68
flink_QueryableStateConfiguration_fromConfiguration
/** Creates the {@link QueryableStateConfiguration} from the given Configuration. */ public static QueryableStateConfiguration fromConfiguration(Configuration config) { if (!config.getBoolean(QueryableStateOptions.ENABLE_QUERYABLE_STATE_PROXY_SERVER)) { return null; } final Iterator<Integer> proxyPorts = NetUtils.getPortRangeFromString( config.getString(QueryableStateOptions.PROXY_PORT_RANGE)); final Iterator<Integer> serverPorts = NetUtils.getPortRangeFromString( config.getString(QueryableStateOptions.SERVER_PORT_RANGE)); final int numProxyServerNetworkThreads = config.getInteger(QueryableStateOptions.PROXY_NETWORK_THREADS); final int numProxyServerQueryThreads = config.getInteger(QueryableStateOptions.PROXY_ASYNC_QUERY_THREADS); final int numStateServerNetworkThreads = config.getInteger(QueryableStateOptions.SERVER_NETWORK_THREADS); final int numStateServerQueryThreads = config.getInteger(QueryableStateOptions.SERVER_ASYNC_QUERY_THREADS); return new QueryableStateConfiguration( proxyPorts, serverPorts, numProxyServerNetworkThreads, numProxyServerQueryThreads, numStateServerNetworkThreads, numStateServerQueryThreads); }
3.68
pulsar_ManagedLedgerConfig_setMinimumBacklogCursorsForCaching
/** * Set the minimum number of cursors with backlog after which the broker is allowed to cache read entries and reuse them for other * cursors' backlog reads. * * @param minimumBacklogCursorsForCaching the minimum number of backlogged cursors required before caching is enabled */ public void setMinimumBacklogCursorsForCaching(int minimumBacklogCursorsForCaching) { this.minimumBacklogCursorsForCaching = minimumBacklogCursorsForCaching; }
3.68
dubbo_FutureContext_getCompletableFuture
/** * get future. * * @param <T> * @return future */ @SuppressWarnings("unchecked") public <T> CompletableFuture<T> getCompletableFuture() { try { return (CompletableFuture<T>) future; } finally { if (clearFutureAfterGet) { this.future = null; } } }
3.68
hadoop_LoggingAuditor_modifyHttpRequest
/** * Before transmitting a request, the logging auditor * always builds the referrer header, saves to the outer * class (where {@link #getLastHeader()} can retrieve it, * and logs at debug. * If configured to add the header to the S3 logs, it will * be set as the HTTP referrer. * @param context The current state of the execution, * including the SDK and current HTTP request. * @param executionAttributes A mutable set of attributes scoped * to one specific request/response * cycle that can be used to give data * to future lifecycle methods. * @return The potentially-modified HTTP request that should be * sent to the service. Must not be null. */ @Override public SdkHttpRequest modifyHttpRequest(Context.ModifyHttpRequest context, ExecutionAttributes executionAttributes) { SdkHttpRequest httpRequest = context.httpRequest(); SdkRequest sdkRequest = context.request(); // attach range for GetObject requests attachRangeFromRequest(httpRequest, executionAttributes); // for delete op, attach the number of files to delete attachDeleteKeySizeAttribute(sdkRequest); // build the referrer header final String header = referrer.buildHttpReferrer(); // update the outer class's field. setLastHeader(header); if (headerEnabled) { // add the referrer header httpRequest = httpRequest.toBuilder() .appendHeader(HEADER_REFERRER, header) .build(); } if (LOG.isDebugEnabled()) { LOG.debug("[{}] {} Executing {} with {}; {}", currentThreadID(), getSpanId(), getOperationName(), analyzer.analyze(context.request()), header); } // now see if the request is actually a blocked multipart request if (!isMultipartUploadEnabled && isRequestMultipartIO(sdkRequest)) { throw new AuditOperationRejectedException("Multipart IO request " + sdkRequest + " rejected " + header); } return httpRequest; }
3.68
hadoop_CopyCommandWithMultiThread_setThreadPoolQueueSize
/** * Set the thread pool queue size from the option value; if the value is less than 1, * DEFAULT_QUEUE_SIZE is used instead. * * @param optValue option value */ protected void setThreadPoolQueueSize(String optValue) { if (optValue != null) { int size = Integer.parseInt(optValue); threadPoolQueueSize = size < 1 ? DEFAULT_QUEUE_SIZE : size; } }
3.68
flink_SqlFunctionUtils_abs
/** SQL <code>ABS</code> operator applied to double values. */ public static double abs(double b0) { return Math.abs(b0); }
3.68
morf_TableSetSchema_getTable
/** * @see org.alfasoftware.morf.metadata.Schema#getTable(java.lang.String) */ @Override public Table getTable(final String name) { return tables.stream() .filter(table -> table.getName().equalsIgnoreCase(name)) .findFirst() .orElseThrow(() -> new IllegalArgumentException(String.format("Requested table [%s] does not exist.", name))); }
3.68
morf_SqlDialect_getSqlForUpper
/** * Converts the <code>UPPER</code> function into SQL. * * @param function the function to convert. * @return a string representation of the SQL. */ protected String getSqlForUpper(Function function) { return "UPPER(" + getSqlFrom(function.getArguments().get(0)) + ")"; }
3.68
framework_AbstractComponent_getLocaleFromString
/** * Constructs a Locale corresponding to the given string. The string should * consist of one, two or three parts with '_' between the different parts * if there is more than one part. The first part specifies the language, * the second part the country and the third part the variant of the locale. * * @param localeString * the locale specified as a string * @return the Locale object corresponding to localeString */ private Locale getLocaleFromString(String localeString) { if (localeString == null) { return null; } String[] parts = localeString.split("_"); if (parts.length > 3) { throw new RuntimeException( "Cannot parse the locale string: " + localeString); } switch (parts.length) { case 1: return new Locale(parts[0]); case 2: return new Locale(parts[0], parts[1]); default: return new Locale(parts[0], parts[1], parts[2]); } }
3.68
morf_SqlServerDialect_getDeleteLimitPreFromClause
/** * @see SqlDialect#getDeleteLimitPreFromClause(int) */ @Override protected Optional<String> getDeleteLimitPreFromClause(int limit) { return Optional.of("TOP (" + limit + ")"); }
3.68
hudi_ImmutableTriple_of
/** * <p> * Obtains an immutable triple of from three objects inferring the generic types. * </p> * * <p> * This factory allows the triple to be created using inference to obtain the generic types. * </p> * * @param <L> the left element type * @param <M> the middle element type * @param <R> the right element type * @param left the left element, may be null * @param middle the middle element, may be null * @param right the right element, may be null * @return a triple formed from the three parameters, not null */ public static <L, M, R> ImmutableTriple<L, M, R> of(final L left, final M middle, final R right) { return new ImmutableTriple<L, M, R>(left, middle, right); }
3.68
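A minimal usage sketch for the factory above; the values are made up for illustration:

ImmutableTriple<String, Integer, Boolean> triple =
    ImmutableTriple.of("file-slice-01", 42, true);
String left = triple.getLeft();      // "file-slice-01"
Integer middle = triple.getMiddle(); // 42
Boolean right = triple.getRight();   // true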
framework_VRadioButtonGroup_selectItemKey
/**
 * Removes previous selection and adds new selection.
 *
 * @param selectedItemKey
 *            the key of the selected radio button
 */
public void selectItemKey(String selectedItemKey) {
    // At most one item could be selected so reset all radio buttons
    // before applying current selection
    keyToOptions.values()
            .forEach(button -> updateItemSelection(button, false));
    if (selectedItemKey != null) {
        RadioButton radioButton = keyToOptions.get(selectedItemKey);
        if (radioButton != null) {
            // Items might not be loaded yet
            updateItemSelection(radioButton, true);
        }
    }
}
3.68
hbase_HFileArchiver_archiveFamilyByFamilyDir
/**
 * Removes from the specified region the store files of the specified column family, either by
 * archiving them or deleting them outright.
 * @param fs the filesystem where the store files live
 * @param conf {@link Configuration} to examine to determine the archive directory
 * @param parent Parent region hosting the store files
 * @param familyDir {@link Path} to where the family is being stored
 * @param family the family hosting the store files
 * @throws IOException if the files could not be correctly disposed.
 */
public static void archiveFamilyByFamilyDir(FileSystem fs, Configuration conf, RegionInfo parent,
  Path familyDir, byte[] family) throws IOException {
  FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, familyDir);
  if (storeFiles == null) {
    LOG.debug("No files to dispose of in {}, family={}", parent.getRegionNameAsString(),
      Bytes.toString(family));
    return;
  }

  FileStatusConverter getAsFile = new FileStatusConverter(fs);
  Collection<File> toArchive = Stream.of(storeFiles).map(getAsFile).collect(Collectors.toList());
  Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family);

  // do the actual archive
  List<File> failedArchive =
    resolveAndArchive(fs, storeArchiveDir, toArchive, EnvironmentEdgeManager.currentTime());
  if (!failedArchive.isEmpty()) {
    throw new FailedArchiveException(
      "Failed to archive/delete all the files for region:" + Bytes.toString(parent.getRegionName())
        + ", family:" + Bytes.toString(family) + " into " + storeArchiveDir
        + ". Something is probably awry on the filesystem.",
      failedArchive.stream().map(FUNC_FILE_TO_PATH).collect(Collectors.toList()));
  }
}
3.68
framework_Table_getVisibleCellsNoCache
/**
 * Render rows with index "firstIndex" to "firstIndex+rows-1" to a new
 * buffer.
 *
 * Reuses values from the current page buffer if the rows are found there.
 *
 * @param firstIndex
 * @param rows
 * @param replaceListeners
 * @return
 */
private Object[][] getVisibleCellsNoCache(int firstIndex, int rows,
        boolean replaceListeners) {
    if (getLogger().isLoggable(Level.FINEST)) {
        getLogger().log(Level.FINEST,
                "Render visible cells for rows {0}-{1}",
                new Object[] { firstIndex, (firstIndex + rows - 1) });
    }
    final Object[] colids = getVisibleColumns();
    final int cols = colids.length;

    HashSet<Property<?>> oldListenedProperties = listenedProperties;
    HashSet<Component> oldVisibleComponents = visibleComponents;

    if (replaceListeners) {
        // initialize the listener collections, this should only be done if
        // the entire cache is refreshed (through refreshRenderedCells)
        listenedProperties = new HashSet<Property<?>>();
        visibleComponents = new HashSet<Component>();
    }

    Object[][] cells = new Object[cols + CELL_FIRSTCOL][rows];
    if (rows == 0) {
        unregisterPropertiesAndComponents(oldListenedProperties,
                oldVisibleComponents);
        return cells;
    }

    final RowHeaderMode headmode = getRowHeaderMode();
    final boolean[] iscomponent = new boolean[cols];
    for (int i = 0; i < cols; i++) {
        iscomponent[i] = columnGenerators.containsKey(colids[i])
                || Component.class.isAssignableFrom(getType(colids[i]));
    }

    int firstIndexNotInCache;
    if (pageBuffer != null && pageBuffer[CELL_ITEMID].length > 0) {
        firstIndexNotInCache = pageBufferFirstIndex
                + pageBuffer[CELL_ITEMID].length;
    } else {
        firstIndexNotInCache = -1;
    }

    // Creates the page contents
    int filledRows = 0;
    if (items instanceof Container.Indexed) {
        // more efficient implementation for containers supporting access by
        // index
        List<?> itemIds = getItemIds(firstIndex, rows);
        for (int i = 0; i < rows && i < itemIds.size(); i++) {
            Object id = itemIds.get(i);
            if (id == null) {
                throw new IllegalStateException(
                        "Null itemId returned from container");
            }
            // Start by parsing the values, id should already be set
            parseItemIdToCells(cells, id, i, firstIndex, headmode, cols,
                    colids, firstIndexNotInCache, iscomponent,
                    oldListenedProperties);
            filledRows++;
        }
    } else {
        // slow back-up implementation for cases where the container does
        // not support access by index

        // Gets the first item id
        Object id = firstItemId();
        for (int i = 0; i < firstIndex; i++) {
            id = nextItemId(id);
        }
        for (int i = 0; i < rows && id != null; i++) {
            // Start by parsing the values, id should already be set
            parseItemIdToCells(cells, id, i, firstIndex, headmode, cols,
                    colids, firstIndexNotInCache, iscomponent,
                    oldListenedProperties);

            // Gets the next item id for non indexed container
            id = nextItemId(id);

            filledRows++;
        }
    }

    // Assures that all the rows of the cell-buffer are valid
    if (filledRows != cells[0].length) {
        final Object[][] temp = new Object[cells.length][filledRows];
        for (int i = 0; i < cells.length; i++) {
            for (int j = 0; j < filledRows; j++) {
                temp[i][j] = cells[i][j];
            }
        }
        cells = temp;
    }

    unregisterPropertiesAndComponents(oldListenedProperties,
            oldVisibleComponents);

    return cells;
}
3.68
hbase_ChoreService_isShutdown
/** Returns true when the service is shutdown and thus cannot be used anymore */ public boolean isShutdown() { return scheduler.isShutdown(); }
3.68
framework_VPopupView_addVisibilityChangeHandler
/**
 * Adds the given visibility change handler to this widget.
 *
 * @param visibilityChangeHandler
 *            the handler that should be triggered when visibility changes
 * @return the registration object for removing the given handler when no
 *         longer needed
 */
public HandlerRegistration addVisibilityChangeHandler(
        final VisibilityChangeHandler visibilityChangeHandler) {
    return addHandler(visibilityChangeHandler,
            VisibilityChangeEvent.getType());
}
3.68
hbase_ParseFilter_convertByteArrayToLong
/**
 * Converts a long expressed in a byte array to an actual long
 * <p>
 * This doesn't use Bytes.toLong because that assumes that there will be {@link Bytes#SIZEOF_INT}
 * bytes available.
 * <p>
 * @param numberAsByteArray the long value expressed as a byte array
 * @return the long value
 */
public static long convertByteArrayToLong(byte[] numberAsByteArray) {
  if (numberAsByteArray == null) {
    throw new IllegalArgumentException("convertByteArrayToLong called with a null array");
  }

  int i = 0;
  long result = 0;
  boolean isNegative = false;

  if (numberAsByteArray[i] == ParseConstants.MINUS_SIGN) {
    i++;
    isNegative = true;
  }

  while (i != numberAsByteArray.length) {
    if (
      numberAsByteArray[i] < ParseConstants.ZERO || numberAsByteArray[i] > ParseConstants.NINE
    ) {
      throw new IllegalArgumentException("Byte Array should only contain digits");
    }
    result = result * 10 + (numberAsByteArray[i] - ParseConstants.ZERO);
    if (result < 0) {
      throw new IllegalArgumentException("Long Argument too large");
    }
    i++;
  }

  if (isNegative) {
    return -result;
  } else {
    return result;
  }
}
3.68
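A short usage sketch for the parser above, assuming the HBase Bytes utility is used to obtain the ASCII bytes of the textual number:

byte[] positive = Bytes.toBytes("1024");
byte[] negative = Bytes.toBytes("-57");
long a = ParseFilter.convertByteArrayToLong(positive); // 1024L
long b = ParseFilter.convertByteArrayToLong(negative); // -57L
// Non-digit input such as Bytes.toBytes("12x") throws IllegalArgumentException.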
morf_OracleDialect_primaryKeyConstraint
/**
 * CONSTRAINT DEF_PK PRIMARY KEY (X, Y, Z)
 */
private String primaryKeyConstraint(Table table) {
  return primaryKeyConstraint(table.getName(), namesOfColumns(primaryKeysForTable(table)));
}
3.68
framework_VRichTextArea_getValue
/**
 * Gets the value of the text area.
 *
 * @return the value as HTML
 */
public String getValue() {
    if (rta.isAttached()) {
        return rta.getHTML();
    } else {
        return html.getHTML();
    }
}
3.68
flink_TaskSlot_getTasks
/**
 * Get all tasks running in this task slot.
 *
 * @return Iterator to all currently contained tasks in this task slot.
 */
public Iterator<T> getTasks() {
    return tasks.values().iterator();
}
3.68
flink_ScanReuser_applyPhysicalAndMetadataPushDown
/**
 * Generate sourceAbilitySpecs and newProducedType by projected physical fields and metadata
 * keys.
 */
private static RowType applyPhysicalAndMetadataPushDown(
        DynamicTableSource source,
        RowType originType,
        List<SourceAbilitySpec> sourceAbilitySpecs,
        int[][] physicalAndMetaFields,
        int[][] projectedPhysicalFields,
        List<String> usedMetadataNames) {
    RowType newProducedType = originType;
    boolean supportsProjectPushDown = source instanceof SupportsProjectionPushDown;
    boolean supportsReadingMeta = source instanceof SupportsReadingMetadata;
    if (supportsProjectPushDown || supportsReadingMeta) {
        newProducedType = (RowType) Projection.of(physicalAndMetaFields).project(originType);
    }

    if (supportsProjectPushDown) {
        sourceAbilitySpecs.add(
                new ProjectPushDownSpec(projectedPhysicalFields, newProducedType));
    }

    if (supportsReadingMeta) {
        sourceAbilitySpecs.add(new ReadingMetadataSpec(usedMetadataNames, newProducedType));
    }

    return newProducedType;
}
3.68
framework_RangeValidator_setMinValue
/**
 * Sets the minimum value of the range. Use
 * {@link #setMinValueIncluded(boolean)} to control whether this value is
 * part of the range or not.
 *
 * @param minValue
 *            the minimum value
 */
public void setMinValue(T minValue) {
    this.minValue = minValue;
}
3.68
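A hedged sketch of how the setter above is typically paired with setMinValueIncluded; the validator instance and bound type are assumed for illustration:

// Assume an existing RangeValidator<Integer> named ageValidator.
ageValidator.setMinValue(18);            // values below 18 become invalid
ageValidator.setMinValueIncluded(true);  // 18 itself remains a valid value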
hbase_ServerName_getStartCode
/** Return the start code. */ public long getStartCode() { return startCode; }
3.68
AreaShop_FileManager_getBuys
/**
 * Get all buy regions.
 * @return List of all buy regions
 */
public List<BuyRegion> getBuys() {
  List<BuyRegion> result = new ArrayList<>();
  for(GeneralRegion region : regions.values()) {
    if(region instanceof BuyRegion) {
      result.add((BuyRegion)region);
    }
  }
  return result;
}
3.68
framework_Tree_getIdentifier
/*
 * (non-Javadoc)
 *
 * @see
 * com.vaadin.event.dd.acceptCriteria.ServerSideCriterion#getIdentifier
 * ()
 */
@Override
protected String getIdentifier() {
    return TreeDropCriterion.class.getCanonicalName();
}
3.68
framework_Range_expand
/**
 * Creates a range that is expanded the given amounts in both ends.
 *
 * @param startDelta
 *            the amount to expand by in the beginning of the range
 * @param endDelta
 *            the amount to expand by in the end of the range
 *
 * @return an expanded range
 *
 * @throws IllegalArgumentException
 *             if the new range would have <code>start &gt; end</code>
 */
public Range expand(int startDelta, int endDelta)
        throws IllegalArgumentException {
    return Range.between(getStart() - startDelta, getEnd() + endDelta);
}
3.68
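An illustrative sketch of expand, assuming the half-open Range.between(start, end) factory from the same class; the concrete numbers are invented:

Range range = Range.between(5, 10);   // rows 5..9
Range grown = range.expand(2, 3);     // rows 3..12
Range shrunk = range.expand(-1, -2);  // rows 6..7; negative deltas shrink the range
// range.expand(-4, -4) would throw IllegalArgumentException (start > end)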
graphhopper_LocationIndex_onTile
/**
 * This method is called if isTileInfo returns true.
 */
default void onTile(BBox bbox, int depth) {
}
3.68
querydsl_AliasFactory_createAliasForProperty
/**
 * Create an alias instance for the given class, parent and path
 *
 * @param <A>
 * @param cl type for alias
 * @param path underlying expression
 * @return alias instance
 */
public <A> A createAliasForProperty(Class<A> cl, Expression<?> path) {
    return createProxy(cl, path);
}
3.68
hbase_SnapshotInfo_getSnapshotsFilesMap
/**
 * Returns the map of store files based on path for all snapshots
 * @param conf the {@link Configuration} to use
 * @param uniqueHFilesArchiveSize pass out the size for store files in archive
 * @param uniqueHFilesSize pass out the size for store files shared
 * @param uniqueHFilesMobSize pass out the size for mob store files shared
 * @return the map of store files
 */
public static Map<Path, Integer> getSnapshotsFilesMap(final Configuration conf,
  AtomicLong uniqueHFilesArchiveSize, AtomicLong uniqueHFilesSize, AtomicLong uniqueHFilesMobSize)
  throws IOException {
  List<SnapshotDescription> snapshotList = getSnapshotList(conf);
  if (snapshotList.isEmpty()) {
    return Collections.emptyMap();
  }

  ConcurrentHashMap<Path, Integer> fileMap = new ConcurrentHashMap<>();

  ExecutorService exec = SnapshotManifest.createExecutor(conf, "SnapshotsFilesMapping");

  try {
    for (final SnapshotDescription snapshot : snapshotList) {
      getSnapshotFilesMap(conf, snapshot, exec, fileMap, uniqueHFilesArchiveSize,
        uniqueHFilesSize, uniqueHFilesMobSize);
    }
  } finally {
    exec.shutdown();
  }

  return fileMap;
}
3.68
framework_ValueChangeHandler_scheduleValueChange
/**
 * Called whenever a change in the value has been detected. Schedules a
 * value change to be sent to the server, depending on the current value
 * change mode.
 * <p>
 * Note that this method does not consider the {@link ValueChangeMode#BLUR}
 * mode but assumes that {@link #sendValueChange()} is called directly for
 * this mode.
 */
public void scheduleValueChange() {
    switch (valueChangeMode) {
    case LAZY:
        lazyTextChange();
        break;
    case TIMEOUT:
        timeoutTextChange();
        break;
    case EAGER:
        eagerTextChange();
        break;
    case BLUR:
        // Nothing to schedule for this mode
        break;
    default:
        throw new IllegalStateException("Unknown mode: " + valueChangeMode);
    }
}
3.68
morf_DatabaseSchemaManager_dropTableIfPresent
/**
 * Drop the specified table from the schema if it is present.
 *
 * @param producerCache database dataset producer cache
 * @param tableName table name to drop
 * @return sql statements
 */
public Collection<String> dropTableIfPresent(ProducerCache producerCache, String tableName) {
  Table table = getTable(producerCache, tableName);
  return table == null ? Collections.emptySet() : dropTable(table);
}
3.68
framework_VCustomLayout_add
/** Adding widget without specifying location is not supported. */ @Override public void add(Widget w) { throw new UnsupportedOperationException(); }
3.68
querydsl_JDOQueryFactory_selectDistinct
/**
 * Create a new {@link JDOQuery} instance with the given projection
 *
 * @param exprs projection
 * @return select(distinct exprs)
 */
public JDOQuery<Tuple> selectDistinct(Expression<?>... exprs) {
    return query().select(exprs).distinct();
}
3.68
hadoop_BlockBlobAppendStream_setBlocksCountAndBlockIdPrefix
/**
 * Helper method used to generate the blockIDs. The algorithm used is similar
 * to the Azure storage SDK.
 */
private void setBlocksCountAndBlockIdPrefix(List<BlockEntry> blockEntries) {
  if (nextBlockCount == UNSET_BLOCKS_COUNT && blockIdPrefix == null) {
    Random sequenceGenerator = new Random();

    String blockZeroBlockId = (!blockEntries.isEmpty()) ? blockEntries.get(0).getId() : "";
    String prefix = UUID.randomUUID().toString() + "-";
    String sampleNewerVersionBlockId = generateNewerVersionBlockId(prefix, 0);

    if (!blockEntries.isEmpty()
        && blockZeroBlockId.length() < sampleNewerVersionBlockId.length()) {

      // If blob has already been created with 2.2.0, append subsequent blocks
      // with older version (2.2.0) blockId compute nextBlockCount, the way it
      // was done before; and don't use blockIdPrefix
      this.blockIdPrefix = "";
      nextBlockCount = (long) (sequenceGenerator.nextInt(Integer.MAX_VALUE))
          + sequenceGenerator.nextInt(Integer.MAX_VALUE - MAX_BLOCK_COUNT);
      nextBlockCount += blockEntries.size();
    } else {
      // If there are no existing blocks, create the first block with newer
      // version (4.2.0) blockId. If blob has already been created with 4.2.0,
      // append subsequent blocks with newer version (4.2.0) blockId
      this.blockIdPrefix = prefix;
      nextBlockCount = blockEntries.size();
    }
  }
}
3.68
hbase_HtmlQuoting_quoteHtmlChars
/**
 * Quote the given item to make it html-safe.
 * @param item the string to quote
 * @return the quoted string
 */
public static String quoteHtmlChars(String item) {
  if (item == null) {
    return null;
  }
  byte[] bytes = Bytes.toBytes(item);
  if (needsQuoting(bytes, 0, bytes.length)) {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    try {
      quoteHtmlChars(buffer, bytes, 0, bytes.length);
    } catch (IOException ioe) {
      // Won't happen, since it is a bytearrayoutputstream
    }
    return buffer.toString();
  } else {
    return item;
  }
}
3.68
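A small usage sketch for the quoting helper above; the input strings are invented for illustration:

String quoted = HtmlQuoting.quoteHtmlChars("x < y & z");
// expected: "x &lt; y &amp; z"
String plain = HtmlQuoting.quoteHtmlChars("plain text");
// strings with nothing to escape are returned unchanged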
hudi_HoodieFileGroup_getAllBaseFiles
/**
 * Stream of committed data files, sorted reverse commit time.
 */
public Stream<HoodieBaseFile> getAllBaseFiles() {
  return getAllFileSlices().filter(slice -> slice.getBaseFile().isPresent())
      .map(slice -> slice.getBaseFile().get());
}
3.68
hadoop_StageConfig_withJobAttemptDir
/**
 * Set Job attempt directory.
 * @param dir new dir
 * @return this
 */
public StageConfig withJobAttemptDir(final Path dir) {
  checkOpen();
  jobAttemptDir = dir;
  return this;
}
3.68