name: string (lengths 12 to 178)
code_snippet: string (lengths 8 to 36.5k)
score: float64 (range 3.26 to 3.68)
framework_VDragAndDropManager_handleServerResponse
/** * Handle the server response for drag and drop. * * @param valueMap * DnD value map from the response */ public void handleServerResponse(ValueMap valueMap) { if (serverCallback == null) { return; } Profiler.enter("VDragAndDropManager.handleServerResponse"); UIDL uidl = (UIDL) valueMap.cast(); int visitId = uidl.getIntAttribute("visitId"); if (this.visitId == visitId) { serverCallback.handleResponse(uidl.getBooleanAttribute("accepted"), uidl); serverCallback = null; } runDeferredCommands(); Profiler.leave("VDragAndDropManager.handleServerResponse"); }
3.68
morf_DialectSpecificHint_isSameDatabaseType
/** * Tests whether the supplied databaseType string is equal to the databaseType that is held by this class. * The test is performed using {@link java.lang.String#equals(Object)}. * * @param databaseType a database type identifier. Eg: ORACLE, PGSQL, SQL_SERVER * @return true if the databaseType parameter is equal to this databaseType, false otherwise */ public boolean isSameDatabaseType(String databaseType) { return this.databaseType.equals(databaseType); }
3.68
morf_AbstractSqlDialectTest_testCreateViewStatements
/** * Tests the SQL for creating views. */ @SuppressWarnings("unchecked") @Test public void testCreateViewStatements() { compareStatements( expectedCreateViewStatements(), testDialect.viewDeploymentStatements(testView)); }
3.68
flink_ZooKeeperStateHandleStore_writeStoreHandleTransactionally
// this method is provided for the sole purpose of easier testing @VisibleForTesting void writeStoreHandleTransactionally(String path, byte[] serializedStoreHandle) throws Exception { // Write state handle (not the actual state) to ZooKeeper. This is expected to be smaller // than the state itself. This level of indirection makes sure that data in ZooKeeper is // small, because ZooKeeper is designed for data in the KB range, but the state can be // larger. Create the lock node in a transaction with the actual state node. That way we can // prevent race conditions with a concurrent delete operation. client.inTransaction() .create() .withMode(CreateMode.PERSISTENT) .forPath(path, serializedStoreHandle) .and() .create() .withMode(CreateMode.PERSISTENT) .forPath(getRootLockPath(path)) .and() .create() .withMode(CreateMode.EPHEMERAL) .forPath(getInstanceLockPath(path)) .and() .commit(); }
3.68
hbase_ReplicationSourceManager_checkBufferQuota
/** * Check if {@link ReplicationSourceManager#totalBufferUsed} exceeds * {@link ReplicationSourceManager#totalBufferLimit} for the peer. * @return true if {@link ReplicationSourceManager#totalBufferUsed} is not more than * {@link ReplicationSourceManager#totalBufferLimit}. */ boolean checkBufferQuota(String peerId) { // try not to go over total quota if (totalBufferUsed.get() > totalBufferLimit) { LOG.warn("peer={}, can't read more edits from WAL as buffer usage {}B exceeds limit {}B", peerId, totalBufferUsed.get(), totalBufferLimit); return false; } return true; }
3.68
hadoop_AbfsInputStreamStatisticsImpl_readOperationStarted
/** * A {@code read(byte[] buf, int off, int len)} operation has started. */ @Override public void readOperationStarted() { readOps.incrementAndGet(); }
3.68
flink_MailboxMetricsController_setupLatencyMeasurement
/** * Sets up latency measurement with required {@link TimerService} and {@link MailboxExecutor}. * * <p>Note: For each instance, latency measurement can be set up only once. * * @param timerService {@link TimerService} used for latency measurement. * @param mailboxExecutor {@link MailboxExecutor} used for latency measurement. */ public void setupLatencyMeasurement( TimerService timerService, MailboxExecutor mailboxExecutor) { checkState( !isLatencyMeasurementSetup(), "latency measurement has already been setup and cannot be setup twice"); this.timerService = timerService; this.mailboxExecutor = mailboxExecutor; }
3.68
hadoop_OBSFileSystem_isFsBucket
/** * Whether this is a POSIX (file system) bucket. * * @return true if this is a POSIX bucket, false otherwise */ boolean isFsBucket() { return enablePosix; }
3.68
flink_TwoPhaseCommitSinkFunction_finishRecoveringContext
/** * Callback for subclasses which is called after restoring (each) user context. * * @param handledTransactions transactions which were already committed or aborted and do not * need further handling */ protected void finishRecoveringContext(Collection<TXN> handledTransactions) {}
3.68
rocketmq-connect_JsonConverterConfig_decimalFormat
/** * Get the serialization format for decimal types. * * @return the decimal serialization format */ public DecimalFormat decimalFormat() { return decimalFormat; }
3.68
dubbo_DubboMergingDigest_setMinMax
/** * Override the min and max values for testing purposes. */ @SuppressWarnings("SameParameterValue") void setMinMax(double min, double max) { this.min = min; this.max = max; }
3.68
morf_AbstractSqlDialectTest_expectedHints8a
/** * @return The expected SQL for the {@link SelectStatement#withDialectSpecificHint(String, String)} directive. Tests that all dialects do not react to an empty hint being supplied. */ protected String expectedHints8a() { return "SELECT * FROM SCHEMA2.Foo"; //NOSONAR }
3.68
hbase_HFileReaderImpl_seekTo
/** * Positions this scanner at the start of the file. * @return false if empty file; i.e. a call to next would return false and the current key and * value are undefined. */ @Override public boolean seekTo() throws IOException { if (reader == null) { return false; } if (reader.getTrailer().getEntryCount() == 0) { // No data blocks. return false; } long firstDataBlockOffset = reader.getTrailer().getFirstDataBlockOffset(); if (curBlock != null && curBlock.getOffset() == firstDataBlockOffset) { return processFirstDataBlock(); } readAndUpdateNewBlock(firstDataBlockOffset); return true; }
3.68
hbase_TableState_isEnabled
/** Returns True if table is {@link State#ENABLED}. */ public boolean isEnabled() { return isInStates(State.ENABLED); }
3.68
hadoop_ClientGSIContext_getRouterFederatedStateMap
/** * Utility function to parse the routerFederatedState field in RPC headers. */ public static Map<String, Long> getRouterFederatedStateMap(ByteString byteString) { if (byteString != null) { try { RouterFederatedStateProto federatedState = RouterFederatedStateProto.parseFrom(byteString); return federatedState.getNamespaceStateIdsMap(); } catch (InvalidProtocolBufferException e) { // Ignore this exception and return an empty map } } return Collections.emptyMap(); }
3.68
querydsl_SQLExpressions_selectFrom
/** * Create a new detached SQLQuery instance with the given projection * * @param expr query source and projection * @param <T> * @return select(expr).from(expr) */ public static <T> SQLQuery<T> selectFrom(RelationalPath<T> expr) { return select(expr).from(expr); }
3.68
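A brief usage sketch for the selectFrom shortcut above. QEmployee, Employee and the lastname column are hypothetical generated query types, not part of the snippet; the point is only that selectFrom(expr) is shorthand for select(expr).from(expr) on a detached query.
// Hypothetical generated metamodel; the detached query is bound to a connection later.
QEmployee employee = QEmployee.employee;
SQLQuery<Employee> query = SQLExpressions.selectFrom(employee)
        .where(employee.lastname.eq("Smith"));
// Equivalent to: SQLExpressions.select(employee).from(employee).where(...)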
framework_Escalator_getSpacerHeightsSumBetweenPx
/** * Gets the amount of pixels occupied by spacers between two pixel * points. * <p> * In this method, the {@link SpacerInclusionStrategy} has the following * meaning when a spacer lies in the middle of either pixel argument: * <dl> * <dt>{@link SpacerInclusionStrategy#COMPLETE COMPLETE} * <dd>take the entire spacer into account * <dt>{@link SpacerInclusionStrategy#PARTIAL PARTIAL} * <dd>take only the visible area into account * <dt>{@link SpacerInclusionStrategy#NONE NONE} * <dd>ignore that spacer * </dl> * * @param rangeTop * the top pixel point * @param topInclusion * the inclusion strategy regarding {@code rangeTop}. * @param rangeBottom * the bottom pixel point * @param bottomInclusion * the inclusion strategy regarding {@code rangeBottom}. * @return the pixels occupied by spacers between {@code rangeTop} and * {@code rangeBottom} */ public double getSpacerHeightsSumBetweenPx(double rangeTop, SpacerInclusionStrategy topInclusion, double rangeBottom, SpacerInclusionStrategy bottomInclusion) { assert rangeTop <= rangeBottom : "rangeTop must not be greater than rangeBottom"; double heights = 0; /* * TODO [[optimize]]: this might be somewhat inefficient (due to * iterator-based scanning, instead of using the treemap's search * functionalities). But it should be easy to write, read, verify * and maintain. */ for (SpacerImpl spacer : rowIndexToSpacer.values()) { double top = spacer.getTop(); double height = spacer.getHeight(); double bottom = top + height; /* * If we happen to implement a DoubleRange (in addition to the * int-based Range) at some point, the following logic should * probably be converted into using the * Range.partitionWith-equivalent. */ boolean topIsAboveRange = top < rangeTop; boolean topIsInRange = rangeTop <= top && top <= rangeBottom; boolean topIsBelowRange = rangeBottom < top; boolean bottomIsAboveRange = bottom < rangeTop; boolean bottomIsInRange = rangeTop <= bottom && bottom <= rangeBottom; boolean bottomIsBelowRange = rangeBottom < bottom; assert topIsAboveRange ^ topIsBelowRange ^ topIsInRange : "Bad top logic"; assert bottomIsAboveRange ^ bottomIsBelowRange ^ bottomIsInRange : "Bad bottom logic"; if (bottomIsAboveRange) { continue; } else if (topIsBelowRange) { return heights; } else if (topIsAboveRange && bottomIsInRange) { switch (topInclusion) { case PARTIAL: heights += bottom - rangeTop; break; case COMPLETE: heights += height; break; default: break; } } else if (topIsAboveRange && bottomIsBelowRange) { /* * Here we arbitrarily decide that the top inclusion will * have the honor of overriding the bottom inclusion if there * happens to be a conflict of interests. */ switch (topInclusion) { case NONE: return 0; case COMPLETE: return height; case PARTIAL: return rangeBottom - rangeTop; default: throw new IllegalArgumentException( "Unexpected inclusion state :" + topInclusion); } } else if (topIsInRange && bottomIsInRange) { heights += height; } else if (topIsInRange && bottomIsBelowRange) { switch (bottomInclusion) { case PARTIAL: heights += rangeBottom - top; break; case COMPLETE: heights += height; break; default: break; } return heights; } else { assert false : "Unaccounted-for situation"; } } return heights; }
3.68
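The heart of the spacer-sum method above is interval-overlap arithmetic between each spacer's [top, bottom] span and the queried [rangeTop, rangeBottom] span. A stripped-down, standalone sketch of that arithmetic (hypothetical helper, ignoring the inclusion strategies) is:
// Visible overlap of a spacer [top, bottom] with the query range [rangeTop, rangeBottom];
// zero when the two intervals are disjoint.
static double overlap(double top, double bottom, double rangeTop, double rangeBottom) {
    double visibleTop = Math.max(top, rangeTop);
    double visibleBottom = Math.min(bottom, rangeBottom);
    return Math.max(0, visibleBottom - visibleTop);
}
// Summing overlap(...) over all spacers matches the PARTIAL inclusion strategy;
// COMPLETE would add the full (bottom - top) whenever the overlap is non-zero.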
hbase_ReplaySyncReplicationWALCallable_filter
// return whether we should include this entry. private boolean filter(Entry entry) { WALEdit edit = entry.getEdit(); WALUtil.filterCells(edit, c -> CellUtil.matchingFamily(c, WALEdit.METAFAMILY) ? null : c); return !edit.isEmpty(); }
3.68
flink_AvailabilityProvider_isApproximatelyAvailable
/** * Checks whether this instance is available only via constant {@link #AVAILABLE} to avoid * performance concern caused by volatile access in {@link CompletableFuture#isDone()}. So it is * mainly used in the performance sensitive scenarios which do not always need the precise * state. * * <p>This method is still safe to get the precise state if {@link #getAvailableFuture()} was * touched via (.get(), .wait(), .isDone(), ...) before, which also has a "happen-before" * relationship with this call. * * @return true if this instance is available for further processing. */ default boolean isApproximatelyAvailable() { return getAvailableFuture() == AVAILABLE; }
3.68
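A hedged sketch of the intended calling pattern for the method above: check the cheap reference comparison first and fall back to the precise future only when needed. The provider variable, processRecord() and yieldToMailbox() are placeholders, not Flink API.
if (provider.isApproximatelyAvailable()) {
    // Fast path: plain reference comparison against AVAILABLE, no volatile read.
    processRecord();
} else if (provider.getAvailableFuture().isDone()) {
    // Precise but slightly more expensive check.
    processRecord();
} else {
    yieldToMailbox(); // placeholder back-pressure reaction
}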
hadoop_NMClient_localize
/** * Localize resources for a container. * @param containerId the ID of the container * @param nodeId node Id of the container * @param localResources resources to localize */ @InterfaceStability.Unstable public void localize(ContainerId containerId, NodeId nodeId, Map<String, LocalResource> localResources) throws YarnException, IOException { // do nothing. }
3.68
hmily_ConfigLoader_push
/** * Implementation of Active Remote Push. * * @param context the context * @param data the data */ default void push(final Supplier<Context> context, final EventData data) { if (data == null) { return; } Set<EventConsumer<EventData>> events = ConfigEnv.getInstance().getEvents(); if (events.isEmpty()) { return; } String properties = data.getProperties(); List<EventConsumer<EventData>> eventsLists = events.stream() .filter(e -> !Objects.isNull(e.regex())) .filter(e -> Pattern.matches(e.regex(), properties)) .collect(Collectors.toList()); for (EventConsumer<EventData> consumer : eventsLists) { Optional<Config> first = ConfigEnv.getInstance().stream().filter(e -> properties.startsWith(e.prefix())).findFirst(); first.ifPresent(x -> { List<PropertyKeySource<?>> sources = new ArrayList<>(); Map<String, Object> values = new HashMap<>(1); values.put(properties, data.getValue()); sources.add(new MapPropertyKeySource(first.get().prefix(), values)); PassiveHandler<Config> handler = (ct, cf) -> { data.setConfig(cf); data.setSubscribe(consumer.regex()); try { consumer.accept(data); } catch (ClassCastException e) { if (LOG.isWarnEnabled()) { LOG.warn("EventData of type [{}] not accepted by EventConsumer [{}]", data.getClass(), consumer); } } }; context.get().getOriginal().passive(() -> context.get().withSources(sources), handler, first.get()); }); } }
3.68
hadoop_OBSCommonUtils_getOBSAccessKeys
/** * Return the access key and secret for OBS API use. Credentials may exist in * configuration, within credential providers or indicated in the UserInfo of * the name URI param. * * @param name the URI for which we need the access keys. * @param conf the Configuration object to interrogate for keys. * @return OBSAccessKeys * @throws IOException problems retrieving passwords from KMS. */ static OBSLoginHelper.Login getOBSAccessKeys(final URI name, final Configuration conf) throws IOException { OBSLoginHelper.Login login = OBSLoginHelper.extractLoginDetailsWithWarnings(name); Configuration c = ProviderUtils.excludeIncompatibleCredentialProviders(conf, OBSFileSystem.class); String accessKey = getPassword(c, OBSConstants.ACCESS_KEY, login.getUser()); String secretKey = getPassword(c, OBSConstants.SECRET_KEY, login.getPassword()); String sessionToken = getPassword(c, OBSConstants.SESSION_TOKEN, login.getToken()); return new OBSLoginHelper.Login(accessKey, secretKey, sessionToken); }
3.68
pulsar_TxnLogBufferedWriter_newInstance
/** * This constructor is used only when batch is disabled. Unlike * {@link AsyncAddArgs#newInstance(AddDataCallback, Object, long)}, it takes a {@code byteBuf}. The {@code byteBuf} * generated by {@link DataSerializer#serialize(Object)} will be released during the callback when * {@link #recycle()} is executed. */ private static AsyncAddArgs newInstance(AddDataCallback callback, Object ctx, long addedTime, ByteBuf byteBuf) { AsyncAddArgs asyncAddArgs = newInstance(callback, ctx, addedTime); asyncAddArgs.byteBuf = byteBuf; return asyncAddArgs; }
3.68
framework_VScrollTable_handleBodyContextMenu
/** * Handles a context menu event on table body. * * @param left * left position of the context menu * @param top * top position of the context menu * @return true if a context menu was shown, otherwise false */ private boolean handleBodyContextMenu(int left, int top) { if (enabled && bodyActionKeys != null) { top += Window.getScrollTop(); left += Window.getScrollLeft(); client.getContextMenu().showAt(this, left, top); return true; } return false; }
3.68
hadoop_BaseRecord_equals
/** * Override equals check to use primary key(s) for comparison. */ @Override public boolean equals(Object obj) { if (!(obj instanceof BaseRecord)) { return false; } BaseRecord baseObject = (BaseRecord) obj; Map<String, String> keyset1 = this.getPrimaryKeys(); Map<String, String> keyset2 = baseObject.getPrimaryKeys(); return keyset1.equals(keyset2); }
3.68
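A self-contained sketch of the same primary-key-based equality pattern, under the assumption that records expose their identifying fields as a map; the class name is made up, and hashCode() is included to stay consistent with equals():
import java.util.Map;

abstract class KeyedRecord {
    abstract Map<String, String> getPrimaryKeys();

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof KeyedRecord)) {
            return false;
        }
        // Two records are equal exactly when their primary key maps match.
        return getPrimaryKeys().equals(((KeyedRecord) obj).getPrimaryKeys());
    }

    @Override
    public int hashCode() {
        // Derived from the same keys so it stays consistent with equals().
        return getPrimaryKeys().hashCode();
    }
}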
framework_CalendarMonthDropHandler_isLocationValid
/** * Checks if one can perform a drop on an element. * * @param elementOver * The element to check * @return true if the element is a valid drop location, false otherwise */ private boolean isLocationValid(Element elementOver) { Element monthGridElement = calendarConnector.getWidget().getMonthGrid() .getElement(); // drops are not allowed in: // - weekday header // - week number bar return DOM.isOrHasChild(monthGridElement, elementOver); }
3.68
hadoop_ZKPathDumper_expand
/** * Recursively expand the path into the supplied string builder, increasing * the indentation by {@link #INDENT} as it proceeds (depth first) down * the tree. * @param builder string builder to append to * @param path path to examine * @param indent current indentation */ private void expand(StringBuilder builder, String path, int indent) { try { GetChildrenBuilder childrenBuilder = curator.getChildren(); List<String> children = childrenBuilder.forPath(path); for (String child : children) { String childPath = path + "/" + child; String body; Stat stat = curator.checkExists().forPath(childPath); StringBuilder bodyBuilder = new StringBuilder(256); bodyBuilder.append(" [") .append(stat.getDataLength()) .append("]"); if (stat.getEphemeralOwner() > 0) { bodyBuilder.append("*"); } if (verbose) { // verbose: extract ACLs builder.append(" -- "); List<ACL> acls = curator.getACL().forPath(childPath); for (ACL acl : acls) { builder.append(RegistrySecurity.aclToString(acl)); builder.append(" "); } } body = bodyBuilder.toString(); // print each child append(builder, indent, ' '); builder.append('/').append(child); builder.append(body); builder.append('\n'); // recurse expand(builder, childPath, indent + INDENT); } } catch (Exception e) { builder.append(e.toString()).append("\n"); } }
3.68
hadoop_EmptyIOStatisticsContextImpl_getInstance
/** * Get the single instance. * @return an instance. */ static IOStatisticsContext getInstance() { return EMPTY_CONTEXT; }
3.68
flink_InternalOperatorIOMetricGroup_reuseOutputMetricsForTask
/** Causes the containing task to use this operator's output record counter. */ public void reuseOutputMetricsForTask() { TaskIOMetricGroup taskIO = parentMetricGroup.getTaskIOMetricGroup(); taskIO.reuseRecordsOutputCounter(this.numRecordsOut); }
3.68
morf_OracleDialect_internalTableDeploymentStatements
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#tableDeploymentStatements(org.alfasoftware.morf.metadata.Table) */ @Override public Collection<String> internalTableDeploymentStatements(Table table) { return tableDeploymentStatements(table, false); }
3.68
hmily_HmilyTacTransactionManager_commit
/** * Commit. * * @param currentTransaction the current transaction */ public void commit(final HmilyTransaction currentTransaction) { log.debug("TAC-tm-commit ::: {}", currentTransaction); if (Objects.isNull(currentTransaction)) { return; } List<HmilyParticipant> hmilyParticipants = currentTransaction.getHmilyParticipants(); if (CollectionUtils.isEmpty(hmilyParticipants)) { return; } List<Boolean> successList = Lists.newArrayList(); for (HmilyParticipant participant : hmilyParticipants) { try { if (participant.getRole() == HmilyRoleEnum.START.getCode()) { HmilyTacLocalParticipantExecutor.confirm(participant); } else { HmilyReflector.executor(HmilyActionEnum.CONFIRMING, ExecutorTypeEnum.RPC, participant); } successList.add(true); } catch (Throwable e) { successList.add(false); log.error("HmilyParticipant rollback exception :{} ", participant.toString()); } finally { HmilyContextHolder.remove(); } } if (successList.stream().allMatch(e -> e)) { // remove global HmilyRepositoryStorage.removeHmilyTransaction(currentTransaction); } }
3.68
pulsar_NoStrictCacheSizeAllocator_allocate
/** * This operation consumes the available cache size. * If the requested size exceeds the available size, the allocation should still be allowed, * because a single entry may exceed the available size and * the query must be able to finish; in that case the available size temporarily becomes invalid. * * @param size the size to allocate */ public void allocate(long size) { lock.lock(); try { availableCacheSize.add(-size); } finally { lock.unlock(); } }
3.68
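A minimal standalone sketch of the non-strict accounting idea described in that Javadoc: an allocation may push the counter negative so a single oversized entry can still finish, and a matching release restores the budget. Class and method names are invented; only the LongAdder/ReentrantLock pattern mirrors the snippet.
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.ReentrantLock;

class NonStrictSizeBudget {
    private final LongAdder available = new LongAdder();
    private final ReentrantLock lock = new ReentrantLock();

    NonStrictSizeBudget(long capacity) {
        available.add(capacity);
    }

    void allocate(long size) {
        lock.lock();
        try {
            // May go negative: an oversized entry is still allowed to complete.
            available.add(-size);
        } finally {
            lock.unlock();
        }
    }

    void release(long size) {
        lock.lock();
        try {
            available.add(size);
        } finally {
            lock.unlock();
        }
    }

    boolean hasBudget() {
        return available.sum() > 0;
    }
}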
hadoop_FederationStateStoreFacade_getActiveSubClusters
/** * Get active subclusters. * * @return the active subclusters as a Collection. */ public Collection<SubClusterInfo> getActiveSubClusters() throws NotFoundException { try { Map<SubClusterId, SubClusterInfo> subClusterMap = getSubClusters(true); if (MapUtils.isEmpty(subClusterMap)) { throw new NotFoundException("Not Found SubClusters."); } return subClusterMap.values(); } catch (Exception e) { LOG.error("getActiveSubClusters failed.", e); return null; } }
3.68
hbase_MiniBatchOperationInProgress_setOperationStatus
/** * Sets the status code for the operation (Mutation) at the specified position. By setting this * status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} can make HRegion skip * Mutations. */ public void setOperationStatus(int index, OperationStatus opStatus) { this.retCodeDetails[getAbsoluteIndex(index)] = opStatus; }
3.68
hadoop_ContainerServiceRecordProcessor_createPTRInfo
/** * Creates a container PTR record descriptor. * @param record the service record. * @throws Exception if the descriptor creation yields an issue. */ protected void createPTRInfo(ServiceRecord record) throws Exception { PTRContainerRecordDescriptor ptrInfo = new PTRContainerRecordDescriptor(getPath(), record); registerRecordDescriptor(Type.PTR, ptrInfo); }
3.68
hbase_MemoryBoundedLogMessageBuffer_estimateHeapUsage
/** * Estimate the number of bytes this buffer is currently using. */ synchronized long estimateHeapUsage() { return usage; }
3.68
flink_MurmurHashUtils_hashBytes
/** * Hash bytes in MemorySegment. * * @param segment segment. * @param offset offset for MemorySegment * @param lengthInBytes length in MemorySegment * @return hash code */ public static int hashBytes(MemorySegment segment, int offset, int lengthInBytes) { return hashBytes(segment, offset, lengthInBytes, DEFAULT_SEED); }
3.68
framework_VTabsheetBase_isDynamicWidth
/** * Returns whether the width of the widget is undefined. * * @since 7.2 * @return {@code true} if width of the widget is determined by its content, * {@code false} otherwise */ protected boolean isDynamicWidth() { return getConnectorForWidget(this).isUndefinedWidth(); }
3.68
hbase_HFileBlockDefaultEncodingContext_prepareEncoding
/** * prepare to start a new encoding. */ public void prepareEncoding(DataOutputStream out) throws IOException { if (encodingAlgo != null && encodingAlgo != DataBlockEncoding.NONE) { encodingAlgo.writeIdInBytes(out); } }
3.68
hbase_TableSplit_getRegionLocation
/** * Returns the region location. * @return The region's location. */ public String getRegionLocation() { return regionLocation; }
3.68
hbase_AggregateImplementation_getMin
/** * Gives the minimum for a given combination of column qualifier and column family, in the given * row range as defined in the Scan object. In its current implementation, it takes one column * family and one column qualifier (if provided). In case of null column qualifier, minimum value * for the entire column family will be returned. */ @Override public void getMin(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) { AggregateResponse response = null; InternalScanner scanner = null; T min = null; try { ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request); T temp; Scan scan = ProtobufUtil.toScan(request.getScan()); scanner = env.getRegion().getScanner(scan); List<Cell> results = new ArrayList<>(); byte[] colFamily = scan.getFamilies()[0]; NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily); byte[] qualifier = null; if (qualifiers != null && !qualifiers.isEmpty()) { qualifier = qualifiers.pollFirst(); } boolean hasMoreRows = false; do { hasMoreRows = scanner.next(results); int listSize = results.size(); for (int i = 0; i < listSize; i++) { temp = ci.getValue(colFamily, qualifier, results.get(i)); min = (min == null || (temp != null && ci.compare(temp, min) < 0)) ? temp : min; } results.clear(); } while (hasMoreRows); if (min != null) { response = AggregateResponse.newBuilder() .addFirstPart(ci.getProtoForCellType(min).toByteString()).build(); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); } finally { if (scanner != null) { IOUtils.closeQuietly(scanner); } } log.info("Minimum from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + min); done.run(response); }
3.68
hbase_BalanceResponse_build
/** * Build the {@link BalanceResponse} */ public BalanceResponse build() { return new BalanceResponse(balancerRan, movesCalculated, movesExecuted); }
3.68
shardingsphere-elasticjob_SetUpFacade_tearDown
/** * Tear down. */ public void tearDown() { regCenter.removeConnStateListener("/" + this.jobName); regCenter.removeDataListeners("/" + this.jobName); if (reconcileService.isRunning()) { reconcileService.stopAsync(); } }
3.68
hadoop_CommonAuditContext_setGlobalContextEntry
/** * Set a global entry. * @param key key * @param value value */ public static void setGlobalContextEntry(String key, String value) { GLOBAL_CONTEXT_MAP.put(key, value); }
3.68
hadoop_FedBalance_build
/** * Build the balance job. */ public BalanceJob build() throws IOException { // Construct job context. FedBalanceContext context; Path dst = new Path(inputDst); if (dst.toUri().getAuthority() == null) { throw new IOException("The destination cluster must be specified."); } Path src = new Path(inputSrc); if (src.toUri().getAuthority() == null) { throw new IOException("The source cluster must be specified."); } context = new FedBalanceContext.Builder(src, dst, NO_MOUNT, getConf()) .setForceCloseOpenFiles(forceCloseOpen).setUseMountReadOnly(false) .setMapNum(map).setBandwidthLimit(bandwidth).setTrash(trashOpt) .setDiffThreshold(diffThreshold).build(); LOG.info(context.toString()); // Construct the balance job. BalanceJob.Builder<BalanceProcedure> builder = new BalanceJob.Builder<>(); DistCpProcedure dcp = new DistCpProcedure(DISTCP_PROCEDURE, null, delayDuration, context); builder.nextProcedure(dcp); TrashProcedure tp = new TrashProcedure(TRASH_PROCEDURE, null, delayDuration, context); builder.nextProcedure(tp); return builder.build(); }
3.68
hmily_DubboRpcXaProxy_getUrl
/** * Gets url. * * @return the url */ public String getUrl() { return invoker.getUrl().toString(); }
3.68
hadoop_OSSListResult_v2
/** * Restricted constructors to ensure v1 or v2, not both. * @param result v2 result * @return new list result container */ public static OSSListResult v2(ListObjectsV2Result result) { return new OSSListResult(null, result); }
3.68
flink_KvStateLocation_getJobVertexId
/** * Returns the JobVertexID the KvState instances belong to. * * @return JobVertexID the KvState instances belong to */ public JobVertexID getJobVertexId() { return jobVertexId; }
3.68
flink_HsSelectiveSpillingStrategy_onBufferConsumed
// For the case of a consumed buffer, this buffer needs to be released. Control of the buffer is taken // over by the downstream task. @Override public Optional<Decision> onBufferConsumed(BufferIndexAndChannel consumedBuffer) { return Optional.of(Decision.builder().addBufferToRelease(consumedBuffer).build()); }
3.68
hbase_HttpServer_start
/** * Start the server. Does not wait for the server to start. */ public void start() throws IOException { try { try { openListeners(); webServer.start(); } catch (IOException ex) { LOG.info("HttpServer.start() threw a non Bind IOException", ex); throw ex; } catch (MultiException ex) { LOG.info("HttpServer.start() threw a MultiException", ex); throw ex; } // Make sure there are no handler failures. Handler[] handlers = webServer.getHandlers(); for (int i = 0; i < handlers.length; i++) { if (handlers[i].isFailed()) { throw new IOException("Problem in starting http server. Server handlers failed"); } } // Make sure there are no errors initializing the context. Throwable unavailableException = webAppContext.getUnavailableException(); if (unavailableException != null) { // Have to stop the webserver, or else its non-daemon threads // will hang forever. webServer.stop(); throw new IOException("Unable to initialize WebAppContext", unavailableException); } } catch (IOException e) { throw e; } catch (InterruptedException e) { throw (IOException) new InterruptedIOException("Interrupted while starting HTTP server") .initCause(e); } catch (Exception e) { throw new IOException("Problem starting http server", e); } }
3.68
hadoop_AbfsPermission_isExtendedAcl
/** * Check whether abfs symbolic permission string is a extended Acl * @param abfsSymbolicPermission e.g. "rw-rw-rw-+" / "rw-rw-rw-" * @return true if the permission string indicates the existence of an * extended ACL; otherwise false. */ public static boolean isExtendedAcl(final String abfsSymbolicPermission) { if (abfsSymbolicPermission == null) { return false; } return abfsSymbolicPermission.charAt(abfsSymbolicPermission.length() - 1) == '+'; }
3.68
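A quick illustration of the check above: the trailing '+' is what marks an ABFS symbolic permission as carrying an extended ACL.
boolean withAcl = AbfsPermission.isExtendedAcl("rw-rw-rw-+"); // true, trailing '+'
boolean plain = AbfsPermission.isExtendedAcl("rw-rw-rw-");    // false
boolean nullArg = AbfsPermission.isExtendedAcl(null);         // false, null-safe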
zxing_PDF417ResultMetadata_getFileName
/** * Filename of the encoded file * * @return filename */ public String getFileName() { return fileName; }
3.68
flink_LogicalTypeCasts_supportsExplicitCast
/** * Returns whether the source type can be casted to the target type. * * <p>Explicit casts correspond to the SQL cast specification and represent the logic behind a * {@code CAST(sourceType AS targetType)} operation. For example, it allows for converting most * types of the {@link LogicalTypeFamily#PREDEFINED} family to types of the {@link * LogicalTypeFamily#CHARACTER_STRING} family. */ public static boolean supportsExplicitCast(LogicalType sourceType, LogicalType targetType) { return supportsCasting(sourceType, targetType, true); }
3.68
hbase_TableQueue_requireTableExclusiveLock
/** * @param proc must not be null */ private static boolean requireTableExclusiveLock(TableProcedureInterface proc) { switch (proc.getTableOperationType()) { case CREATE: case DELETE: case DISABLE: case ENABLE: return true; case EDIT: // we allow concurrent edit on the ns family in meta table return !proc.getTableName().equals(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME); case READ: case FLUSH: case SNAPSHOT: return false; // region operations are using the shared-lock on the table // and then they will grab an xlock on the region. case REGION_SPLIT: case REGION_MERGE: case REGION_ASSIGN: case REGION_UNASSIGN: case REGION_EDIT: case REGION_GC: case MERGED_REGIONS_GC: case REGION_SNAPSHOT: case REGION_TRUNCATE: return false; default: break; } throw new UnsupportedOperationException("unexpected type " + proc.getTableOperationType()); }
3.68
flink_Plan_accept
/** * Traverses the job depth first from all data sinks on towards the sources. * * @see Visitable#accept(Visitor) */ @Override public void accept(Visitor<Operator<?>> visitor) { for (GenericDataSinkBase<?> sink : this.sinks) { sink.accept(visitor); } }
3.68
framework_PropertysetItem_removeListener
/** * @deprecated As of 7.0, replaced by * {@link #removePropertySetChangeListener(Item.PropertySetChangeListener)} */ @Override @Deprecated public void removeListener(Item.PropertySetChangeListener listener) { removePropertySetChangeListener(listener); }
3.68
morf_OracleDialect_updateStatementPreTableDirectives
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#updateStatementPreTableDirectives(org.alfasoftware.morf.sql.UpdateStatement) */ @Override protected String updateStatementPreTableDirectives(UpdateStatement updateStatement) { if(updateStatement.getHints().isEmpty()) { return ""; } StringBuilder builder = new StringBuilder("/*+"); for (Hint hint : updateStatement.getHints()) { if (hint instanceof UseParallelDml) { builder.append(" ENABLE_PARALLEL_DML PARALLEL"); builder.append(((UseParallelDml) hint).getDegreeOfParallelism() .map(d -> "(" + d + ")") .orElse("")); } } return builder.append(" */ ").toString(); }
3.68
hbase_CompactionTool_getSplits
/** * Returns a split for each store files directory using the block location of each file as * locality reference. */ @Override public List<InputSplit> getSplits(JobContext job) throws IOException { List<InputSplit> splits = new ArrayList<>(); List<FileStatus> files = listStatus(job); Text key = new Text(); for (FileStatus file : files) { Path path = file.getPath(); FileSystem fs = path.getFileSystem(job.getConfiguration()); LineReader reader = new LineReader(fs.open(path)); long pos = 0; int n; try { while ((n = reader.readLine(key)) > 0) { String[] hosts = getStoreDirHosts(fs, path); splits.add(new FileSplit(path, pos, n, hosts)); pos += n; } } finally { reader.close(); } } return splits; }
3.68
hbase_CachedClusterId_attemptFetch
/** * Attempts to fetch the cluster ID from the file system. If no attempt is already in progress, * synchronously fetches the cluster ID and sets it. If an attempt is already in progress, returns * right away and the caller is expected to wait for the fetch to finish. * @return true if the attempt is done, false if another thread is already fetching it. */ private boolean attemptFetch() { if (fetchInProgress.compareAndSet(false, true)) { // A fetch is not in progress, so try fetching the cluster ID synchronously and then notify // the waiting threads. try { cacheMisses.incrementAndGet(); setClusterId(FSUtils.getClusterId(fs, rootDir)); } catch (IOException e) { LOG.warn("Error fetching cluster ID", e); } finally { Preconditions.checkState(fetchInProgress.compareAndSet(true, false)); synchronized (fetchInProgress) { fetchInProgress.notifyAll(); } } return true; } return false; }
3.68
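A speculative sketch of the waiting side that the comment alludes to: a caller that loses the compareAndSet race blocks on the fetchInProgress monitor until the fetching thread calls notifyAll(). The clusterId field, method name and timeout handling are assumptions, not the HBase implementation.
// Hypothetical caller-side counterpart to attemptFetch()'s notifyAll().
String getClusterIdBlocking(long waitMillis) throws InterruptedException {
    if (!attemptFetch()) {
        synchronized (fetchInProgress) {
            while (fetchInProgress.get()) {
                fetchInProgress.wait(waitMillis);
            }
        }
    }
    return clusterId; // assumed to be populated by setClusterId(...) in the fetching thread
}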
pulsar_ConnectorUtils_getIOSinkClass
/** * Extract the Pulsar IO Sink class from a connector archive. */ public static String getIOSinkClass(NarClassLoader narClassLoader) throws IOException { ConnectorDefinition conf = getConnectorDefinition(narClassLoader); if (StringUtils.isEmpty(conf.getSinkClass())) { throw new IOException( String.format("The '%s' connector does not provide a sink implementation", conf.getName())); } try { // Try to load sink class and check it implements Sink interface Class sinkClass = narClassLoader.loadClass(conf.getSinkClass()); if (!(Sink.class.isAssignableFrom(sinkClass))) { throw new IOException( "Class " + conf.getSinkClass() + " does not implement interface " + Sink.class.getName()); } } catch (Throwable t) { Exceptions.rethrowIOException(t); } return conf.getSinkClass(); }
3.68
pulsar_SchemaDefinitionImpl_getPojo
/** * Get pojo schema definition. * * @return pojo class */ @Override public Class<T> getPojo() { return pojo; }
3.68
hbase_TableDescriptorBuilder_setColumnFamily
/** * Adds a column family. For the updating purpose please use * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead. * @param family to add. * @return the modifyable TD */ public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) { if (family.getName() == null || family.getName().length <= 0) { throw new IllegalArgumentException("Family name cannot be null or empty"); } int flength = family.getName() == null ? 0 : family.getName().length; if (flength > Byte.MAX_VALUE) { throw new IllegalArgumentException( "The length of family name is bigger than " + Byte.MAX_VALUE); } if (hasColumnFamily(family.getName())) { throw new IllegalArgumentException( "Family '" + family.getNameAsString() + "' already exists so cannot be added"); } return putColumnFamily(family); }
3.68
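A hedged usage sketch of assembling a table descriptor with column families through the builder API; table and family names are made up. As the Javadoc notes, modifyColumnFamily(...) is the call for updating an existing family.
TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
        .build();
// Adding "cf1" a second time would hit the "already exists" IllegalArgumentException above.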
framework_RpcDataProviderExtension_dropActiveItem
/** * Marks given item id as dropped. Dropped items are cleared when adding * new active items. * * @param itemId * dropped item id */ public void dropActiveItem(Object itemId) { if (activeItemMap.containsKey(itemId)) { droppedItems.add(itemId); } }
3.68
framework_FieldGroup_getUnboundPropertyIds
/** * Returns a collection of all property ids that exist in the item set using * {@link #setItemDataSource(Item)} but have not been bound to fields. * <p> * Will always return an empty collection before an item has been set using * {@link #setItemDataSource(Item)}. * </p> * <p> * No guarantee is given for the order of the property ids * </p> * * @return A collection of property ids that have not been bound to fields */ public Collection<Object> getUnboundPropertyIds() { if (getItemDataSource() == null) { return new ArrayList<Object>(); } List<Object> unboundPropertyIds = new ArrayList<Object>(); unboundPropertyIds.addAll(getItemDataSource().getItemPropertyIds()); unboundPropertyIds.removeAll(propertyIdToField.keySet()); return unboundPropertyIds; }
3.68
flink_TaskExecutorLocalStateStoresManager_cleanupAllocationBaseDirs
/** Deletes the base dirs for this allocation id (recursively). */ private void cleanupAllocationBaseDirs(AllocationID allocationID) { // clear the base dirs for this allocation id. File[] allocationDirectories = allocationBaseDirectories(allocationID); for (File directory : allocationDirectories) { try { FileUtils.deleteFileOrDirectory(directory); } catch (IOException e) { LOG.warn( "Exception while deleting local state directory for allocation id {}.", allocationID, e); } } }
3.68
flink_NettyShuffleUtils_getNetworkBuffersPerInputChannel
/** * Calculates and returns the number of required exclusive network buffers per input channel. */ public static int getNetworkBuffersPerInputChannel( final int configuredNetworkBuffersPerChannel) { return configuredNetworkBuffersPerChannel; }
3.68
flink_InPlaceMutableHashTable_getOccupancy
/** * Gets the number of bytes currently occupied in this hash table. * * @return The number of bytes occupied. */ public long getOccupancy() { return numAllMemorySegments * segmentSize - freeMemorySegments.size() * segmentSize; }
3.68
hbase_RegionCoprocessorHost_preAppendAfterRowLock
/** * Supports Coprocessor 'bypass'. * @param append append object * @return result to return to client if default operation should be bypassed, null otherwise * @throws IOException if an error occurred on the coprocessor */ public Result preAppendAfterRowLock(final Append append) throws IOException { boolean bypassable = true; Result defaultResult = null; if (this.coprocEnvironments.isEmpty()) { return defaultResult; } return execOperationWithResult(new ObserverOperationWithResult<RegionObserver, Result>( regionObserverGetter, defaultResult, bypassable) { @Override public Result call(RegionObserver observer) throws IOException { return observer.preAppendAfterRowLock(this, append); } }); }
3.68
framework_WindowElement_restore
/** * Clicks the restore button of the window. */ public void restore() { if (isMaximized()) { getRestoreButton().click(); } else { throw new IllegalStateException( "Window is not maximized, cannot be restored."); } }
3.68
hadoop_WasbFsck_recursiveCheckChildPathName
/** * Recursively check if a given path and its child paths have colons in their * names. It returns true if none of them has a colon or this path does not * exist, and false otherwise. */ private boolean recursiveCheckChildPathName(FileSystem fs, Path p) throws IOException { if (p == null) { return true; } FileStatus status; try { status = fs.getFileStatus(p); } catch (FileNotFoundException e) { System.out.println("Path " + p + " does not exist!"); return true; } if (status.isFile()) { if (containsColon(p)) { System.out.println("Warning: file " + p + " has a colon in its name."); return false; } else { return true; } } else { boolean flag; if (containsColon(p)) { System.out.println("Warning: directory " + p + " has a colon in its name."); flag = false; } else { flag = true; } FileStatus[] listed = fs.listStatus(p); for (FileStatus l : listed) { if (!recursiveCheckChildPathName(fs, l.getPath())) { flag = false; } } return flag; } }
3.68
hadoop_SampleQuantiles_getSampleCount
/** * Returns the number of samples kept by the estimator * * @return count current number of samples */ @VisibleForTesting synchronized public int getSampleCount() { return samples.size(); }
3.68
hbase_BalanceRequest_setDryRun
/** * Updates BalancerRequest to run the balancer in dryRun mode. In this mode, the balancer will * try to find a plan but WILL NOT execute any region moves or call any coprocessors. You can * run in dryRun mode regardless of whether the balancer switch is enabled or disabled, but * dryRun mode will not run over an existing request or chore. Dry run is useful for testing out * new balance configs. See the logs on the active HMaster for the results of the dry run. */ public Builder setDryRun(boolean dryRun) { this.dryRun = dryRun; return this; }
3.68
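A hedged sketch of issuing a dry-run balance request as described above; the admin handle is assumed to exist and the exact client entry point may differ by HBase version.
BalanceRequest request = BalanceRequest.newBuilder()
        .setDryRun(true) // compute a plan but move no regions
        .build();
BalanceResponse response = admin.balance(request);
// Inspect what the balancer would have done without any moves being executed.
int planned = response.getMovesCalculated();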
hbase_HStoreFile_isSkipResetSeqId
/** * Gets whether to skip resetting the sequence id for cells. * @param skipResetSeqId The byte array of boolean. * @return Whether to skip resetting the sequence id. */ private boolean isSkipResetSeqId(byte[] skipResetSeqId) { if (skipResetSeqId != null && skipResetSeqId.length == 1) { return Bytes.toBoolean(skipResetSeqId); } return false; }
3.68
framework_Notification_setCaption
/** * Sets the caption part of the notification message. * * @param caption * The message caption */ public void setCaption(String caption) { getState().caption = caption; }
3.68
framework_ComputedStyle_getIntProperty
/** * Retrieves the given computed property as an integer. * * Returns 0 if the property cannot be converted to an integer * * @param name * the property to retrieve * @return the integer value of the property or 0 */ public final int getIntProperty(String name) { Profiler.enter("ComputedStyle.getIntProperty"); String value = getProperty(name); int result = parseIntNative(value); Profiler.leave("ComputedStyle.getIntProperty"); return result; }
3.68
flink_AbstractBinaryExternalMerger_mergeChannels
/** * Merges the sorted runs described by the given Channel IDs into a single sorted run. * * @param channelIDs The IDs of the runs' channels. * @return The ID and number of blocks of the channel that describes the merged run. */ private ChannelWithMeta mergeChannels(List<ChannelWithMeta> channelIDs) throws IOException { // the list with the target iterators List<FileIOChannel> openChannels = new ArrayList<>(channelIDs.size()); final BinaryMergeIterator<Entry> mergeIterator = getMergingIterator(channelIDs, openChannels); // create a new channel writer final FileIOChannel.ID mergedChannelID = ioManager.createChannel(); channelManager.addChannel(mergedChannelID); AbstractChannelWriterOutputView output = null; int numBytesInLastBlock; int numBlocksWritten; try { output = FileChannelUtil.createOutputView( ioManager, mergedChannelID, compressionEnabled, compressionCodecFactory, compressionBlockSize, pageSize); writeMergingOutput(mergeIterator, output); numBytesInLastBlock = output.close(); numBlocksWritten = output.getBlockCount(); } catch (IOException e) { if (output != null) { output.close(); output.getChannel().deleteChannel(); } throw e; } // remove, close and delete channels for (FileIOChannel channel : openChannels) { channelManager.removeChannel(channel.getChannelID()); try { channel.closeAndDelete(); } catch (Throwable ignored) { } } return new ChannelWithMeta(mergedChannelID, numBlocksWritten, numBytesInLastBlock); }
3.68
flink_CompositeTypeSerializerSnapshot_isOuterSnapshotCompatible
/** * Checks whether the outer snapshot is compatible with a given new serializer. * * <p>The base implementation of this method just returns {@code true}, i.e. it assumes that the * outer serializer only has nested serializers and no extra information, and therefore the * result of the check must always be true. Otherwise, if the outer serializer contains some * extra information that has been persisted as part of the serializer snapshot, this must be * overridden. Note that this method and the corresponding methods {@link * #writeOuterSnapshot(DataOutputView)}, {@link #readOuterSnapshot(int, DataInputView, * ClassLoader)} need to be implemented. * * @param newSerializer the new serializer, which contains the new outer information to check * against. * @return a flag indicating whether or not the new serializer's outer information is compatible * with the one written in this snapshot. * @deprecated this method is deprecated, and will be removed in the future. Please implement * {@link #resolveOuterSchemaCompatibility(TypeSerializer)} instead. */ @Deprecated protected boolean isOuterSnapshotCompatible(S newSerializer) { return true; }
3.68
framework_SQLContainer_addListener
/** * @deprecated As of 7.0, replaced by * {@link #addRowIdChangeListener(RowIdChangeListener)} */ @Deprecated public void addListener(RowIdChangeListener listener) { addRowIdChangeListener(listener); }
3.68
hbase_AsyncConnectionImpl_getChoreService
/** * If choreService has not been created yet, create the ChoreService. */ synchronized ChoreService getChoreService() { if (isClosed()) { throw new IllegalStateException("connection is already closed"); } if (choreService == null) { choreService = new ChoreService("AsyncConn Chore Service"); } return choreService; }
3.68
flink_CommonExecSink_applyKeyBy
/** * Apply a primary key partition transformation to guarantee the strict ordering of changelog * messages. */ private Transformation<RowData> applyKeyBy( ExecNodeConfig config, ClassLoader classLoader, Transformation<RowData> inputTransform, int[] primaryKeys, int sinkParallelism, int inputParallelism, boolean needMaterialize) { final ExecutionConfigOptions.SinkKeyedShuffle sinkShuffleByPk = config.get(ExecutionConfigOptions.TABLE_EXEC_SINK_KEYED_SHUFFLE); boolean sinkKeyBy = false; switch (sinkShuffleByPk) { case NONE: break; case AUTO: // should cover both insert-only and changelog input sinkKeyBy = sinkParallelism != inputParallelism && sinkParallelism != 1; break; case FORCE: // sink single parallelism has no problem (because none partitioner will cause worse // disorder) sinkKeyBy = sinkParallelism != 1; break; } if (!sinkKeyBy && !needMaterialize) { return inputTransform; } final RowDataKeySelector selector = KeySelectorUtil.getRowDataSelector(classLoader, primaryKeys, getInputTypeInfo()); final KeyGroupStreamPartitioner<RowData, RowData> partitioner = new KeyGroupStreamPartitioner<>( selector, KeyGroupRangeAssignment.DEFAULT_LOWER_BOUND_MAX_PARALLELISM); Transformation<RowData> partitionedTransform = new PartitionTransformation<>(inputTransform, partitioner); createTransformationMeta(PARTITIONER_TRANSFORMATION, "Partitioner", "Partitioner", config) .fill(partitionedTransform); partitionedTransform.setParallelism(sinkParallelism, sinkParallelismConfigured); return partitionedTransform; }
3.68
hbase_MobUtils_getTableName
/** * Get the table name from when this cell was written into a mob hfile as a TableName. * @param cell to extract tag from * @return name of table as a TableName. empty if the tag is not found. */ public static Optional<TableName> getTableName(Cell cell) { Optional<Tag> maybe = getTableNameTag(cell); Optional<TableName> name = Optional.empty(); if (maybe.isPresent()) { final Tag tag = maybe.get(); if (tag.hasArray()) { name = Optional .of(TableName.valueOf(tag.getValueArray(), tag.getValueOffset(), tag.getValueLength())); } else { // TODO ByteBuffer handling in tags looks busted. revisit. ByteBuffer buffer = tag.getValueByteBuffer().duplicate(); buffer.mark(); buffer.position(tag.getValueOffset()); buffer.limit(tag.getValueOffset() + tag.getValueLength()); name = Optional.of(TableName.valueOf(buffer)); } } return name; }
3.68
hudi_BaseHoodieLogRecordReader_reconcileSpuriousBlocksAndGetValidOnes
/** * There could be spurious log blocks due to spark task retries. So, we will use BLOCK_SEQUENCE_NUMBER in the log block header to deduce such spurious log blocks and return * a deduped set of log blocks. * * @param allValidLogBlocks all valid log blocks parsed so far. * @param blockSequenceMapPerCommit map containing block sequence numbers for every commit. * @return a Pair of boolean and list of deduped valid log blocks, where a boolean of true means dups have been detected. */ private Pair<Boolean, List<HoodieLogBlock>> reconcileSpuriousBlocksAndGetValidOnes(List<HoodieLogBlock> allValidLogBlocks, Map<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> blockSequenceMapPerCommit) { boolean dupsFound = blockSequenceMapPerCommit.values().stream().anyMatch(perCommitBlockList -> perCommitBlockList.size() > 1); if (dupsFound) { // duplicates are found. we need to remove duplicate log blocks. for (Map.Entry<String, Map<Long, List<Pair<Integer, HoodieLogBlock>>>> entry : blockSequenceMapPerCommit.entrySet()) { Map<Long, List<Pair<Integer, HoodieLogBlock>>> perCommitBlockSequences = entry.getValue(); if (perCommitBlockSequences.size() > 1) { // only those that have more than 1 sequence need deduping. int maxSequenceCount = -1; int maxAttemptNo = -1; int totalSequences = perCommitBlockSequences.size(); int counter = 0; for (Map.Entry<Long, List<Pair<Integer, HoodieLogBlock>>> perAttemptEntries : perCommitBlockSequences.entrySet()) { Long attemptNo = perAttemptEntries.getKey(); int size = perAttemptEntries.getValue().size(); if (maxSequenceCount < size) { maxSequenceCount = size; maxAttemptNo = Math.toIntExact(attemptNo); } counter++; } // for other sequence (!= maxSequenceIndex), we need to remove the corresponding logBlocks from allValidLogBlocks for (Map.Entry<Long, List<Pair<Integer, HoodieLogBlock>>> perAttemptEntries : perCommitBlockSequences.entrySet()) { Long attemptNo = perAttemptEntries.getKey(); if (maxAttemptNo != attemptNo) { List<HoodieLogBlock> logBlocksToRemove = perCommitBlockSequences.get(attemptNo).stream().map(pair -> pair.getValue()).collect(Collectors.toList()); logBlocksToRemove.forEach(logBlockToRemove -> allValidLogBlocks.remove(logBlockToRemove)); } } } } return Pair.of(true, allValidLogBlocks); } else { return Pair.of(false, allValidLogBlocks); } }
3.68
pulsar_BrokerMonitor_process
/** * Print the local and historical broker data in a tabular format, and put this back as a watcher. * * @param event The watched event. */ public synchronized void process(final WatchedEvent event) { try { if (event.getType() == Event.EventType.NodeDataChanged) { printData(event.getPath()); } } catch (Exception ex) { throw new RuntimeException(ex); } }
3.68
streampipes_AssetLinkBuilder_withQueryHint
/** * Sets the query hint for the AssetLink being built. * * @param queryHint The query hint to set. * @return The AssetLinkBuilder instance for method chaining. */ public AssetLinkBuilder withQueryHint(String queryHint) { this.assetLink.setQueryHint(queryHint); return this; }
3.68
flink_StateTable_removeAndGetOld
/** * Removes the mapping for the composite of active key and given namespace, returning the state * that was found under the entry. * * @param namespace the namespace of the mapping to remove. Not null. * @return the state of the removed mapping or {@code null} if no mapping for the specified key * was found. */ public S removeAndGetOld(N namespace) { return removeAndGetOld( keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace); }
3.68
hadoop_ServiceLauncher_instantiateService
/** * @return Instantiate the service defined in {@code serviceClassName}. * * Sets the {@code configuration} field * to the the value of {@code conf}, * and the {@code service} field to the service created. * * @param conf configuration to use */ @SuppressWarnings("unchecked") public Service instantiateService(Configuration conf) { Preconditions.checkArgument(conf != null, "null conf"); Preconditions.checkArgument(serviceClassName != null, "null service classname"); Preconditions.checkArgument(!serviceClassName.isEmpty(), "undefined service classname"); configuration = conf; // Instantiate the class. this requires the service to have a public // zero-argument or string-argument constructor Object instance; try { Class<?> serviceClass = getClassLoader().loadClass(serviceClassName); try { instance = serviceClass.getConstructor().newInstance(); } catch (NoSuchMethodException noEmptyConstructor) { // no simple constructor, fall back to a string LOG.debug("No empty constructor {}", noEmptyConstructor, noEmptyConstructor); instance = serviceClass.getConstructor(String.class) .newInstance(serviceClassName); } } catch (Exception e) { throw serviceCreationFailure(e); } if (!(instance instanceof Service)) { //not a service throw new ServiceLaunchException( LauncherExitCodes.EXIT_SERVICE_CREATION_FAILURE, "Not a service class: \"%s\"", serviceClassName); } // cast to the specific instance type of this ServiceLauncher service = (S) instance; return service; }
3.68
flink_FsCheckpointStreamFactory_close
/** * If the stream is only closed, we remove the produced file (cleanup through the auto close * feature, for example). This method throws no exception if the deletion fails, but only * logs the error. */ @Override public void close() { if (!closed) { closed = true; // make sure write requests need to go to 'flushToFile()' where they recognized // that the stream is closed pos = writeBuffer.length; if (outStream != null) { try { outStream.close(); } catch (Throwable throwable) { LOG.warn("Could not close the state stream for {}.", statePath, throwable); } finally { try { fs.delete(statePath, false); } catch (Exception e) { LOG.warn( "Cannot delete closed and discarded state stream for {}.", statePath, e); } } } } }
3.68
AreaShop_Utils_getImportantRentRegions
/** * Get the most important rental AreaShop regions. * - Returns highest priority, child instead of parent regions. * @param location The location to check for regions * @return empty list if no regions found, 1 member if 1 region is a priority, more if regions with the same priority */ public static List<RentRegion> getImportantRentRegions(Location location) { List<RentRegion> result = new ArrayList<>(); for(GeneralRegion region : getImportantRegions(location, GeneralRegion.RegionType.RENT)) { result.add((RentRegion)region); } return result; }
3.68
framework_BootstrapHandler_getAppId
/** * Gets the application id. * * The application id is defined by * {@link VaadinService#getMainDivId(VaadinSession, VaadinRequest, Class)} * * @return the application id */ public String getAppId() { if (appId == null) { appId = getRequest().getService().getMainDivId(getSession(), getRequest(), getUIClass()); } return appId; }
3.68
flink_BlobClient_getInternal
/** * Downloads the BLOB identified by the given BLOB key from the BLOB server. * * @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) * @param blobKey blob key associated with the requested file * @return an input stream to read the retrieved data from * @throws FileNotFoundException if there is no such file; * @throws IOException if an I/O error occurs during the download */ InputStream getInternal(@Nullable JobID jobId, BlobKey blobKey) throws IOException { if (this.socket.isClosed()) { throw new IllegalStateException( "BLOB Client is not connected. " + "Client has been shut down or encountered an error before."); } if (LOG.isDebugEnabled()) { LOG.debug("GET BLOB {}/{} from {}.", jobId, blobKey, socket.getLocalSocketAddress()); } try { OutputStream os = this.socket.getOutputStream(); InputStream is = this.socket.getInputStream(); // Send GET header sendGetHeader(os, jobId, blobKey); receiveAndCheckGetResponse(is); return new BlobInputStream(is, blobKey, os); } catch (Throwable t) { BlobUtils.closeSilently(socket, LOG); throw new IOException("GET operation failed: " + t.getMessage(), t); } }
3.68
hmily_YamlProcessor_getFlattenedMap
/** * Gets flattened map. * * @param source the source * @return the flattened map */ protected final Map<String, Object> getFlattenedMap(final Map<String, Object> source) { Map<String, Object> result = new LinkedHashMap<>(); buildFlattenedMap(result, source, null); return result; }
3.68
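The buildFlattenedMap helper is not shown in the snippet, so here is an illustrative standalone version of what flattening typically produces: nested YAML maps collapsed into dotted keys. This is a sketch of the idea, not the Hmily implementation.
import java.util.LinkedHashMap;
import java.util.Map;

@SuppressWarnings("unchecked")
static Map<String, Object> flatten(final Map<String, Object> source, final String prefix) {
    Map<String, Object> result = new LinkedHashMap<>();
    for (Map.Entry<String, Object> entry : source.entrySet()) {
        String key = prefix == null ? entry.getKey() : prefix + "." + entry.getKey();
        Object value = entry.getValue();
        if (value instanceof Map) {
            // e.g. {hmily: {repository: db}} becomes hmily.repository=db
            result.putAll(flatten((Map<String, Object>) value, key));
        } else {
            result.put(key, value);
        }
    }
    return result;
}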
flink_OptimizableHashSet_addNull
/** Add a null key. */ public void addNull() { this.containsNull = true; }
3.68
rocketmq-connect_LocalStateManagementServiceImpl_initialize
/** * initialize cb config * * @param config */ @Override public void initialize(WorkerConfig config, RecordConverter converter) { super.initialize(config, converter); /**connector status store*/ this.connectorStatusStore = new FileBaseKeyValueStore<>( FilePathConfigUtil.getConnectorStatusConfigPath(config.getStorePathRootDir()), new Serdes.StringSerde(), new JsonSerde(ConnectorStatus.class)); /**task status store*/ this.taskStatusStore = new FileBaseKeyValueStore<>( FilePathConfigUtil.getTaskStatusConfigPath(config.getStorePathRootDir()), new Serdes.StringSerde(), new ListSerde(TaskStatus.class)); }
3.68
hadoop_JobTokenSecretManager_createIdentifier
/** * Create an empty job token identifier * @return a newly created empty job token identifier */ @Override public JobTokenIdentifier createIdentifier() { return new JobTokenIdentifier(); }
3.68
framework_CalendarComponentEvents_getNewEndTime
/** * @deprecated Use {@link #getNewEnd()} instead * * @return the new end time */ @Deprecated public Date getNewEndTime() { return endTime; }
3.68
flink_Plan_setJobName
/** * Sets the jobName for this Plan. * * @param jobName The jobName to set. */ public void setJobName(String jobName) { checkNotNull(jobName, "The job name must not be null."); this.jobName = jobName; }
3.68
hibernate-validator_AnnotationDescriptor_hashCode
/** * Calculates the hash code of this annotation descriptor as described in * {@link Annotation#hashCode()}. * * @return The hash code of this descriptor. * * @see Annotation#hashCode() */ @Override public int hashCode() { return hashCode; }
3.68
flink_Created_startScheduling
/** Starts the scheduling by going into the {@link WaitingForResources} state. */ void startScheduling() { context.goToWaitingForResources(null); }
3.68
hadoop_AuditReplayThread_drainCounters
/** * Merge all of this thread's counter values into the counters contained * within the passed context. * * @param context The context holding the counters to increment. */ void drainCounters(Mapper.Context context) { for (Map.Entry<REPLAYCOUNTERS, Counter> ent : replayCountersMap .entrySet()) { context.getCounter(ent.getKey()).increment(ent.getValue().getValue()); } for (Map.Entry<String, Counter> ent : individualCommandsMap.entrySet()) { context.getCounter(INDIVIDUAL_COMMANDS_COUNTER_GROUP, ent.getKey()) .increment(ent.getValue().getValue()); } }
3.68
hadoop_PoolAlignmentContext_receiveRequestState
/** * Client side implementation only provides state alignment info in requests. * Client does not receive RPC requests therefore this does nothing. */ @Override public long receiveRequestState(RpcHeaderProtos.RpcRequestHeaderProto header, long threshold) throws IOException { // Do nothing. return 0; }
3.68