name: string (length 12 to 178)
code_snippet: string (length 8 to 36.5k)
score: float64 (3.26 to 3.68)
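The three columns above describe a simple tabular layout of method names, their source snippets, and a quality score. As a minimal sketch of how such a table could be inspected (assuming the rows below are exported to a Parquet file; the filename and the exact export format are assumptions, not part of this preview):

    import pandas as pd

    # Hypothetical export of the rows listed below; the filename is an assumption.
    df = pd.read_parquet("code_snippets.parquet")  # columns: name, code_snippet, score

    # Sanity-check the score range against the header above.
    assert df["score"].between(3.26, 3.68).all()

    # Look at the highest-scoring entries by name.
    print(df.nlargest(5, "score")[["name", "score"]])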
hbase_AbstractFSWAL_finishSync
// try advancing the highestSyncedTxid as much as possible private int finishSync() { if (unackedAppends.isEmpty()) { // All outstanding appends have been acked. if (toWriteAppends.isEmpty()) { // Also no appends that wait to be written out, then just finished all pending syncs. long maxSyncTxid = highestSyncedTxid.get(); for (SyncFuture sync : syncFutures) { maxSyncTxid = Math.max(maxSyncTxid, sync.getTxid()); markFutureDoneAndOffer(sync, maxSyncTxid, null); } highestSyncedTxid.set(maxSyncTxid); int finished = syncFutures.size(); syncFutures.clear(); return finished; } else { // There is no append between highestProcessedAppendTxid and lowestUnprocessedAppendTxid, so // if highestSyncedTxid >= highestProcessedAppendTxid, then all syncs whose txid are between // highestProcessedAppendTxid and lowestUnprocessedAppendTxid can be finished. long lowestUnprocessedAppendTxid = toWriteAppends.peek().getTxid(); assert lowestUnprocessedAppendTxid > highestProcessedAppendTxid; long doneTxid = lowestUnprocessedAppendTxid - 1; highestSyncedTxid.set(doneTxid); return finishSyncLowerThanTxid(doneTxid); } } else { // There are still unacked appends. So let's move the highestSyncedTxid to the txid of the // first unacked append minus 1. long lowestUnackedAppendTxid = unackedAppends.peek().getTxid(); long doneTxid = Math.max(lowestUnackedAppendTxid - 1, highestSyncedTxid.get()); highestSyncedTxid.set(doneTxid); return finishSyncLowerThanTxid(doneTxid); } }
3.68
flink_Task_isCanceledOrFailed
/** * Checks whether the task has failed, is canceled, or is being canceled at the moment. * * @return True if the task is in state FAILED, CANCELING, or CANCELED, false otherwise. */ public boolean isCanceledOrFailed() { return executionState == ExecutionState.CANCELING || executionState == ExecutionState.CANCELED || executionState == ExecutionState.FAILED; }
3.68
framework_VDragAndDropManager_getCurrentDragApplicationConnection
/** * Returns the application connection for the current drag source. If there * is no current drag source, returns {@code null} instead. * * @return the application connection, or {@code null} if not found */ protected ApplicationConnection getCurrentDragApplicationConnection() { if (currentDrag == null) { return null; } final ComponentConnector dragSource = currentDrag.getTransferable() .getDragSource(); if (dragSource == null) { return null; } return dragSource.getConnection(); }
3.68
hadoop_WordList_isUpdated
/** * Returns 'true' if the list is updated since creation (and reload). */ @Override public boolean isUpdated() { return isUpdated; }
3.68
hadoop_SingleFilePerBlockCache_addToHeadOfLinkedList
/** * Add the given entry to the head of the linked list. * * @param entry Block entry to add. */ private void addToHeadOfLinkedList(Entry entry) { if (head == null) { head = entry; tail = entry; } LOG.debug( "Block num {} to be added to the head. Current head block num: {} and tail block num: {}", entry.blockNumber, head.blockNumber, tail.blockNumber); if (entry != head) { Entry prev = entry.getPrevious(); Entry nxt = entry.getNext(); // no-op if the block is already evicted if (!blocks.containsKey(entry.blockNumber)) { return; } if (prev != null) { prev.setNext(nxt); } if (nxt != null) { nxt.setPrevious(prev); } entry.setPrevious(null); entry.setNext(head); head.setPrevious(entry); head = entry; if (prev != null && prev.getNext() == null) { tail = prev; } } }
3.68
flink_HiveParserUtils_canHandleQbForCbo
// Overrides CalcitePlanner::canHandleQbForCbo to support SORT BY, CLUSTER BY, etc. public static String canHandleQbForCbo(QueryProperties queryProperties) { if (!queryProperties.hasPTF()) { return null; } String msg = ""; if (queryProperties.hasPTF()) { msg += "has PTF; "; } return msg; }
3.68
pulsar_ClientConfiguration_isUseTcpNoDelay
/** * @return whether TCP no-delay should be set on the connections */ public boolean isUseTcpNoDelay() { return confData.isUseTcpNoDelay(); }
3.68
flink_VarBinaryType_ofEmptyLiteral
/** * The SQL standard defines that character string literals are allowed to be zero-length strings * (i.e., to contain no characters) even though it is not permitted to declare a type that is * zero. For consistent behavior, the same logic applies to binary strings. This has also * implications on variable-length binary strings during type inference because any fixed-length * binary string should be convertible to a variable-length one. * * <p>This method enables this special kind of binary string. * * <p>Zero-length binary strings have no serializable string representation. */ public static VarBinaryType ofEmptyLiteral() { return new VarBinaryType(EMPTY_LITERAL_LENGTH, false); }
3.68
flink_JoinOperator_projectTuple4
/** * Projects a pair of joined elements to a {@link Tuple} with the previously selected * fields. Requires the classes of the fields of the resulting tuples. * * @return The projected data set. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3> ProjectJoin<I1, I2, Tuple4<T0, T1, T2, T3>> projectTuple4() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo<Tuple4<T0, T1, T2, T3>> tType = new TupleTypeInfo<Tuple4<T0, T1, T2, T3>>(fTypes); return new ProjectJoin<I1, I2, Tuple4<T0, T1, T2, T3>>( this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this); }
3.68
hadoop_ArrayFile_key
/** * Returns the key associated with the most recent call to {@link * #seek(long)}, {@link #next(Writable)}, or {@link * #get(long,Writable)}. * * @return the key. * @throws IOException raised on errors performing I/O. */ public synchronized long key() throws IOException { return key.get(); }
3.68
flink_SortPartitionOperator_sortPartition
/** * Appends an additional sort order with the specified field in the specified order to the local * partition sorting of the DataSet. * * @param field The field expression referring to the field of the additional sort order of the * local partition sorting. * @param order The order of the additional sort order of the local partition sorting. * @return The DataSet with sorted local partitions. */ public SortPartitionOperator<T> sortPartition(String field, Order order) { if (useKeySelector) { throw new InvalidProgramException( "Expression keys cannot be appended after a KeySelector"); } ensureSortableKey(field); keys.add(new Keys.ExpressionKeys<>(field, getType())); orders.add(order); return this; }
3.68
hmily_HmilyRepositoryNode_getHmilyParticipantRealPath
/** * Get hmily participant real path. * * @param participantId participant id * @return hmily participant real path */ public String getHmilyParticipantRealPath(final Long participantId) { return Joiner.on("/").join(getHmilyParticipantRootPath(), participantId); }
3.68
hbase_MasterObserver_postRemoveRSGroup
/** * Called after a region server group is removed * @param ctx the environment to interact with the framework and master * @param name group name */ default void postRemoveRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx, String name) throws IOException { }
3.68
rocketmq-connect_Worker_allocatedTasks
/** * Get the allocated task configurations. * * @return the latest task configurations */ public Map<String, List<ConnectKeyValue>> allocatedTasks() { return latestTaskConfigs; }
3.68
flink_DeltaIterationBase_setInitialSolutionSet
/** * Sets the given input as the initial solution set. * * @param input The contract to set the initial solution set. */ public void setInitialSolutionSet(Operator input) { setFirstInput(input); }
3.68
morf_SelectStatement_deepCopy
/** * @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation) */ @Override public SelectStatementBuilder deepCopy(DeepCopyTransformation transformer) { return new SelectStatementBuilder(this, transformer); }
3.68
hbase_WAL_skipRemoteWAL
/** * Tell the WAL that, when creating a new writer, it can skip creating the remote writer. * <p> * Used by sync replication for switching states from ACTIVE, where the remote cluster is broken. */ default void skipRemoteWAL(boolean markerEditOnly) { }
3.68
hbase_MasterRpcServices_bypassProcedure
/** * Bypass the specified procedure to completion. The procedure is marked completed but no actual work is * done from the current state/step onwards. Parents of the procedure are also marked for bypass. * NOTE: this is a dangerous operation and may be used to unstick buggy procedures. This may leave the * system in an incoherent state. This may need to be followed by some cleanup steps/actions by the * operator. * @return BypassProcedureResponse indicating success or failure */ @Override public MasterProtos.BypassProcedureResponse bypassProcedure(RpcController controller, MasterProtos.BypassProcedureRequest request) throws ServiceException { try { LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}", server.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(), request.getOverride(), request.getRecursive()); List<Boolean> ret = server.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(), request.getWaitTime(), request.getOverride(), request.getRecursive()); return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build(); } catch (IOException e) { throw new ServiceException(e); } }
3.68
hbase_HFileArchiveManager_enable
/** * Perform a best effort enable of hfile retention, which relies on zookeeper communicating the * change back to the hfile cleaner. * <p> * No attempt is made to make sure that backups are successfully created - it is inherently an * <b>asynchronous operation</b>. * @param zooKeeper watcher connection to zk cluster * @param table table name on which to enable archiving * @throws KeeperException if a ZooKeeper operation fails */ private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException { LOG.debug("Ensuring archiving znode exists"); ZKUtil.createAndFailSilent(zooKeeper, archiveZnode); // then add the table to the list of znodes to archive String tableNode = this.getTableNode(table); LOG.debug("Creating: " + tableNode + ", data: []"); ZKUtil.createSetData(zooKeeper, tableNode, new byte[0]); }
3.68
pulsar_PulsarAdminImpl_getClientConfigData
/** * @return the client Configuration Data that is being used */ public ClientConfigurationData getClientConfigData() { return clientConfigData; }
3.68
hibernate-validator_Configuration_getMethodConstraintsSupportedOption
/** * Retrieves the value for the "methodConstraintsSupported" property from the options. */ private boolean getMethodConstraintsSupportedOption(Map<String, String> options) { String methodConstraintsSupported = options.get( METHOD_CONSTRAINTS_SUPPORTED_PROCESSOR_OPTION ); //allow method constraints by default if ( methodConstraintsSupported == null ) { return true; } return Boolean.parseBoolean( methodConstraintsSupported ); }
3.68
flink_BoundedBlockingSubpartition_createWithFileChannel
/** * Creates a BoundedBlockingSubpartition that simply stores the partition data in a file. Data * is eagerly spilled (written to disk) and readers directly read from the file. */ public static BoundedBlockingSubpartition createWithFileChannel( int index, ResultPartition parent, File tempFile, int readBufferSize, boolean sslEnabled) throws IOException { final FileChannelBoundedData bd = FileChannelBoundedData.create(tempFile.toPath(), readBufferSize); return new BoundedBlockingSubpartition(index, parent, bd, !sslEnabled); }
3.68
rocketmq-connect_AvroData_toConnectSchema
/** * Convert to connect schema * * @param schema * @param forceOptional * @param fieldDefaultVal * @param docDefaultVal * @param toConnectContext * @return */ private Schema toConnectSchema(org.apache.avro.Schema schema, boolean forceOptional, Object fieldDefaultVal, String docDefaultVal, ToConnectContext toConnectContext) { return toConnectSchema( schema, forceOptional, fieldDefaultVal, docDefaultVal, null, toConnectContext); }
3.68
flink_PartitionableTableSink_configurePartitionGrouping
/** * If returns true, sink can trust all records will definitely be grouped by partition fields * before consumed by the {@link TableSink}, i.e. the sink will receive all elements of one * partition and then all elements of another partition, elements of different partitions will * not be mixed. For some sinks, this can be used to reduce number of the partition writers to * improve writing performance. * * <p>This method is used to configure the behavior of input whether to be grouped by partition, * if true, at the same time the sink should also configure itself, i.e. set an internal field * that changes the writing behavior (writing one partition at a time). * * @param supportsGrouping whether the execution mode supports grouping, e.g. grouping (usually * use sort to implement) is only supported in batch mode, not supported in streaming mode. * @return whether data need to be grouped by partition before consumed by the sink. Default is * false. If {@code supportsGrouping} is false, it should never return true (requires * grouping), otherwise it will fail. */ default boolean configurePartitionGrouping(boolean supportsGrouping) { return false; }
3.68
hadoop_ClientThrottlingIntercept_eventOccurred
/** * Called after the Azure Storage SDK receives a response. Client-side * throttling uses this * to collect metrics. * * @param event The connection, operation, and request state. */ @Override public void eventOccurred(ResponseReceivedEvent event) { singleton.responseReceived(event); }
3.68
hbase_MasterObserver_postGetReplicationPeerConfig
/** * Called after get the configured ReplicationPeerConfig for the specified peer * @param peerId a short name that identifies the peer */ default void postGetReplicationPeerConfig(final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId) throws IOException { }
3.68
hadoop_ConfiguredNodeLabels_getLabelsByQueue
/** * Returns a set of configured node labels for a queue. If no labels are set * for a queue, it defaults to a one element immutable collection containing * empty label. * @param queuePath path of the queue * @return configured node labels or an immutable set containing the empty * label */ public Set<String> getLabelsByQueue(String queuePath) { Set<String> labels = configuredNodeLabelsByQueue.get(queuePath); if (labels == null) { return NO_LABEL; } return ImmutableSet.copyOf(labels); }
3.68
hbase_MetaTableAccessor_scanByRegionEncodedName
/** * Scans META table for a row whose key contains the specified <B>regionEncodedName</B>, returning * a single related <code>Result</code> instance if any row is found, null otherwise. * @param connection the connection to query META table. * @param regionEncodedName the region encoded name to look for at META. * @return <code>Result</code> instance with the row related info in META, null otherwise. * @throws IOException if any errors occur while querying META. */ public static Result scanByRegionEncodedName(Connection connection, String regionEncodedName) throws IOException { RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName)); Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setFilter(rowFilter); try (Table table = getMetaHTable(connection); ResultScanner resultScanner = table.getScanner(scan)) { return resultScanner.next(); } }
3.68
hadoop_ServiceRecord_get
/** * Get the "other" attribute with a specific key. * @param key key to look up * @param defVal default value * @return the value as a string, * or <code>defval</code> if the value was not present */ public String get(String key, String defVal) { String val = attributes.get(key); return val != null ? val: defVal; }
3.68
hbase_ColumnFamilyDescriptorBuilder_setPrefetchBlocksOnOpen
/** * Set the prefetchBlocksOnOpen flag * @param value true if we should prefetch blocks into the blockcache on open * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setPrefetchBlocksOnOpen(boolean value) { return setValue(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean.toString(value)); }
3.68
hadoop_CommitUtilsWithMR_getMagicJobPath
/** * Compute the "magic" path for a job. * @param jobUUID unique Job ID. * @param dest the final output directory * @return the path to store job attempt data. */ public static Path getMagicJobPath(String jobUUID, Path dest) { return getMagicJobAttemptsPath(dest, jobUUID); }
3.68
hbase_QuotaObserverChore_updateTableQuota
/** * Updates the hbase:quota table with the new quota policy for this <code>table</code> if * necessary. * @param table The table being checked * @param currentSnapshot The state of the quota on this table from the previous invocation. * @param targetSnapshot The state the quota should be in for this table. */ void updateTableQuota(TableName table, SpaceQuotaSnapshot currentSnapshot, SpaceQuotaSnapshot targetSnapshot) throws IOException { final SpaceQuotaStatus currentStatus = currentSnapshot.getQuotaStatus(); final SpaceQuotaStatus targetStatus = targetSnapshot.getQuotaStatus(); // If we're changing something, log it. if (!currentSnapshot.equals(targetSnapshot)) { this.snapshotNotifier.transitionTable(table, targetSnapshot); // Update it in memory tableSnapshotStore.setCurrentState(table, targetSnapshot); // If the target is none, we're moving out of violation. Update the hbase:quota table SpaceViolationPolicy currPolicy = currentStatus.getPolicy().orElse(null); SpaceViolationPolicy targetPolicy = targetStatus.getPolicy().orElse(null); if (!targetStatus.isInViolation()) { // In case of Disable SVP, we need to enable the table as it moves out of violation if (isDisableSpaceViolationPolicy(currPolicy, targetPolicy)) { QuotaUtil.enableTableIfNotEnabled(conn, table); } if (LOG.isDebugEnabled()) { LOG.debug(table + " moved into observance of table space quota."); } } else { // We're either moving into violation or changing violation policies if (currPolicy != targetPolicy && SpaceViolationPolicy.DISABLE == currPolicy) { // In case of policy switch, we need to enable the table if current policy is Disable SVP QuotaUtil.enableTableIfNotEnabled(conn, table); } else if (SpaceViolationPolicy.DISABLE == targetPolicy) { // In case of Disable SVP, we need to disable the table as it moves into violation QuotaUtil.disableTableIfNotDisabled(conn, table); } if (LOG.isDebugEnabled()) { LOG.debug( table + " moved into violation of table space quota with policy of " + targetPolicy); } } } else if (LOG.isTraceEnabled()) { // Policies are the same, so we have nothing to do except log this. Don't need to re-update // the quota table if (!currentStatus.isInViolation()) { LOG.trace(table + " remains in observance of quota."); } else { LOG.trace(table + " remains in violation of quota."); } } }
3.68
flink_AbstractStreamOperator_getOperatorName
/** * Return the operator name. If the runtime context has been set, then the task name with * subtask index is returned. Otherwise, the simple class name is returned. * * @return If runtime context is set, then return task name with subtask index. Otherwise return * simple class name. */ protected String getOperatorName() { if (runtimeContext != null) { return runtimeContext.getTaskNameWithSubtasks(); } else { return getClass().getSimpleName(); } }
3.68
hadoop_ManifestStoreOperations_renameDir
/** * Rename a dir; the default implementation forwards to {@link #renameFile(Path, Path)}. * Usual "what does 'false' mean?" ambiguity. * @param source source directory * @param dest destination path, which must not exist. * @return true if the rename succeeded. * @throws IOException failure. */ public boolean renameDir(Path source, Path dest) throws IOException { return renameFile(source, dest); }
3.68
flink_HiveParserTypeCheckCtx_setAllowStatefulFunctions
/** @param allowStatefulFunctions whether to allow stateful UDF invocations */ public void setAllowStatefulFunctions(boolean allowStatefulFunctions) { this.allowStatefulFunctions = allowStatefulFunctions; }
3.68
framework_AbsoluteLayoutConnector_getState
/* * (non-Javadoc) * * @see com.vaadin.client.ui.AbstractComponentConnector#getState() */ @Override public AbsoluteLayoutState getState() { return (AbsoluteLayoutState) super.getState(); }
3.68
hadoop_FederationPolicyUtils_getWeightedRandom
/** * Select a random bin according to the weight array for the bins. Only bins * with positive weights will be considered. If no positive weight found, * return -1. * * @param weights the weight array * @return the index of the sample in the array */ public static int getWeightedRandom(ArrayList<Float> weights) { int i; float totalWeight = 0; for (i = 0; i < weights.size(); i++) { if (weights.get(i) > 0) { totalWeight += weights.get(i); } } if (totalWeight == 0) { return -1; } float samplePoint = rand.nextFloat() * totalWeight; int lastIndex = 0; for (i = 0; i < weights.size(); i++) { if (weights.get(i) > 0) { if (samplePoint <= weights.get(i)) { return i; } else { lastIndex = i; samplePoint -= weights.get(i); } } } // This can only happen if samplePoint is very close to totalWeight and // float rounding kicks in during subtractions return lastIndex; }
3.68
framework_Image_addClickListener
/** * Add a click listener to the component. The listener is called whenever * the user clicks inside the component. Depending on the content the event * may be blocked and in that case no event is fired. * * @see Registration * * @param listener * The listener to add, not null * @return a registration object for removing the listener * @since 8.0 */ public Registration addClickListener(ClickListener listener) { return addListener(EventId.CLICK_EVENT_IDENTIFIER, ClickEvent.class, listener, ClickListener.clickMethod); }
3.68
hadoop_RecordStore_newInstance
/** * Build a state store API implementation interface. * * @param clazz The specific interface implementation to create * @param driver The {@link StateStoreDriver} implementation in use. * @param <T> Instance of type RecordStore. * @return An initialized instance of the specified state store API * implementation. */ public static <T extends RecordStore<?>> T newInstance( final Class<T> clazz, final StateStoreDriver driver) { try { Constructor<T> constructor = clazz.getConstructor(StateStoreDriver.class); T recordStore = constructor.newInstance(driver); return recordStore; } catch (Exception e) { LOG.error("Cannot create new instance for " + clazz, e); return null; } }
3.68
hudi_AbstractTableFileSystemView_reset
/** * Clears the partition Map and reset view states. * <p> * NOTE: The logic MUST BE guarded by the write lock. */ @Override public void reset() { try { writeLock.lock(); clear(); // Initialize with new Hoodie timeline. init(metaClient, getTimeline()); } finally { writeLock.unlock(); } }
3.68
flink_PushCalcPastChangelogNormalizeRule_extractUsedInputFields
/** Extracts input fields which are used in the Calc node and the ChangelogNormalize node. */ private int[] extractUsedInputFields(StreamPhysicalCalc calc, Set<Integer> primaryKeyIndices) { RexProgram program = calc.getProgram(); List<RexNode> projectsAndCondition = program.getProjectList().stream() .map(program::expandLocalRef) .collect(Collectors.toList()); if (program.getCondition() != null) { projectsAndCondition.add(program.expandLocalRef(program.getCondition())); } Set<Integer> projectedFields = Arrays.stream(extractRefInputFields(projectsAndCondition)) .boxed() .collect(Collectors.toSet()); // we can't project primary keys projectedFields.addAll(primaryKeyIndices); return projectedFields.stream().sorted().mapToInt(Integer::intValue).toArray(); }
3.68
framework_VFilterSelect_setTdWidth
/** * Descends to child elements until it finds TD elements and sets their * width in pixels. Can be used to work around IE8 & 9 TD element * display: block issues * * @param parent the parent node to descend from * @param width the width to set, in pixels */ private void setTdWidth(Node parent, int width) { for (int i = 0; i < parent.getChildCount(); i++) { Node child = parent.getChild(i); if ("td".equals(child.getNodeName().toLowerCase(Locale.ROOT))) { ((Element) child).getStyle().setWidth(width, Unit.PX); } else { setTdWidth(child, width); } } }
3.68
framework_DataCommunicator_getPushRows
/** * Get the current range of rows to push in the next response. * * @return the range of rows to push * @since 8.0.6 */ protected Range getPushRows() { return pushRows; }
3.68
hibernate-validator_ConstraintHelper_putValidatorDescriptors
/** * Registers the given validator descriptors with the given constraint * annotation type. * * @param annotationType The constraint annotation type * @param validatorDescriptors The validator descriptors to register * @param keepExistingClasses Whether already-registered validators should be kept or not * @param <A> the type of the annotation */ public <A extends Annotation> void putValidatorDescriptors(Class<A> annotationType, List<ConstraintValidatorDescriptor<A>> validatorDescriptors, boolean keepExistingClasses) { List<ConstraintValidatorDescriptor<A>> validatorDescriptorsToAdd = new ArrayList<>(); if ( keepExistingClasses ) { List<ConstraintValidatorDescriptor<A>> existingvalidatorDescriptors = getAllValidatorDescriptors( annotationType ); if ( existingvalidatorDescriptors != null ) { validatorDescriptorsToAdd.addAll( 0, existingvalidatorDescriptors ); } } validatorDescriptorsToAdd.addAll( validatorDescriptors ); this.validatorDescriptors.put( annotationType, CollectionHelper.toImmutableList( validatorDescriptorsToAdd ) ); }
3.68
hbase_AbstractFSWALProvider_getNumRolledLogFiles
/** * returns the number of rolled WAL files. */ public static int getNumRolledLogFiles(WAL wal) { return ((AbstractFSWAL<?>) wal).getNumRolledLogFiles(); }
3.68
flink_SubtaskGatewayImpl_tryCloseGateway
/** * Closes the gateway. All events sent through this gateway are blocked until the gateway is * re-opened. If the gateway is already closed, this does nothing. * * @return True if the gateway is closed, false if the checkpointId is incorrect. */ boolean tryCloseGateway(long checkpointId) { checkRunsInMainThread(); if (currentMarkedCheckpointIds.contains(checkpointId)) { blockedEventsMap.putIfAbsent(checkpointId, new LinkedList<>()); return true; } return false; }
3.68
hadoop_OBSFileSystem_getCanonicalServiceName
/** * Override {@code getCanonicalServiceName} and return {@code null} since * delegation token is not supported. */ @Override public String getCanonicalServiceName() { // Does not support Token return null; }
3.68
flink_SqlCreateTableConverter_convertCreateTable
/** Convert the {@link SqlCreateTable} node. */ Operation convertCreateTable(SqlCreateTable sqlCreateTable) { CatalogTable catalogTable = createCatalogTable(sqlCreateTable); UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(sqlCreateTable.fullTableName()); ObjectIdentifier identifier = catalogManager.qualifyIdentifier(unresolvedIdentifier); return new CreateTableOperation( identifier, catalogTable, sqlCreateTable.isIfNotExists(), sqlCreateTable.isTemporary()); }
3.68
framework_AbstractConnector_forceStateChange
/** * Force the connector to recheck its state variables as the variables or * their meaning might have changed. * * @since 7.3 */ public void forceStateChange() { StateChangeEvent event = new FullStateChangeEvent(this); fireEvent(event); }
3.68
hadoop_NamenodeStatusReport_getClusterId
/** * Get the cluster identifier. * * @return The cluster identifier. */ public String getClusterId() { return this.clusterId; }
3.68
hadoop_RecordCreatorFactory_create
/** * Creates a DNS SRV record. * * @param name the record name. * @param target the record target/value. * @return an SRV record. */ @Override public SRVRecord create(Name name, HostPortInfo target) { return new SRVRecord(name, DClass.IN, ttl, 1, 1, target.getPort(), target.getHost()); }
3.68
hudi_ClusteringPlanStrategy_getFileSliceInfo
/** * Transform {@link FileSlice} to {@link HoodieSliceInfo}. */ protected static List<HoodieSliceInfo> getFileSliceInfo(List<FileSlice> slices) { return slices.stream().map(slice -> new HoodieSliceInfo().newBuilder() .setPartitionPath(slice.getPartitionPath()) .setFileId(slice.getFileId()) .setDataFilePath(slice.getBaseFile().map(BaseFile::getPath).orElse(StringUtils.EMPTY_STRING)) .setDeltaFilePaths(slice.getLogFiles().map(f -> f.getPath().toString()).collect(Collectors.toList())) .setBootstrapFilePath(slice.getBaseFile().map(bf -> bf.getBootstrapBaseFile().map(bbf -> bbf.getPath()).orElse(StringUtils.EMPTY_STRING)).orElse(StringUtils.EMPTY_STRING)) .build()).collect(Collectors.toList()); }
3.68
hbase_CompactionPipeline_validateSuffixList
// debug method private boolean validateSuffixList(LinkedList<ImmutableSegment> suffix) { if (suffix.isEmpty()) { // empty suffix is always valid return true; } Iterator<ImmutableSegment> pipelineBackwardIterator = pipeline.descendingIterator(); Iterator<ImmutableSegment> suffixBackwardIterator = suffix.descendingIterator(); ImmutableSegment suffixCurrent; ImmutableSegment pipelineCurrent; for (; suffixBackwardIterator.hasNext();) { if (!pipelineBackwardIterator.hasNext()) { // a suffix longer than pipeline is invalid return false; } suffixCurrent = suffixBackwardIterator.next(); pipelineCurrent = pipelineBackwardIterator.next(); if (suffixCurrent != pipelineCurrent) { // non-matching suffix return false; } } // suffix matches pipeline suffix return true; }
3.68
hudi_AbstractTableFileSystemView_refreshTimeline
/** * Refresh commits timeline. * * @param visibleActiveTimeline Visible Active Timeline */ protected void refreshTimeline(HoodieTimeline visibleActiveTimeline) { this.visibleCommitsAndCompactionTimeline = visibleActiveTimeline.getWriteTimeline(); }
3.68
morf_TableLoader_runRecoverably
/** * PostgreSQL does not like failing commands, and marks connections as "dirty" after errors: * <q>ERROR: current transaction is aborted, commands ignored until end of transaction block.</q> * * <p>To recover a connection, one has to issue a rollback.</p> * * @param runnable */ private void runRecoverably(Runnable runnable) throws SQLException { // make sure we commit if we can if (explicitCommit) { connection.commit(); } try { runnable.run(); } catch (Exception e) { // make sure we rollback if we can if (explicitCommit) { connection.rollback(); } throw e; } }
3.68
flink_ExecutionEnvironment_createCollectionsEnvironment
/** * Creates a {@link CollectionEnvironment} that uses Java Collections underneath. This will * execute in a single thread in the current JVM. It is very fast but will fail if the data does * not fit into memory. parallelism will always be 1. This is useful during implementation and * for debugging. * * @return A Collection Environment */ @PublicEvolving public static CollectionEnvironment createCollectionsEnvironment() { CollectionEnvironment ce = new CollectionEnvironment(); ce.setParallelism(1); return ce; }
3.68
hbase_RegionMover_isolateRegions
/** * Isolates regions specified in {@link #isolateRegionIdArray} on {@link #hostname} in ack mode * and unloads regions from the given {@link #hostname} using ack/noAck mode and {@link #maxthreads}. * In noAck mode we do not make sure that the region is successfully online on the target region * server, hence it is best effort. We do not unload regions to hostnames given in * {@link #excludeFile}. If designatedFile is present with some contents, we will unload regions * to hostnames provided in {@link #designatedFile}. * @return true if region isolation succeeded, false otherwise */ public boolean isolateRegions() throws ExecutionException, InterruptedException, TimeoutException { return unloadRegions(false, isolateRegionIdArray); }
3.68
flink_CallContext_fail
/** * Helper method for handling failures during the type inference process while considering the * {@code throwOnFailure} flag. * * <p>Shorthand for {@code if (throwOnFailure) throw ValidationException(...) else return * Optional.empty()}. */ default <T> Optional<T> fail(boolean throwOnFailure, String message, Object... args) { if (throwOnFailure) { throw newValidationError(message, args); } return Optional.empty(); }
3.68
hbase_ScheduledChore_isScheduled
/** Returns true when this Chore is scheduled with a ChoreService */ public synchronized boolean isScheduled() { return choreService != null && choreService.isChoreScheduled(this); }
3.68
querydsl_AbstractMySQLQuery_withRollup
/** * The GROUP BY clause permits a WITH ROLLUP modifier that causes extra rows to be added to the * summary output. These rows represent higher-level (or super-aggregate) summary operations. * ROLLUP thus enables you to answer questions at multiple levels of analysis with a single query. * It can be used, for example, to provide support for OLAP (Online Analytical Processing) operations. * * @return the current object */ public C withRollup() { return addFlag(Position.AFTER_GROUP_BY, WITH_ROLLUP); }
3.68
graphhopper_VectorTile_setTags
/** * <pre> * Tags of this feature are encoded as repeated pairs of * integers. * A detailed description of tags is located in sections * 4.2 and 4.4 of the specification * </pre> * * <code>repeated uint32 tags = 2 [packed = true];</code> */ public Builder setTags( int index, int value) { ensureTagsIsMutable(); tags_.set(index, value); onChanged(); return this; }
3.68
flink_PythonStreamGroupAggregateOperator_getUserDefinedFunctionsProto
/** * Gets the proto representation of the Python user-defined aggregate functions to be executed. */ @Override public FlinkFnApi.UserDefinedAggregateFunctions getUserDefinedFunctionsProto() { FlinkFnApi.UserDefinedAggregateFunctions.Builder builder = super.getUserDefinedFunctionsProto().toBuilder(); builder.setCountStarInserted(countStarInserted); return builder.build(); }
3.68
flink_ThroughputCalculator_pauseMeasurement
/** Mark when the time should not be taken into account. */ public void pauseMeasurement() { if (measurementStartTime != NOT_TRACKED) { currentMeasurementTime += clock.absoluteTimeMillis() - measurementStartTime; } measurementStartTime = NOT_TRACKED; }
3.68
hbase_VersionModel_setJVMVersion
/** * @param version the JVM version string */ public void setJVMVersion(String version) { this.jvmVersion = version; }
3.68
hudi_HoodieSparkQuickstart_updateData
/** * This is similar to inserting new data. Generate updates to existing trips using the data generator, * load into a DataFrame and write DataFrame into the hudi dataset. */ public static Dataset<Row> updateData(SparkSession spark, JavaSparkContext jsc, String tablePath, String tableName, HoodieExampleDataGenerator<HoodieAvroPayload> dataGen) { String commitTime = Long.toString(System.currentTimeMillis()); List<String> updates = dataGen.convertToStringList(dataGen.generateUniqueUpdates(commitTime)); Dataset<Row> df = spark.read().json(jsc.parallelize(updates, 1)); df.write().format("hudi") .options(QuickstartUtils.getQuickstartWriteConfigs()) .option(HoodieWriteConfig.PRECOMBINE_FIELD_NAME.key(), "ts") .option(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid") .option(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME.key(), "partitionpath") .option(TBL_NAME.key(), tableName) .mode(Append) .save(tablePath); return df; }
3.68
framework_VProgressBar_setState
/** * Sets the value of this progress bar. The value is a {@code float} between * 0 and 1 where 0 represents no progress at all and 1 represents fully * completed. * * @param state * the new progress value */ public void setState(float state) { final int size = Math.round(100 * state); indicator.getStyle().setWidth(size, Unit.PCT); }
3.68
framework_DDEventHandleStrategy_findDragTarget
/** * Find drag handler for the {@code target} element. * * @param target * target element over which DnD event has happened * @param mediator * VDragAndDropManager data accessor * @return drop handler of target element */ protected VDropHandler findDragTarget(Element target, DDManagerMediator mediator) { return mediator.getManager().findDragTarget(target); }
3.68
hbase_HBaseServerExceptionPauseManager_getPauseNsFromException
/** * Returns the nanos, if any, for which the client should wait * @param error The exception from the server * @param tries The current retry count * @return The time, in nanos, to pause. If empty then pausing would exceed our timeout, so we * should throw now */ public OptionalLong getPauseNsFromException(Throwable error, int tries, long startNs) { long expectedSleepNs; long remainingTimeNs = remainingTimeNs(startNs) - SLEEP_DELTA_NS; if (error instanceof RpcThrottlingException) { RpcThrottlingException rpcThrottlingException = (RpcThrottlingException) error; expectedSleepNs = TimeUnit.MILLISECONDS.toNanos(rpcThrottlingException.getWaitInterval()); if (expectedSleepNs > remainingTimeNs && remainingTimeNs > 0) { if (LOG.isDebugEnabled()) { LOG.debug("RpcThrottlingException suggested pause of {}ns which would exceed " + "the timeout. We should throw instead.", expectedSleepNs, rpcThrottlingException); } return OptionalLong.empty(); } if (LOG.isDebugEnabled()) { LOG.debug("Sleeping for {}ns after catching RpcThrottlingException", expectedSleepNs, rpcThrottlingException); } } else { expectedSleepNs = HBaseServerException.isServerOverloaded(error) ? pauseNsForServerOverloaded : pauseNs; // RpcThrottlingException tells us exactly how long the client should wait for, // so we should not factor in the retry count for said exception expectedSleepNs = getPauseTime(expectedSleepNs, tries - 1); } if (timeoutNs > 0) { if (remainingTimeNs <= 0) { return OptionalLong.empty(); } expectedSleepNs = Math.min(remainingTimeNs, expectedSleepNs); } return OptionalLong.of(expectedSleepNs); }
3.68
hbase_StoreScanner_getAllScannersForTesting
/** * Used in testing. * @return all scanners in no particular order */ List<KeyValueScanner> getAllScannersForTesting() { List<KeyValueScanner> allScanners = new ArrayList<>(); KeyValueScanner current = heap.getCurrentForTesting(); if (current != null) allScanners.add(current); for (KeyValueScanner scanner : heap.getHeap()) allScanners.add(scanner); return allScanners; }
3.68
morf_RenameTable_accept
/** * @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor) */ @Override public void accept(SchemaChangeVisitor visitor) { visitor.visit(this); }
3.68
hadoop_WriteOperationHelper_deactivateAuditSpan
/** * Deactivate the audit span. */ private void deactivateAuditSpan() { auditSpan.deactivate(); }
3.68
hbase_BackupManager_getConf
/** * Get configuration */ Configuration getConf() { return conf; }
3.68
pulsar_TopicEventsDispatcher_addTopicEventListener
/** * Adds listeners, ignores null listeners. * @param listeners */ public void addTopicEventListener(TopicEventsListener... listeners) { Objects.requireNonNull(listeners); Arrays.stream(listeners) .filter(x -> x != null) .forEach(topicEventListeners::add); }
3.68
framework_TreeTable_fireCollapseEvent
/** * Emits a collapse event. * * @param itemId * the item id. */ protected void fireCollapseEvent(Object itemId) { fireEvent(new CollapseEvent(this, itemId)); }
3.68
framework_VaadinSession_close
/** * Sets this session to be closed and all UI state to be discarded at the * end of the current request, or at the end of the next request if there is * no ongoing one. * <p> * After the session has been discarded, any UIs that have been left open * will give a Session Expired error and a new session will be created for * serving new UIs. * <p> * To avoid causing out of sync errors, you should typically redirect to * some other page using {@link Page#setLocation(String)} to make the * browser unload the invalidated UI. * * @see SystemMessages#getSessionExpiredCaption() * */ public void close() { assert hasLock(); state = State.CLOSING; }
3.68
morf_OracleDialect_getSqlForRandom
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForRandom() */ @Override protected String getSqlForRandom() { return "dbms_random.value"; }
3.68
querydsl_AbstractJPAQuery_getResultList
/** * Transforms results using FactoryExpression if ResultTransformer can't be used * * @param query query * @return results */ private List<?> getResultList(Query query) { // TODO : use lazy fetch here? if (projection != null) { List<?> results = query.getResultList(); List<Object> rv = new ArrayList<Object>(results.size()); for (Object o : results) { if (o != null) { if (!o.getClass().isArray()) { o = new Object[]{o}; } rv.add(projection.newInstance((Object[]) o)); } else { rv.add(projection.newInstance(new Object[] {null})); } } return rv; } else { return query.getResultList(); } }
3.68
hbase_MasterAddressTracker_parse
/** * @param data zookeeper data. may be null * @return pb object of master, null if no active master * @throws DeserializationException if the parsing fails */ public static ZooKeeperProtos.Master parse(byte[] data) throws DeserializationException { if (data == null) { return null; } int prefixLen = ProtobufUtil.lengthOfPBMagic(); try { return ZooKeeperProtos.Master.parser().parseFrom(data, prefixLen, data.length - prefixLen); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } }
3.68
flink_FlinkContainers_build
/** * Returns {@code FlinkContainers} built from the provided settings. * * @return {@code FlinkContainers} built with parameters of this {@code * FlinkContainers.Builder}. */ public FlinkContainers build() { FlinkTestcontainersConfigurator configurator = new FlinkTestcontainersConfigurator( flinkContainersSettings, testcontainersSettings); return configurator.configure(); }
3.68
flink_SSLUtils_createInternalNettySSLContext
/** * Creates the SSL Context for internal SSL, if internal SSL is configured. For internal SSL, * the client and server side configuration are identical, because of mutual authentication. */ @Nullable private static SslContext createInternalNettySSLContext( Configuration config, boolean clientMode, SslProvider provider) throws Exception { checkNotNull(config, "config"); if (!SecurityOptions.isInternalSSLEnabled(config)) { return null; } String[] sslProtocols = getEnabledProtocols(config); List<String> ciphers = Arrays.asList(getEnabledCipherSuites(config)); int sessionCacheSize = config.getInteger(SecurityOptions.SSL_INTERNAL_SESSION_CACHE_SIZE); int sessionTimeoutMs = config.getInteger(SecurityOptions.SSL_INTERNAL_SESSION_TIMEOUT); KeyManagerFactory kmf = getKeyManagerFactory(config, true, provider); ClientAuth clientAuth = ClientAuth.REQUIRE; final SslContextBuilder sslContextBuilder; if (clientMode) { sslContextBuilder = SslContextBuilder.forClient().keyManager(kmf); } else { sslContextBuilder = SslContextBuilder.forServer(kmf); } Optional<TrustManagerFactory> tmf = getTrustManagerFactory(config, true); tmf.map(sslContextBuilder::trustManager); return sslContextBuilder .sslProvider(provider) .protocols(sslProtocols) .ciphers(ciphers) .clientAuth(clientAuth) .sessionCacheSize(sessionCacheSize) .sessionTimeout(sessionTimeoutMs / 1000) .build(); }
3.68
hadoop_GangliaConf_setDmax
/** * @param dmax the dmax to set */ void setDmax(int dmax) { this.dmax = dmax; }
3.68
hbase_Reference_readFields
/** * @deprecated Writables are going away. Use the pb serialization methods instead. Remove in a * release after 0.96 goes out. This is here only to migrate old Reference files * written with Writables before 0.96. */ @Deprecated public void readFields(DataInput in) throws IOException { boolean tmp = in.readBoolean(); // If true, set region to top. this.region = tmp ? Range.top : Range.bottom; this.splitkey = Bytes.readByteArray(in); }
3.68
flink_BinaryStringDataUtil_keyValue
/** * Parse target string as key-value string and return the value matches key name. If accept any * null arguments, return null. example: keyvalue('k1=v1;k2=v2', ';', '=', 'k2') = 'v2' * keyvalue('k1:v1,k2:v2', ',', ':', 'k3') = NULL * * @param split1 separator between key-value tuple. * @param split2 separator between key and value. * @param keyName name of the key whose value you want return. * @return target value. */ public static BinaryStringData keyValue( BinaryStringData str, byte split1, byte split2, BinaryStringData keyName) { str.ensureMaterialized(); if (keyName == null || keyName.getSizeInBytes() == 0) { return null; } if (str.inFirstSegment() && keyName.inFirstSegment()) { // position in byte int byteIdx = 0; // position of last split1 int lastSplit1Idx = -1; while (byteIdx < str.getSizeInBytes()) { // If find next split1 in str, process current kv if (str.getSegments()[0].get(str.getOffset() + byteIdx) == split1) { int currentKeyIdx = lastSplit1Idx + 1; // If key of current kv is keyName, return the value directly BinaryStringData value = findValueOfKey(str, split2, keyName, currentKeyIdx, byteIdx); if (value != null) { return value; } lastSplit1Idx = byteIdx; } byteIdx++; } // process the string which is not ends with split1 int currentKeyIdx = lastSplit1Idx + 1; return findValueOfKey(str, split2, keyName, currentKeyIdx, str.getSizeInBytes()); } else { return keyValueSlow(str, split1, split2, keyName); } }
3.68
flink_AbstractInvokable_getExecutionConfig
/** Returns the global ExecutionConfig. */ public ExecutionConfig getExecutionConfig() { return this.environment.getExecutionConfig(); }
3.68
hadoop_YarnVersionInfo_getSrcChecksum
/** * Get the checksum of the source files from which YARN was * built. * * @return srcChecksum. */ public static String getSrcChecksum() { return YARN_VERSION_INFO._getSrcChecksum(); }
3.68
framework_FreeformQuery_getCount
/** * This implementation of getCount() actually fetches all records from the * database, which might be a performance issue. Override this method with a * SELECT COUNT(*) ... query if this is too slow for your needs. * * {@inheritDoc} */ @Override public int getCount() throws SQLException { // First try the delegate int count = countByDelegate(); if (count < 0) { // Couldn't use the delegate, use the bad way. Statement statement = null; ResultSet rs = null; Connection conn = getConnection(); try { statement = conn.createStatement( ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY); rs = statement.executeQuery(queryString); if (rs.last()) { count = rs.getRow(); } else { count = 0; } } finally { releaseConnection(conn, statement, rs); } } return count; }
3.68
hibernate-validator_Filters_excludeProxies
/** * Returns a filter which excludes proxy objects. * * @return a filter which excludes proxy objects */ public static Filter excludeProxies() { return PROXY_FILTER; }
3.68
framework_AbstractClientConnector_isAttached
/* * (non-Javadoc) * * @see com.vaadin.server.ClientConnector#isAttached() */ @Override public boolean isAttached() { return getSession() != null; }
3.68
flink_LimitedConnectionsFileSystem_unregisterOutputStream
/** * Atomically removes the given output stream from the set of currently open output streams, and * signals that new stream can now be opened. */ void unregisterOutputStream(OutStream stream) { lock.lock(); try { // only decrement if we actually remove the stream if (openOutputStreams.remove(stream)) { numReservedOutputStreams--; available.signalAll(); } } finally { lock.unlock(); } }
3.68
dubbo_TreePathDynamicConfiguration_getConfigNamespace
/** * Get the namespace from the specified {@link URL connection URL} * * @param url the specified {@link URL connection URL} * @return non-null */ protected String getConfigNamespace(URL url) { return url.getParameter(CONFIG_NAMESPACE_KEY, DEFAULT_GROUP); }
3.68
morf_UpgradePath_getSteps
/** * Return the list of upgrade steps. If empty, a deployment of the complete * database should be assumed. * * @return the list of upgrade steps; or an empty list if "everything" should be done. */ public List<UpgradeStep> getSteps() { return steps; }
3.68
hadoop_AzureFileSystemInstrumentation_setContainerName
/** * Sets the container name to tag all the metrics with. * @param containerName The container name. */ public void setContainerName(String containerName) { registry.tag("containerName", "Name of the Azure Storage container that these metrics are going against", containerName); }
3.68
framework_Margins_getMarginTop
/** * Returns the height of the top margin. * * @return top margin (in pixels) */ public int getMarginTop() { return marginTop; }
3.68
framework_AbstractRenderer_getNullRepresentation
/** * Null representation for the renderer. * * @return a textual representation of {@code null} */ protected String getNullRepresentation() { return nullRepresentation; }
3.68
flink_SkipListUtils_getNodeStatus
/** * Returns the status of the node. * * @param memorySegment memory segment for key space. * @param offset offset of key space in the memory segment. */ public static NodeStatus getNodeStatus(MemorySegment memorySegment, int offset) { byte status = (byte) ((memorySegment.getInt(offset + KEY_META_OFFSET) >>> 8) & BYTE_MASK); return NodeStatus.valueOf(status); }
3.68
hadoop_ColumnRWHelper_store
/** * Sends a Mutation to the table. The mutations will be buffered and sent over * the wire as part of a batch. * * @param rowKey identifying the row to write. Nothing gets written when null. * @param tableMutator used to modify the underlying HBase table. Caller is * responsible to pass a mutator for the table that actually has this * column. * @param qualifier column qualifier. Nothing gets written when null. * @param timestamp version timestamp. When null the server timestamp will be * used. * @param attributes attributes for the mutation that are used by the * coprocessor to set/read the cell tags. * @param inputValue the value to write to the rowKey and column qualifier. * Nothing gets written when null. * @throws IOException if there is any exception encountered while doing * store operation(sending mutation to the table). */ public static void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator, ColumnPrefix<?> columnPrefix, String qualifier, Long timestamp, Object inputValue, Attribute... attributes) throws IOException { // Null check if (qualifier == null) { throw new IOException("Cannot store column with null qualifier in " + tableMutator.getName().getNameAsString()); } byte[] columnQualifier = columnPrefix.getColumnPrefixBytes(qualifier); Attribute[] combinedAttributes = columnPrefix.getCombinedAttrsWithAggr(attributes); store(rowKey, tableMutator, columnPrefix.getColumnFamilyBytes(), columnQualifier, timestamp, columnPrefix.supplementCellTimeStamp(), inputValue, columnPrefix.getValueConverter(), combinedAttributes); }
3.68
hbase_BackupManager_updateBackupInfo
/** * Updates status (state) of a backup session in a persistent store * @param context context * @throws IOException exception */ public void updateBackupInfo(BackupInfo context) throws IOException { systemTable.updateBackupInfo(context); }
3.68
querydsl_Expressions_as
/** * Create a {@code source as alias} expression * * @param source source * @param alias alias * @return source as alias */ public static <D> SimpleExpression<D> as(Expression<D> source, String alias) { return as(source, ExpressionUtils.path(source.getType(), alias)); }
3.68
druid_Resources_setDefaultClassLoader
/** * Sets the default classloader * * @param defaultClassLoader - the new default ClassLoader */ public static void setDefaultClassLoader(ClassLoader defaultClassLoader) { Resources.defaultClassLoader = defaultClassLoader; }
3.68
hbase_ZKListener_nodeDataChanged
/** * Called when an existing node has changed data. * @param path full path of the updated node */ public void nodeDataChanged(String path) { // no-op }
3.68