name: string, lengths 12 to 178
code_snippet: string, lengths 8 to 36.5k
score: float64, range 3.26 to 3.68
framework_VColorPickerGrid_getSelectedX
/** * Returns currently selected x-coordinate of the grid. * * @return the selected x-coordinate */ public int getSelectedX() { return selectedX; }
3.68
hbase_BlockCache_notifyFileBlockEvicted
/** * Notifies the cache implementation that the given file had a block evicted * @param fileName the file that had a block evicted. */ default void notifyFileBlockEvicted(String fileName) { // noop }
3.68
dubbo_AbstractZookeeperTransporter_connect
/** * Shared connect for registry, metadata, etc. * <p> * Make sure the connection is connected. * * @param url the zookeeper URL, in the format {[username:password@]address} * @return a connected {@link ZookeeperClient} */ @Override public ZookeeperClient connect(URL url) { ZookeeperClient zookeeperClient; // address format: {[username:password@]address} List<String> addressList = getURLBackupAddress(url); // These fields define the zookeeper server, including protocol, host, port, username, password if ((zookeeperClient = fetchAndUpdateZookeeperClientCache(addressList)) != null && zookeeperClient.isConnected()) { logger.info("find valid zookeeper client from the cache for address: " + url); return zookeeperClient; } // avoid creating too many connections, so add lock synchronized (zookeeperClientMap) { if ((zookeeperClient = fetchAndUpdateZookeeperClientCache(addressList)) != null && zookeeperClient.isConnected()) { logger.info("find valid zookeeper client from the cache for address: " + url); return zookeeperClient; } zookeeperClient = createZookeeperClient(url); logger.info("No valid zookeeper client found from cache, therefore create a new client for url. " + url); writeToClientMap(addressList, zookeeperClient); } return zookeeperClient; }
3.68
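The connect snippet above uses a check-then-lock-then-recheck idiom so that concurrent callers reuse one client instead of each creating their own. A minimal generic sketch of that caching pattern, with hypothetical names rather than Dubbo's actual classes:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class ClientCache<K, C> {
    private final Map<K, C> cache = new ConcurrentHashMap<>();

    C getOrCreate(K key, Function<K, C> factory) {
        C client = cache.get(key);     // fast path: no lock taken
        if (client != null) {
            return client;
        }
        synchronized (cache) {         // slow path: serialize creation
            client = cache.get(key);   // re-check under the lock
            if (client == null) {
                client = factory.apply(key);
                cache.put(key, client);
            }
            return client;
        }
    }
}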
hbase_MasterProcedureUtil_getServerPriority
/** * Return the priority for the given procedure. For now we only have two priorities, 100 for * server carrying meta, and 1 for others. */ public static int getServerPriority(ServerProcedureInterface proc) { return proc.hasMetaTableRegion() ? 100 : 1; }
3.68
framework_SingleSelectionModelImpl_asSingleSelect
/** * Gets a wrapper for using this grid as a single select in a binder. * * @return a single select wrapper for grid */ @Override public SingleSelect<T> asSingleSelect() { return new SingleSelect<T>() { @Override public void setValue(T value) { SingleSelectionModelImpl.this.setSelectedFromServer(value); } @Override public T getValue() { return SingleSelectionModelImpl.this.getSelectedItem() .orElse(null); } @Override public Registration addValueChangeListener( com.vaadin.data.HasValue.ValueChangeListener<T> listener) { return SingleSelectionModelImpl.this.addSingleSelectionListener( (SingleSelectionListener<T>) event -> listener .valueChange(event)); } @Override public void setRequiredIndicatorVisible( boolean requiredIndicatorVisible) { // TODO support required indicator when grid is used in binder ? throw new UnsupportedOperationException( "Required indicator is not supported for Grid."); } @Override public boolean isRequiredIndicatorVisible() { throw new UnsupportedOperationException( "Required indicator is not supported for Grid."); } @Override public void setReadOnly(boolean readOnly) { setUserSelectionAllowed(!readOnly); } @Override public boolean isReadOnly() { return !isUserSelectionAllowed(); } }; }
3.68
hbase_HRegionServer_isClusterUp
/** Returns True if the cluster is up. */ @Override public boolean isClusterUp() { return this.masterless || (this.clusterStatusTracker != null && this.clusterStatusTracker.isClusterUp()); }
3.68
dubbo_ReactorServerCalls_manyToOne
/** * Implements a stream -> unary call as Flux -> Mono * * @param responseObserver response StreamObserver * @param func service implementation * @return request StreamObserver */ public static <T, R> StreamObserver<T> manyToOne( StreamObserver<R> responseObserver, Function<Flux<T>, Mono<R>> func) { ServerTripleReactorPublisher<T> serverPublisher = new ServerTripleReactorPublisher<T>((CallStreamObserver<R>) responseObserver); try { Mono<R> responseMono = func.apply(Flux.from(serverPublisher)); responseMono.subscribe( value -> { // Don't try to respond if the server has already canceled the request if (!serverPublisher.isCancelled()) { responseObserver.onNext(value); } }, throwable -> { // Don't try to respond if the server has already canceled the request if (!serverPublisher.isCancelled()) { responseObserver.onError(throwable); } }, responseObserver::onCompleted); serverPublisher.startRequest(); } catch (Throwable throwable) { responseObserver.onError(throwable); } return serverPublisher; }
3.68
streampipes_DbDataTypeFactory_getFromObject
/** * Tries to identify the data type of the object {@code o}. In case it is not supported, it is * interpreted as a String (VARCHAR(255)) * * @param o The object which should be identified * @param sqlEngine The database engine for which the type is resolved * @return the matching {@code DbDataTypes} value */ public static DbDataTypes getFromObject(final Object o, SupportedDbEngines sqlEngine) { if (o instanceof Integer) { return getInteger(sqlEngine); } else if (o instanceof Long) { return getLong(sqlEngine); } else if (o instanceof Float) { return getFloat(sqlEngine); } else if (o instanceof Double) { return getDouble(sqlEngine); } else if (o instanceof Boolean) { return getBoolean(sqlEngine); } else { return getLongString(sqlEngine); } }
3.68
MagicPlugin_PreLoadEvent_registerEntityTargetingManager
/** * Register an EntityTargetingProvider, for determining when one entity may target another with spells. * * @param manager The manager to add. */ public void registerEntityTargetingManager(EntityTargetingManager manager) { targetingManagers.add(manager); }
3.68
hadoop_ConnectionPool_removeConnections
/** * Remove connections from the current pool. * * @param num Number of connections to remove. * @return Removed connections. */ public synchronized List<ConnectionContext> removeConnections(int num) { List<ConnectionContext> removed = new LinkedList<>(); if (this.connections.size() > this.minSize) { int targetCount = Math.min(num, this.connections.size() - this.minSize); // Remove and close targetCount of connections List<ConnectionContext> tmpConnections = new ArrayList<>(); for (ConnectionContext conn : this.connections) { // Only pick idle connections to close if (removed.size() < targetCount && conn.isIdle()) { removed.add(conn); } else { tmpConnections.add(conn); } } this.connections = tmpConnections; } LOG.debug("Expected to remove {} connections and actually removed {} connections " + "for connectionPool: {}", num, removed.size(), connectionPoolId); return removed; }
3.68
hudi_Option_orElseGet
/** * Identical to {@code Optional.orElseGet} */ public T orElseGet(Supplier<? extends T> other) { return val != null ? val : other.get(); }
3.68
framework_Form_getField
/** * Gets the field identified by the property id. * * @param propertyId * the id of the property. * @return the field identified by the property id, or {@code null} if none */ public Field getField(Object propertyId) { return fields.get(propertyId); }
3.68
hudi_DFSPropertiesConfiguration_addPropsFromFile
/** * Add properties from external configuration files. * * @param filePath File path for configuration file */ public void addPropsFromFile(Path filePath) { if (visitedFilePaths.contains(filePath.toString())) { throw new IllegalStateException("Loop detected; file " + filePath + " already referenced"); } FileSystem fs = FSUtils.getFs( filePath.toString(), Option.ofNullable(hadoopConfig).orElseGet(Configuration::new) ); try { if (filePath.equals(DEFAULT_PATH) && !fs.exists(filePath)) { LOG.warn("Properties file " + filePath + " not found. Skipping loading the properties file"); return; } } catch (IOException ioe) { throw new HoodieIOException("Cannot check if the properties file exists: " + filePath, ioe); } try (BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(filePath)))) { visitedFilePaths.add(filePath.toString()); addPropsFromStream(reader, filePath); } catch (IOException ioe) { LOG.error("Error reading in properties from dfs from file " + filePath); throw new HoodieIOException("Cannot read properties from dfs from file " + filePath, ioe); } }
3.68
flink_StreamArrowPythonGroupWindowAggregateFunctionOperator_isWindowLate
/** * Returns {@code true} if the watermark is after the end timestamp plus the allowed lateness of * the given window. */ private boolean isWindowLate(W window) { return windowAssigner.isEventTime() && (toEpochMillsForTimer(cleanupTime(window), shiftTimeZone) <= internalTimerService.currentWatermark()); }
3.68
hudi_HoodieJavaPairRDD_getJavaPairRDD
/** * @param hoodiePairData {@link HoodieJavaPairRDD}<K, V> instance containing the {@link JavaPairRDD} of pairs. * @param <K> type of key. * @param <V> type of value. * @return the {@link JavaPairRDD} of pairs. */ public static <K, V> JavaPairRDD<K, V> getJavaPairRDD(HoodiePairData<K, V> hoodiePairData) { return ((HoodieJavaPairRDD<K, V>) hoodiePairData).get(); }
3.68
hadoop_PlacementPolicy_toIndentedString
/** * Convert the given object to string with each line indented by 4 spaces * (except the first line). */ private String toIndentedString(java.lang.Object o) { if (o == null) { return "null"; } return o.toString().replace("\n", "\n "); }
3.68
flink_HiveParserQBSubQuery_analyzeExpr
/* * 1. On encountering a DOT, we attempt to resolve the leftmost name * to the Parent Query. * 2. An unqualified name is assumed to be a SubQuery reference. * We don't attempt to resolve this to the Parent; because * we require all Parent column references to be qualified. * 3. All other expressions have a Type based on their children. * An Expr w/o children is assumed to refer to neither. */ private ObjectPair<HiveParserQBSubQuery.ExprType, ColumnInfo> analyzeExpr( HiveParserASTNode expr) { ColumnInfo cInfo = null; if (forHavingClause) { try { cInfo = parentQueryRR.getExpression(expr); if (cInfo != null) { return ObjectPair.create( HiveParserQBSubQuery.ExprType.REFERS_PARENT, cInfo); } } catch (SemanticException se) { } } if (expr.getType() == HiveASTParser.DOT) { HiveParserASTNode dot = firstDot(expr); cInfo = resolveDot(dot); if (cInfo != null) { return ObjectPair.create(HiveParserQBSubQuery.ExprType.REFERS_PARENT, cInfo); } return ObjectPair.create(HiveParserQBSubQuery.ExprType.REFERS_SUBQUERY, null); } else if (expr.getType() == HiveASTParser.TOK_TABLE_OR_COL) { return ObjectPair.create(HiveParserQBSubQuery.ExprType.REFERS_SUBQUERY, null); } else { HiveParserQBSubQuery.ExprType exprType = HiveParserQBSubQuery.ExprType.REFERS_NONE; int cnt = expr.getChildCount(); for (int i = 0; i < cnt; i++) { HiveParserASTNode child = (HiveParserASTNode) expr.getChild(i); exprType = exprType.combine(analyzeExpr(child).getFirst()); } return ObjectPair.create(exprType, null); } }
3.68
druid_ZookeeperNodeListener_init
/** * Init a PathChildrenCache to watch the given path. */ @Override public void init() { checkParameters(); super.init(); if (client == null) { client = CuratorFrameworkFactory.builder() .canBeReadOnly(true) .connectionTimeoutMs(5000) .connectString(zkConnectString) .retryPolicy(new RetryForever(10000)) .sessionTimeoutMs(30000) .build(); client.start(); privateZkClient = true; } cache = new PathChildrenCache(client, path, true); cache.getListenable().addListener(new PathChildrenCacheListener() { @Override public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { try { LOG.info("Receive an event: " + event.getType()); lock.lock(); PathChildrenCacheEvent.Type eventType = event.getType(); switch (eventType) { case CHILD_REMOVED: updateSingleNode(event, NodeEventTypeEnum.DELETE); break; case CHILD_ADDED: updateSingleNode(event, NodeEventTypeEnum.ADD); break; case CONNECTION_RECONNECTED: refreshAllNodes(); break; default: // CHILD_UPDATED // INITIALIZED // CONNECTION_LOST // CONNECTION_SUSPENDED LOG.info("Received a PathChildrenCacheEvent, IGNORE it: " + event); } } finally { lock.unlock(); LOG.info("Finish the processing of event: " + event.getType()); } } }); try { // Use BUILD_INITIAL_CACHE to force build cache in the current Thread. // We don't use POST_INITIALIZED_EVENT, so there's no INITIALIZED event. cache.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE); } catch (Exception e) { LOG.error("Can't start PathChildrenCache", e); } }
3.68
pulsar_ConsumerConfiguration_setAckTimeoutRedeliveryBackoff
/** * @param ackTimeoutRedeliveryBackoff redelivery backoff policy for ack timeout. * Default value is: MultiplierRedeliveryBackoff * @return the {@link ConsumerConfiguration} */ public ConsumerConfiguration setAckTimeoutRedeliveryBackoff(RedeliveryBackoff ackTimeoutRedeliveryBackoff) { conf.setAckTimeoutRedeliveryBackoff(ackTimeoutRedeliveryBackoff); return this; }
3.68
hadoop_MutableQuantiles_getEstimator
/** * Get the quantile estimator. * * @return the quantile estimator */ @VisibleForTesting public synchronized QuantileEstimator getEstimator() { return estimator; }
3.68
flink_FileInputFormat_getAverageRecordWidth
/** * Gets the estimated average number of bytes per record. * * @return The average number of bytes per record. * @see org.apache.flink.api.common.io.statistics.BaseStatistics#getAverageRecordWidth() */ @Override public float getAverageRecordWidth() { return this.avgBytesPerRecord; }
3.68
hadoop_JobTokenSecretManager_removeTokenForJob
/** * Remove the cached job token of a job from cache * @param jobId the job whose token is to be removed */ public void removeTokenForJob(String jobId) { synchronized (currentJobTokens) { currentJobTokens.remove(jobId); } }
3.68
morf_ViewChanges_nameToView
/** * @return the view for a given name. */ private Function<String, View> nameToView() { return new Function<String, View>() { @Override public View apply(String name) { return viewIndex.get(name); } }; }
3.68
hadoop_LocalAllocationTagsManager_cleanTempContainers
/** * Method removes temporary containers associated with an application * Used by the placement algorithm to clean temporary tags at the end of * a placement cycle. * @param applicationId Application Id. */ public void cleanTempContainers(ApplicationId applicationId) { if (!appTempMappings.get(applicationId).isEmpty()) { appTempMappings.get(applicationId).entrySet().stream().forEach(nodeE -> { nodeE.getValue().entrySet().stream().forEach(tagE -> { for (int i = 0; i < tagE.getValue().get(); i++) { removeTags(nodeE.getKey(), applicationId, Collections.singleton(tagE.getKey())); } }); }); appTempMappings.remove(applicationId); LOG.debug("Removed TEMP containers of app={}", applicationId); } }
3.68
framework_ApplicationConnection_getContextMenu
/** * Singleton method to get instance of app's context menu. * * @return VContextMenu object */ public VContextMenu getContextMenu() { if (contextMenu == null) { contextMenu = new VContextMenu(); contextMenu.setOwner(uIConnector.getWidget()); DOM.setElementProperty(contextMenu.getElement(), "id", "PID_VAADIN_CM"); } return contextMenu; }
3.68
hudi_AbstractTableFileSystemView_ensureAllPartitionsLoadedCorrectly
/** * Batch loading all the partitions if needed. * * @return A list of relative partition paths of all partitions. */ private List<String> ensureAllPartitionsLoadedCorrectly() { ValidationUtils.checkArgument(!isClosed(), "View is already closed"); try { List<String> formattedPartitionList = getAllPartitionPaths().stream() .map(this::formatPartitionKey).collect(Collectors.toList()); ensurePartitionsLoadedCorrectly(formattedPartitionList); return formattedPartitionList; } catch (IOException e) { throw new HoodieIOException("Failed to get all partition paths", e); } }
3.68
flink_BinaryStringData_fromAddress
/** * Creates a {@link BinaryStringData} instance from the given address (base and offset) and * length. */ public static BinaryStringData fromAddress(MemorySegment[] segments, int offset, int numBytes) { return new BinaryStringData(segments, offset, numBytes); }
3.68
framework_WebBrowser_getRawTimezoneOffset
/** * Returns the browser-reported TimeZone offset in milliseconds from GMT * ignoring possible daylight saving adjustments that may be in effect in * the browser. * <p> * You can use this to figure out which TimeZones the user could actually be * in by calling {@link TimeZone#getAvailableIDs(int)}. * </p> * <p> * If {@link #getRawTimezoneOffset()} and {@link #getTimezoneOffset()} * return the same value, the browser is either in a zone that does not * currently have daylight saving time, or in a zone that never has daylight * saving time. * </p> * * @return timezone offset in milliseconds excluding DST, 0 if not available */ public int getRawTimezoneOffset() { return rawTimezoneOffset; }
3.68
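A minimal sketch of the zone lookup the Javadoc above suggests, using only the standard JDK TimeZone API; the offset value is a hypothetical browser report:

import java.util.TimeZone;

class TimezoneCandidates {
    public static void main(String[] args) {
        int rawOffsetMs = 2 * 60 * 60 * 1000; // e.g. a browser reporting UTC+2
        for (String id : TimeZone.getAvailableIDs(rawOffsetMs)) {
            System.out.println(id); // candidate zones the user could be in
        }
    }
}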
rocketmq-connect_ProcessingContext_stage
/** * @return the stage in the connector pipeline which is currently executing. */ public ErrorReporter.Stage stage() { return stage; }
3.68
pulsar_FastThreadLocalStateCleaner_cleanupAllFastThreadLocals
// cleanup all fast thread local state on all active threads public void cleanupAllFastThreadLocals(BiConsumer<Thread, Object> cleanedValueListener) { for (Thread thread : ThreadUtils.getAllThreads()) { cleanupAllFastThreadLocals(thread, cleanedValueListener); } }
3.68
graphhopper_GHResponse_hasErrors
/** * This method returns true if one of the paths has an error or if the response itself is * erroneous. */ public boolean hasErrors() { if (!errors.isEmpty()) return true; for (ResponsePath p : responsePaths) { if (p.hasErrors()) return true; } return false; }
3.68
hbase_StripeStoreFileManager_rowEquals
/** * Compare two keys for equality. */ private final boolean rowEquals(byte[] k1, byte[] k2) { return Bytes.equals(k1, 0, k1.length, k2, 0, k2.length); }
3.68
hbase_ZKUtil_nodeHasChildren
/** * Checks if the specified znode has any children. Sets no watches. Returns true if the node * exists and has children. Returns false if the node does not exist or if the node does not have * any children. Used during master initialization to determine if the master is a failed-over-to * master or the first master during initial cluster startup. If the directory for regionserver * ephemeral nodes is empty then this is a cluster startup, if not then it is not cluster startup. * @param zkw zk reference * @param znode path of node to check for children of * @return true if node has children, false if not or node does not exist * @throws KeeperException if unexpected zookeeper exception */ public static boolean nodeHasChildren(ZKWatcher zkw, String znode) throws KeeperException { try { return !zkw.getRecoverableZooKeeper().getChildren(znode, null).isEmpty(); } catch (KeeperException.NoNodeException ke) { LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " because node does not exist (not an error)")); return false; } catch (KeeperException e) { LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); zkw.keeperException(e); return false; } catch (InterruptedException e) { LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); zkw.interruptedException(e); return false; } }
3.68
hbase_WhileMatchFilter_parseFrom
/** * Parse a serialized representation of {@link WhileMatchFilter} * @param pbBytes A pb serialized {@link WhileMatchFilter} instance * @return An instance of {@link WhileMatchFilter} made from <code>bytes</code> * @throws DeserializationException if an error occurred * @see #toByteArray */ public static WhileMatchFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.WhileMatchFilter proto; try { proto = FilterProtos.WhileMatchFilter.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } try { return new WhileMatchFilter(ProtobufUtil.toFilter(proto.getFilter())); } catch (IOException ioe) { throw new DeserializationException(ioe); } }
3.68
pulsar_BrokerInterceptor_consumerCreated
/** * Intercept after a consumer is created. * * @param cnx client Connection * @param consumer Consumer object * @param metadata A map of metadata */ default void consumerCreated(ServerCnx cnx, Consumer consumer, Map<String, String> metadata) { }
3.68
framework_TimeZoneUtil_toJSON
/** * Returns a JSON string of the specified {@code zoneId} and {@link Locale}, * which is used in * {@link com.google.gwt.i18n.client.TimeZone#createTimeZone(String)}. * * This method calculates the JSON string from {@code startYear} until * {@code endYear}, both inclusive. * * @param zoneId * the {@link ZoneId} to get the daylight transitions from * @param locale * the locale used to determine the short name of the time zone * @param startYear * the start year of DST transitions * @param endYear * the end year of DST transitions * * @return the encoded string * @since 8.11 */ public static String toJSON(ZoneId zoneId, Locale locale, int startYear, int endYear) { if (zoneId == null || locale == null) { return null; } ZoneRules rules = zoneId.getRules(); TimeZone timeZone = TimeZone.getTimeZone(zoneId); List<Long> transitionsList = new ArrayList<>(); TimeZoneInfo info = new TimeZoneInfo(); if (timeZone.useDaylightTime()) { for (int year = startYear; year <= endYear; year++) { ZonedDateTime i = LocalDateTime.of(year, 1, 1, 0, 0) .atZone(zoneId); while (true) { ZoneOffsetTransition t = rules .nextTransition(i.toInstant()); if (t == null) { break; } i = t.getInstant().atZone(zoneId); if (i.toLocalDate().getYear() != year) { break; } long epochHours = Duration .ofSeconds(t.getInstant().getEpochSecond()) .toHours(); long duration = Math.max(t.getDuration().toMinutes(), 0); transitionsList.add(epochHours); transitionsList.add(duration); } } } info.id = zoneId.getId(); info.transitions = transitionsList.stream().mapToLong(l -> l).toArray(); info.stdOffset = (int) Duration.ofMillis(timeZone.getRawOffset()) .toMinutes(); info.names = new String[] { timeZone.getDisplayName(false, TimeZone.SHORT, locale), timeZone.getDisplayName(false, TimeZone.LONG, locale), timeZone.getDisplayName(true, TimeZone.SHORT, locale), timeZone.getDisplayName(true, TimeZone.LONG, locale) }; return stringify(info); }
3.68
framework_GridConnector_updateCaption
/* * (non-Javadoc) * * @see * com.vaadin.client.HasComponentsConnector#updateCaption(com.vaadin.client * .ComponentConnector) */ @Override public void updateCaption(ComponentConnector connector) { // TODO Auto-generated method stub }
3.68
shardingsphere-elasticjob_JobNodeStorage_getJobNodeChildrenKeys
/** * Get job node children keys. * * @param node node * @return children keys */ public List<String> getJobNodeChildrenKeys(final String node) { return regCenter.getChildrenKeys(jobNodePath.getFullPath(node)); }
3.68
framework_RadioButtonGroup_setHtmlContentAllowed
/** * Sets whether html is allowed in the item captions. If set to true, the * captions are passed to the browser as html and the developer is * responsible for ensuring no harmful html is used. If set to false, the * content is passed to the browser as plain text. * * @param htmlContentAllowed * true if the captions are used as html, false if used as plain * text */ public void setHtmlContentAllowed(boolean htmlContentAllowed) { getState().htmlContentAllowed = htmlContentAllowed; }
3.68
hadoop_AllowAllImpersonationProvider_authorize
// Although this API was removed from the interface by HADOOP-17367, we need // to keep it here because TestDynamometerInfra uses an old hadoop binary. public void authorize(UserGroupInformation user, String remoteAddress) { // Do nothing }
3.68
framework_StringToEnumConverter_stringToEnum
/** * Converts the given string to the given enum type using the given locale. * <p> * Compatible with {@link #enumToString(Enum, Locale)} * * @param value * The string value to convert * @param enumType * The type of enum to create * @param locale * The locale to use for conversion. If null, the JVM default * locale will be used * @return The enum which matches the given string * @throws ConversionException * if the conversion fails */ public static <T extends Enum<T>> T stringToEnum(String value, Class<T> enumType, Locale locale) throws ConversionException { if (locale == null) { locale = Locale.getDefault(); } if (!enumType.isEnum()) { throw new ConversionException( enumType.getName() + " is not an enum type"); } // First test for the human-readable value since that's the more likely // input String upperCaseValue = value.toUpperCase(locale); T match = null; for (T e : EnumSet.allOf(enumType)) { String upperCase = enumToString(e, locale).toUpperCase(locale); if (upperCase.equals(upperCaseValue)) { if (match != null) { throw new ConversionException("Both " + match.name() + " and " + e.name() + " are matching the input string " + value); } match = e; } } if (match != null) { return match; } // Then fall back to a strict match based on name() try { return Enum.valueOf(enumType, upperCaseValue); } catch (Exception ee) { throw new ConversionException(ee); } }
3.68
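The converter above matches a human-readable form first and only then falls back to the strict constant name. A self-contained sketch of the same two-phase lookup, simplified and with a hypothetical readable form (name() with underscores as spaces); this is not Vaadin's actual class:

import java.util.EnumSet;
import java.util.Locale;

class EnumLookupSketch {
    static <T extends Enum<T>> T parse(String value, Class<T> enumType, Locale locale) {
        String upper = value.toUpperCase(locale);
        for (T e : EnumSet.allOf(enumType)) {
            // phase 1: match the human-readable form, e.g. "Ready to go"
            if (e.name().replace('_', ' ').toUpperCase(locale).equals(upper)) {
                return e;
            }
        }
        // phase 2: strict fallback on the constant name, e.g. "READY_TO_GO"
        return Enum.valueOf(enumType, upper);
    }
}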
flink_DeclarativeSlotPoolService_onFailAllocation
/** * This method is called when an allocation fails. It can be overridden by subclasses. * * @param previouslyFulfilledRequirements the requirements previously fulfilled by the failed * allocation */ protected void onFailAllocation(ResourceCounter previouslyFulfilledRequirements) {}
3.68
flink_StreamTask_getTimerService
/** * Returns the {@link TimerService} responsible for telling the current processing time and * registering actual timers. */ @VisibleForTesting TimerService getTimerService() { return timerService; }
3.68
hbase_MasterObserver_preEnableTableAction
/** * Called prior to enabling a table. Called as part of enable table procedure and it is async to * the enable table RPC call. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void preEnableTableAction(final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName) throws IOException { }
3.68
flink_TableFactoryUtil_isLegacyConnectorOptions
/** Checks whether the {@link CatalogTable} uses legacy connector sink options. */ public static boolean isLegacyConnectorOptions( @Nullable Catalog catalog, ReadableConfig configuration, boolean isStreamingMode, ObjectIdentifier objectIdentifier, CatalogTable catalogTable, boolean isTemporary) { // normalize option keys DescriptorProperties properties = new DescriptorProperties(true); properties.putProperties(catalogTable.getOptions()); if (properties.containsKey(ConnectorDescriptorValidator.CONNECTOR_TYPE)) { return true; } else { try { // try to create legacy table source using the options, // some legacy factories may use the 'type' key TableFactoryUtil.findAndCreateTableSink( catalog, objectIdentifier, catalogTable, configuration, isStreamingMode, isTemporary); // success, then we will use the legacy factories return true; } catch (Throwable ignore) { // fail, then we will use new factories return false; } } }
3.68
hbase_FSDataInputStreamWrapper_shouldUseHBaseChecksum
/** Returns Whether we are presently using HBase checksum. */ public boolean shouldUseHBaseChecksum() { return this.useHBaseChecksum; }
3.68
framework_AbstractSelect_removeContainerProperty
/** * Removes the property from all items. Removes a property with given id * from all the items in the container. * * This functionality is optional. If the function is unsupported, it always * returns false. * * @return True if the operation succeeded. * @see Container#removeContainerProperty(java.lang.Object) */ @Override public boolean removeContainerProperty(Object propertyId) throws UnsupportedOperationException { final boolean retval = items.removeContainerProperty(propertyId); if (retval && !(items instanceof Container.PropertySetChangeNotifier)) { firePropertySetChange(); } return retval; }
3.68
flink_DataSet_join
/** * Initiates a Join transformation. * * <p>A Join transformation joins the elements of two {@link DataSet DataSets} on key equality * and provides multiple ways to combine joining elements into one DataSet. * * <p>This method returns a {@link JoinOperatorSets} on which one of the {@code where} methods * can be called to define the join key of the first joining (i.e., this) DataSet. * * @param other The other DataSet with which this DataSet is joined. * @param strategy The strategy that should be used to execute the join. If {@code null} is given, * then the optimizer will pick the join strategy. * @return A JoinOperatorSets to continue the definition of the Join transformation. * @see JoinOperatorSets * @see DataSet */ public <R> JoinOperatorSets<T, R> join(DataSet<R> other, JoinHint strategy) { return new JoinOperatorSets<>(this, other, strategy); }
3.68
hadoop_ZKSignerSecretProvider_pushToZK
/** * Pushes proposed data to ZooKeeper. If a different server pushes its data * first, it gives up. * @param newSecret The new secret to use * @param currentSecret The current secret * @param previousSecret The previous secret */ private synchronized void pushToZK(byte[] newSecret, byte[] currentSecret, byte[] previousSecret) { byte[] bytes = generateZKData(newSecret, currentSecret, previousSecret); try { client.setData().withVersion(zkVersion).forPath(path, bytes); } catch (KeeperException.BadVersionException bve) { LOG.debug("Unable to push to znode; another server already did it"); } catch (Exception ex) { LOG.error("An unexpected exception occurred pushing data to ZooKeeper", ex); } }
3.68
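pushToZK above relies on ZooKeeper's versioned setData as a compare-and-set: whichever server writes first wins, and losers see BadVersionException and give up. A minimal sketch of that idiom with the Curator API (path and payload are hypothetical):

import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;

class ZkCompareAndSet {
    static boolean tryPush(CuratorFramework client, String path, byte[] bytes) throws Exception {
        Stat stat = new Stat();
        client.getData().storingStatIn(stat).forPath(path); // read data and current version
        try {
            client.setData().withVersion(stat.getVersion()).forPath(path, bytes);
            return true;  // our write won the race
        } catch (KeeperException.BadVersionException bve) {
            return false; // another server pushed first; give up, as above
        }
    }
}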
flink_JobVertex_getInvokableClassName
/** * Returns the name of the invokable class which represents the task of this vertex. * * @return The name of the invokable class, <code>null</code> if not set. */ public String getInvokableClassName() { return this.invokableClassName; }
3.68
hbase_AuthUtil_toGroupEntry
/** * Returns the group entry with the group prefix for a group principal. */ @InterfaceAudience.Private public static String toGroupEntry(String name) { return GROUP_PREFIX + name; }
3.68
flink_ExtractionUtils_isStructuredFieldDirectlyReadable
/** Checks whether a field is directly readable without a getter. */ public static boolean isStructuredFieldDirectlyReadable(Field field) { final int m = field.getModifiers(); // field is directly readable return Modifier.isPublic(m); }
3.68
framework_DefaultSQLGenerator_getStatementHelper
/** * Returns the statement helper for the generator. Override this to handle * platform specific data types. * * @see http://dev.vaadin.com/ticket/9148 * @return a new instance of the statement helper */ protected StatementHelper getStatementHelper() { if (statementHelperClass == null) { return new StatementHelper(); } try { return statementHelperClass.newInstance(); } catch (InstantiationException e) { throw new RuntimeException( "Unable to instantiate custom StatementHelper", e); } catch (IllegalAccessException e) { throw new RuntimeException( "Unable to instantiate custom StatementHelper", e); } }
3.68
flink_DataViewUtils_extractDataViews
/** Searches for data views in the data type of an accumulator and extracts them. */ public static List<DataViewSpec> extractDataViews(int aggIndex, DataType accumulatorDataType) { final LogicalType accumulatorType = accumulatorDataType.getLogicalType(); if (!accumulatorType.is(ROW) && !accumulatorType.is(STRUCTURED_TYPE)) { return Collections.emptyList(); } final List<String> fieldNames = getFieldNames(accumulatorType); final List<DataType> fieldDataTypes = accumulatorDataType.getChildren(); final List<DataViewSpec> specs = new ArrayList<>(); for (int fieldIndex = 0; fieldIndex < fieldDataTypes.size(); fieldIndex++) { final DataType fieldDataType = fieldDataTypes.get(fieldIndex); final LogicalType fieldType = fieldDataType.getLogicalType(); if (isDataView(fieldType, ListView.class)) { specs.add( new ListViewSpec( createStateId(aggIndex, fieldNames.get(fieldIndex)), fieldIndex, fieldDataType.getChildren().get(0))); } else if (isDataView(fieldType, MapView.class)) { specs.add( new MapViewSpec( createStateId(aggIndex, fieldNames.get(fieldIndex)), fieldIndex, fieldDataType.getChildren().get(0), false)); } if (fieldType.getChildren().stream() .anyMatch(c -> hasNested(c, t -> isDataView(t, DataView.class)))) { throw new TableException( "Data views are only supported in the first level of a composite accumulator type."); } } return specs; }
3.68
framework_AbstractInMemoryContainer_internalAddItemAt
/** * Add an item at a given (visible after filtering) item index, and perform * filtering. An event is fired if the filtered view changes. * * @param index * position where to add the item (visible/view index) * @param newItemId * @param item * new item to add * @param filter * true to perform filtering and send event after adding the * item, false to skip these operations for batch inserts - if * false, caller needs to make sure these operations are * performed at the end of the batch * @return item added or null if no item was added */ protected ITEMCLASS internalAddItemAt(int index, ITEMIDTYPE newItemId, ITEMCLASS item, boolean filter) { if (index < 0 || index > size()) { return null; } else if (index == 0) { // add before any item, visible or not return internalAddItemAfter(null, newItemId, item, filter); } else { // if index==size(), adds immediately after last visible item return internalAddItemAfter(getIdByIndex(index - 1), newItemId, item, filter); } }
3.68
flink_RetryingExecutor_execute
/** * Execute the given action according to the retry policy. * * <p>NOTE: the action must be idempotent because multiple instances of it can be executed * concurrently (if the policy allows retries). */ <T> void execute(RetryPolicy retryPolicy, RetriableAction<T> action) { LOG.debug("execute with retryPolicy: {}", retryPolicy); RetriableActionAttempt<T> task = RetriableActionAttempt.initialize( action, retryPolicy, blockingExecutor, attemptsPerTaskHistogram, totalAttemptsPerTaskHistogram, timer); blockingExecutor.submit(task); }
3.68
hbase_TableInputFormat_getConf
/** * Returns the current configuration. * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @Override public Configuration getConf() { return conf; }
3.68
hbase_Subprocedure_waitForReachedGlobalBarrier
/** * Wait for the reached global barrier notification. Package visibility for testing */ void waitForReachedGlobalBarrier() throws ForeignException, InterruptedException { Procedure.waitForLatch(inGlobalBarrier, monitor, wakeFrequency, barrierName + ":remote acquired"); }
3.68
hbase_OutputSink_finishWriterThreads
/** * Wait for writer threads to dump all info to the sink * @return true when there is no error */ boolean finishWriterThreads() throws IOException { LOG.debug("Waiting for split writer threads to finish"); boolean progressFailed = false; for (WriterThread t : writerThreads) { t.finish(); } for (WriterThread t : writerThreads) { if (!progressFailed && reporter != null && !reporter.progress()) { progressFailed = true; } try { t.join(); } catch (InterruptedException ie) { IOException iie = new InterruptedIOException(); iie.initCause(ie); throw iie; } } controller.checkForErrors(); final String msg = this.writerThreads.size() + " split writer threads finished"; LOG.info(msg); updateStatusWithMsg(msg); return (!progressFailed); }
3.68
framework_VFilterSelect_selectFirstItem
/** * @deprecated use {@link SuggestionPopup#selectFirstItem()} instead. */ @Deprecated public void selectFirstItem() { debug("VFS.SM: selectFirstItem()"); MenuItem firstItem = getItems().get(0); selectItem(firstItem); }
3.68
framework_AbstractConnector_unregisterRpc
/** * Unregisters an implementation for a server to client RPC interface. * * @param <T> * The type of the RPC interface that is being unregistered * @param rpcInterface * RPC interface * @param implementation * implementation to unregister */ protected <T extends ClientRpc> void unregisterRpc(Class<T> rpcInterface, T implementation) { String rpcInterfaceId = rpcInterface.getName().replaceAll("\\$", "."); if (null != rpcImplementations && null != rpcImplementations.get(rpcInterfaceId)) { rpcImplementations.get(rpcInterfaceId).remove(implementation); } }
3.68
hbase_MasterRpcServices_getServices
/** Returns list of blocking services and their security info classes that this server supports */ @Override protected List<BlockingServiceAndInterface> getServices() { List<BlockingServiceAndInterface> bssi = new ArrayList<>(5); bssi.add(new BlockingServiceAndInterface(MasterService.newReflectiveBlockingService(this), MasterService.BlockingInterface.class)); bssi.add( new BlockingServiceAndInterface(RegionServerStatusService.newReflectiveBlockingService(this), RegionServerStatusService.BlockingInterface.class)); bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this), LockService.BlockingInterface.class)); bssi.add(new BlockingServiceAndInterface(HbckService.newReflectiveBlockingService(this), HbckService.BlockingInterface.class)); bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this), ClientMetaService.BlockingInterface.class)); bssi.add(new BlockingServiceAndInterface(AdminService.newReflectiveBlockingService(this), AdminService.BlockingInterface.class)); return bssi; }
3.68
hbase_StripeStoreFileManager_isInvalid
/** * Checks whether the key is invalid (e.g. from an L0 file, or non-stripe-compacted files). */ private static final boolean isInvalid(byte[] key) { // No need to use Arrays.equals because INVALID_KEY is null return key == INVALID_KEY; }
3.68
hadoop_HsController_jobPage
/* * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#jobPage() */ @Override protected Class<? extends View> jobPage() { return HsJobPage.class; }
3.68
druid_PagerUtils_getLimit
/** * @param sql the SQL to inspect * @param dbType the database type * @return the row-count limit of the query, or -1 if no limit clause exists */ public static int getLimit(String sql, DbType dbType) { List<SQLStatement> stmtList = SQLUtils.parseStatements(sql, dbType); if (stmtList.size() != 1) { return -1; } SQLStatement stmt = stmtList.get(0); if (stmt instanceof SQLSelectStatement) { SQLSelectStatement selectStmt = (SQLSelectStatement) stmt; SQLSelectQuery query = selectStmt.getSelect().getQuery(); if (query instanceof SQLSelectQueryBlock) { if (query instanceof MySqlSelectQueryBlock) { SQLLimit limit = ((MySqlSelectQueryBlock) query).getLimit(); if (limit == null) { return -1; } SQLExpr rowCountExpr = limit.getRowCount(); if (rowCountExpr instanceof SQLNumericLiteralExpr) { int rowCount = ((SQLNumericLiteralExpr) rowCountExpr).getNumber().intValue(); return rowCount; } return Integer.MAX_VALUE; } if (query instanceof OdpsSelectQueryBlock) { SQLLimit limit = ((OdpsSelectQueryBlock) query).getLimit(); SQLExpr rowCountExpr = limit != null ? limit.getRowCount() : null; if (rowCountExpr instanceof SQLNumericLiteralExpr) { int rowCount = ((SQLNumericLiteralExpr) rowCountExpr).getNumber().intValue(); return rowCount; } return Integer.MAX_VALUE; } return -1; } } return -1; }
3.68
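A minimal usage sketch for getLimit, assuming Druid is on the classpath; the signature is taken from the snippet above and the queries are hypothetical:

import com.alibaba.druid.DbType;
import com.alibaba.druid.sql.PagerUtils;

class GetLimitDemo {
    public static void main(String[] args) {
        // explicit row count in a MySQL query -> 10
        System.out.println(PagerUtils.getLimit("select * from t limit 10", DbType.mysql));
        // no limit clause -> -1 per the contract above
        System.out.println(PagerUtils.getLimit("select * from t", DbType.mysql));
    }
}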
framework_VaadinService_loadSession
/** * Called when the VaadinSession should be loaded from the underlying HTTP * session. * * @since 7.6 * @param wrappedSession * the underlying HTTP session * @return the VaadinSession in the HTTP session or null if not found */ protected VaadinSession loadSession(WrappedSession wrappedSession) { assert VaadinSession.hasLock(this, wrappedSession); VaadinSession vaadinSession = readFromHttpSession(wrappedSession); if (vaadinSession == null) { return null; } vaadinSession.refreshTransients(wrappedSession, this); return vaadinSession; }
3.68
flink_StreamExecutionEnvironment_setDefaultSavepointDirectory
/** * Sets the default savepoint directory, where savepoints will be written to if none is * explicitly provided when a savepoint is triggered. * * @return This StreamExecutionEnvironment itself, to allow chaining of function calls. * @see #getDefaultSavepointDirectory() */ @PublicEvolving public StreamExecutionEnvironment setDefaultSavepointDirectory(Path savepointDirectory) { this.defaultSavepointDirectory = Preconditions.checkNotNull(savepointDirectory); return this; }
3.68
hadoop_TFile_isSorted
/** * Is the TFile sorted? * * @return true if TFile is sorted. */ public boolean isSorted() { return tfileMeta.isSorted(); }
3.68
hadoop_AltKerberosAuthenticationHandler_getType
/** * Returns the authentication type of the authentication handler, * 'alt-kerberos'. * * @return the authentication type of the authentication handler, * 'alt-kerberos'. */ @Override public String getType() { return TYPE; }
3.68
hbase_RegionNormalizerManager_planSkipped
/** * Call-back for the case where plan couldn't be executed due to constraint violation, such as * namespace quota. * @param type type of plan that was skipped. */ public void planSkipped(NormalizationPlan.PlanType type) { // TODO: this appears to be used only for testing. if (worker != null) { worker.planSkipped(type); } }
3.68
hbase_TableSchemaModel_addColumnFamily
/** * Add a column family to the table descriptor * @param family the column family model */ public void addColumnFamily(ColumnSchemaModel family) { columns.add(family); }
3.68
flink_IncrementalKeyedStateHandle_replaceHandle
/** Replace the StreamStateHandle with the registry returned one. */ public void replaceHandle(StreamStateHandle registryReturned) { checkNotNull(registryReturned); this.handle = registryReturned; }
3.68
flink_TypeInferenceUtil_generateSignature
/** Generates a signature of the given {@link FunctionDefinition}. */ public static String generateSignature( TypeInference typeInference, String name, FunctionDefinition definition) { if (typeInference.getTypedArguments().isPresent()) { return formatNamedOrTypedArguments(name, typeInference); } return typeInference.getInputTypeStrategy().getExpectedSignatures(definition).stream() .map(s -> formatSignature(name, s)) .collect(Collectors.joining("\n")); }
3.68
hbase_ZkSplitLogWorkerCoordination_nodeDataChanged
/** * Override handler from {@link ZKListener} */ @Override public void nodeDataChanged(String path) { // there will be a self generated dataChanged event every time attemptToOwnTask() // heartbeats the task znode by upping its version synchronized (grabTaskLock) { if (workerInGrabTask) { // currentTask can change String taskpath = currentTask; if (taskpath != null && taskpath.equals(path)) { getDataSetWatchAsync(); } } } }
3.68
hbase_Scan_addColumn
/** * Get the column from the specified family with the specified qualifier. * <p> * Overrides previous calls to addFamily for this family. * @param family family name * @param qualifier column qualifier */ public Scan addColumn(byte[] family, byte[] qualifier) { NavigableSet<byte[]> set = familyMap.get(family); if (set == null) { set = new TreeSet<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, set); } if (qualifier == null) { qualifier = HConstants.EMPTY_BYTE_ARRAY; } set.add(qualifier); return this; }
3.68
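A minimal usage sketch of Scan.addColumn with the standard HBase client classes; the family and qualifier names are hypothetical:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

class ScanDemo {
    static Scan narrowScan() {
        // addColumn returns this, so calls chain; the second call adds to the
        // same family's qualifier set rather than replacing it
        return new Scan()
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col2"));
    }
}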
flink_Costs_addHeuristicDiskCost
/** * Adds the heuristic costs for disk to the current heuristic disk costs for this Costs object. * * @param cost The heuristic disk cost to add. */ public void addHeuristicDiskCost(double cost) { if (cost <= 0) { throw new IllegalArgumentException("Heuristic costs must be positive."); } this.heuristicDiskCost += cost; // check for overflow if (this.heuristicDiskCost < 0) { this.heuristicDiskCost = Double.MAX_VALUE; } }
3.68
framework_VOptionGroupBase_getColumns
/** * For internal use only. May be removed or replaced in the future. * * @return "cols" specified in uidl, 0 if not specified */ public int getColumns() { return cols; }
3.68
hmily_HmilyExecuteTemplate_execute
/** * Execute. * * @param sql SQL * @param parameters parameters * @param connectionInformation connection information */ public void execute(final String sql, final List<Object> parameters, final ConnectionInformation connectionInformation) { if (check()) { return; } HmilyStatement statement; try { statement = HmilySqlParserEngineFactory.newInstance().parser(sql, DatabaseTypes.INSTANCE.getDatabaseType()); log.debug("TAC-parse-sql ::: statement: {}", statement); } catch (final Exception ex) { return; } String resourceId = ResourceIdUtils.INSTANCE.getResourceId(connectionInformation.getUrl()); HmilySQLComputeEngine sqlComputeEngine = HmilySQLComputeEngineFactory.newInstance(statement); HmilyTransactionContext transactionContext = HmilyContextHolder.get(); int lockRetryInterval = transactionContext.getLockRetryInterval(); int lockRetryTimes = transactionContext.getLockRetryTimes(); // select SQL if (statement instanceof HmilySelectStatement) { if (IsolationLevelEnum.READ_COMMITTED.getValue() == transactionContext.getIsolationLevel()) { // read committed level need check locks executeSelect(sql, parameters, connectionInformation, sqlComputeEngine, resourceId, lockRetryInterval, lockRetryTimes); } return; } // update delete insert SQL new HmilyLockRetryPolicy(lockRetryInterval, lockRetryTimes).execute(() -> { HmilyDataSnapshot snapshot = sqlComputeEngine.execute(sql, parameters, connectionInformation.getConnection(), resourceId); log.debug("TAC-compute-sql ::: {}", snapshot); HmilyUndoContext undoContext = buildUndoContext(HmilyContextHolder.get(), snapshot, resourceId); HmilyLockManager.INSTANCE.tryAcquireLocks(undoContext.getHmilyLocks()); log.debug("TAC-try-lock ::: {}", undoContext.getHmilyLocks()); HmilyUndoContextCacheManager.INSTANCE.set(undoContext); }); }
3.68
hbase_QuotaObserverChore_addNamespaceQuotaTable
/** * Adds a table with a namespace quota. */ public void addNamespaceQuotaTable(TableName tn) { tablesWithNamespaceQuotas.add(tn); }
3.68
flink_AbstractFileSource_setSplitAssigner
/** * Configures the {@link FileSplitAssigner} for the source. The File Split Assigner * determines which parallel reader instance gets which {@link FileSourceSplit}, and in * which order these splits are assigned. */ public SELF setSplitAssigner(FileSplitAssigner.Provider splitAssigner) { this.splitAssigner = checkNotNull(splitAssigner); return self(); }
3.68
hbase_ByteArrayOutputStream_size
/** Returns The current size of the buffer. */ public int size() { return this.pos; }
3.68
flink_Plan_addDataSink
/** * Adds a data sink to the set of sinks in this program. * * @param sink The data sink to add. */ public void addDataSink(GenericDataSinkBase<?> sink) { checkNotNull(sink, "The data sink must not be null."); if (!this.sinks.contains(sink)) { this.sinks.add(sink); } }
3.68
hmily_ConfigEnv_putBean
/** * Register an object that needs to interpret configuration information. * * @param parent the config bean to register. */ public void putBean(final Config parent) { if (parent != null && StringUtils.isNotBlank(parent.prefix())) { if (CONFIGS.containsKey(parent.getClass())) { return; } CONFIGS.put(parent.getClass(), parent); } }
3.68
cron-utils_RebootCron_equivalent
/** * Provides means to compare if two cron expressions are equivalent. * Assumes same cron definition. * * @param cron - any cron instance, never null * @return boolean - true if equivalent; false otherwise. */ public boolean equivalent(final Cron cron) { return asString().equals(cron.asString()); }
3.68
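A minimal usage sketch for equivalent, assuming the standard cron-utils parser entry points; the expressions are hypothetical:

import com.cronutils.model.Cron;
import com.cronutils.model.CronType;
import com.cronutils.model.definition.CronDefinitionBuilder;
import com.cronutils.parser.CronParser;

class EquivalentDemo {
    public static void main(String[] args) {
        CronParser parser = new CronParser(
                CronDefinitionBuilder.instanceDefinitionFor(CronType.UNIX));
        Cron a = parser.parse("0 * * * *");
        Cron b = parser.parse("0 * * * *");
        System.out.println(a.equivalent(b)); // true: same definition, same string form
    }
}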
hbase_HRegion_getSplitPolicy
/** Returns split policy for this region. */ RegionSplitPolicy getSplitPolicy() { return this.splitPolicy; }
3.68
dubbo_AbstractDirectory_getConnectivityCheckFuture
/** * for ut only */ @Deprecated public ScheduledFuture<?> getConnectivityCheckFuture() { return connectivityCheckFuture; }
3.68
flink_CommittableCollector_copy
/** * Returns a new committable collector that deep copies all internals. * * @return {@link CommittableCollector} */ public CommittableCollector<CommT> copy() { return new CommittableCollector<>( checkpointCommittables.entrySet().stream() .map(e -> Tuple2.of(e.getKey(), e.getValue().copy())) .collect(Collectors.toMap((t) -> t.f0, (t) -> t.f1)), subtaskId, numberOfSubtasks, metricGroup); }
3.68
hadoop_ImageLoader_getLoader
/** * Find an image loader capable of interpreting the specified * layout version number. If none, return null; * * @param version fsimage layout version number to be processed * @return ImageLoader that can interpret specified version, or null */ static public ImageLoader getLoader(int version) { // Easy to add more image processors as they are written ImageLoader[] loaders = { new ImageLoaderCurrent() }; for (ImageLoader l : loaders) { if (l.canLoadVersion(version)) return l; } return null; }
3.68
dubbo_DubboConfigBeanInitializer_prepareDubboConfigBeans
/** * Initializes the Dubbo config beans before @Reference bean autowiring. */ private void prepareDubboConfigBeans() { logger.info("loading dubbo config beans ..."); // Make sure all these config beans are initialized and registered to ConfigManager // load application config beans loadConfigBeansOfType(ApplicationConfig.class, configManager); loadConfigBeansOfType(RegistryConfig.class, configManager); loadConfigBeansOfType(ProtocolConfig.class, configManager); loadConfigBeansOfType(MonitorConfig.class, configManager); loadConfigBeansOfType(ConfigCenterBean.class, configManager); loadConfigBeansOfType(MetadataReportConfig.class, configManager); loadConfigBeansOfType(MetricsConfig.class, configManager); loadConfigBeansOfType(TracingConfig.class, configManager); loadConfigBeansOfType(SslConfig.class, configManager); // load module config beans loadConfigBeansOfType(ModuleConfig.class, moduleModel.getConfigManager()); loadConfigBeansOfType(ProviderConfig.class, moduleModel.getConfigManager()); loadConfigBeansOfType(ConsumerConfig.class, moduleModel.getConfigManager()); // load ConfigCenterBean from properties, fix https://github.com/apache/dubbo/issues/9207 List<ConfigCenterBean> configCenterBeans = configManager.loadConfigsOfTypeFromProps(ConfigCenterBean.class); for (ConfigCenterBean configCenterBean : configCenterBeans) { String beanName = configCenterBean.getId() != null ? configCenterBean.getId() : "configCenterBean"; beanFactory.initializeBean(configCenterBean, beanName); } logger.info("dubbo config beans are loaded."); }
3.68
hbase_KeyOnlyFilter_areSerializedFieldsEqual
/** * Returns true if and only if the fields of the filter that are serialized are equal to the * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) { return true; } if (!(o instanceof KeyOnlyFilter)) { return false; } KeyOnlyFilter other = (KeyOnlyFilter) o; return this.lenAsVal == other.lenAsVal; }
3.68
hbase_HRegionServer_createRegionLoad
/** * @param r Region to get RegionLoad for. * @param regionLoadBldr the RegionLoad.Builder, can be null * @param regionSpecifier the RegionSpecifier.Builder, can be null * @return RegionLoad instance. */ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, RegionSpecifier.Builder regionSpecifier) throws IOException { byte[] name = r.getRegionInfo().getRegionName(); String regionEncodedName = r.getRegionInfo().getEncodedName(); int stores = 0; int storefiles = 0; int storeRefCount = 0; int maxCompactedStoreFileRefCount = 0; long storeUncompressedSize = 0L; long storefileSize = 0L; long storefileIndexSize = 0L; long rootLevelIndexSize = 0L; long totalStaticIndexSize = 0L; long totalStaticBloomSize = 0L; long totalCompactingKVs = 0L; long currentCompactedKVs = 0L; long totalRegionSize = 0L; List<HStore> storeList = r.getStores(); stores += storeList.size(); for (HStore store : storeList) { storefiles += store.getStorefilesCount(); int currentStoreRefCount = store.getStoreRefCount(); storeRefCount += currentStoreRefCount; int currentMaxCompactedStoreFileRefCount = store.getMaxCompactedStoreFileRefCount(); maxCompactedStoreFileRefCount = Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount); storeUncompressedSize += store.getStoreSizeUncompressed(); storefileSize += store.getStorefilesSize(); totalRegionSize += store.getHFilesSize(); // TODO: storefileIndexSizeKB is same with rootLevelIndexSizeKB? storefileIndexSize += store.getStorefilesRootLevelIndexSize(); CompactionProgress progress = store.getCompactionProgress(); if (progress != null) { totalCompactingKVs += progress.getTotalCompactingKVs(); currentCompactedKVs += progress.currentCompactedKVs; } rootLevelIndexSize += store.getStorefilesRootLevelIndexSize(); totalStaticIndexSize += store.getTotalStaticIndexSize(); totalStaticBloomSize += store.getTotalStaticBloomSize(); } int memstoreSizeMB = roundSize(r.getMemStoreDataSize(), unitMB); int storeUncompressedSizeMB = roundSize(storeUncompressedSize, unitMB); int storefileSizeMB = roundSize(storefileSize, unitMB); int storefileIndexSizeKB = roundSize(storefileIndexSize, unitKB); int rootLevelIndexSizeKB = roundSize(rootLevelIndexSize, unitKB); int totalStaticIndexSizeKB = roundSize(totalStaticIndexSize, unitKB); int totalStaticBloomSizeKB = roundSize(totalStaticBloomSize, unitKB); int regionSizeMB = roundSize(totalRegionSize, unitMB); final MutableFloat currentRegionCachedRatio = new MutableFloat(0.0f); computeIfPersistentBucketCache(bc -> { if (bc.getRegionCachedInfo().containsKey(regionEncodedName)) { currentRegionCachedRatio.setValue(regionSizeMB == 0 ? 0.0f : (float) roundSize(bc.getRegionCachedInfo().get(regionEncodedName), unitMB) / regionSizeMB); } }); HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution(); float dataLocality = hdfsBd.getBlockLocalityIndex(serverName.getHostname()); float dataLocalityForSsd = hdfsBd.getBlockLocalityIndexForSsd(serverName.getHostname()); long blocksTotalWeight = hdfsBd.getUniqueBlocksTotalWeight(); long blocksLocalWeight = hdfsBd.getBlocksLocalWeight(serverName.getHostname()); long blocksLocalWithSsdWeight = hdfsBd.getBlocksLocalWithSsdWeight(serverName.getHostname()); if (regionLoadBldr == null) { regionLoadBldr = RegionLoad.newBuilder(); } if (regionSpecifier == null) { regionSpecifier = RegionSpecifier.newBuilder(); } regionSpecifier.setType(RegionSpecifierType.REGION_NAME); regionSpecifier.setValue(UnsafeByteOperations.unsafeWrap(name)); regionLoadBldr.setRegionSpecifier(regionSpecifier.build()).setStores(stores) .setStorefiles(storefiles).setStoreRefCount(storeRefCount) .setMaxCompactedStoreFileRefCount(maxCompactedStoreFileRefCount) .setStoreUncompressedSizeMB(storeUncompressedSizeMB).setStorefileSizeMB(storefileSizeMB) .setMemStoreSizeMB(memstoreSizeMB).setStorefileIndexSizeKB(storefileIndexSizeKB) .setRootIndexSizeKB(rootLevelIndexSizeKB).setTotalStaticIndexSizeKB(totalStaticIndexSizeKB) .setTotalStaticBloomSizeKB(totalStaticBloomSizeKB) .setReadRequestsCount(r.getReadRequestsCount()).setCpRequestsCount(r.getCpRequestsCount()) .setFilteredReadRequestsCount(r.getFilteredReadRequestsCount()) .setWriteRequestsCount(r.getWriteRequestsCount()).setTotalCompactingKVs(totalCompactingKVs) .setCurrentCompactedKVs(currentCompactedKVs).setDataLocality(dataLocality) .setDataLocalityForSsd(dataLocalityForSsd).setBlocksLocalWeight(blocksLocalWeight) .setBlocksLocalWithSsdWeight(blocksLocalWithSsdWeight).setBlocksTotalWeight(blocksTotalWeight) .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad(r.getCompactionState())) .setLastMajorCompactionTs(r.getOldestHfileTs(true)).setRegionSizeMB(regionSizeMB) .setCurrentRegionCachedRatio(currentRegionCachedRatio.floatValue()); r.setCompleteSequenceId(regionLoadBldr); return regionLoadBldr.build(); }
3.68
framework_CustomFieldConnector_getContentWidget
/** * Returns the widget (if any) of the content of the container. * * @return widget of the only/first connector of the container, null if no * content or if there is no widget for the connector */ protected Widget getContentWidget() { ComponentConnector content = getContent(); if (null != content) { return content.getWidget(); } else { return null; } }
3.68
flink_ServerConnection_close
/** * Close the channel with a cause. * * @param cause The cause to close the channel with. * @return Channel close future */ private CompletableFuture<Void> close(final Throwable cause) { synchronized (lock) { if (running) { running = false; channel.close() .addListener( finished -> { stats.reportInactiveConnection(); for (long requestId : pendingRequests.keySet()) { EstablishedConnection.TimestampedCompletableFuture<RESP> pending = pendingRequests.remove(requestId); if (pending != null && pending.completeExceptionally(cause)) { stats.reportFailedRequest(); } } // when finishing, if netty successfully closes the channel, // then the provided exception is used // as the reason for the closing. If there was something // wrong // at the netty side, then that exception // is prioritized over the provided one. if (finished.isSuccess()) { closeFuture.completeExceptionally(cause); } else { LOG.warn( "Something went wrong when trying to close connection due to : ", cause); closeFuture.completeExceptionally(finished.cause()); } }); } } return closeFuture; }
3.68
flink_HiveParserBaseSemanticAnalyzer_unescapeIdentifier
/** * Remove the encapsulating "`" pair from the identifier. We allow users to use "`" to escape * identifiers for table names, column names and aliases, in case they coincide with Hive * language keywords. */ public static String unescapeIdentifier(String val) { if (val == null) { return null; } if (val.charAt(0) == '`' && val.charAt(val.length() - 1) == '`') { val = val.substring(1, val.length() - 1); } return val; }
3.68
hadoop_RpcProgramPortmap_nullOp
/** * This procedure does no work. By convention, procedure zero of any protocol * takes no parameters and returns no results. */ private XDR nullOp(int xid, XDR in, XDR out) { return PortmapResponse.voidReply(out, xid); }
3.68
hbase_StorageClusterStatusModel_getTotalCompactingKVs
/** Returns The total number of compacting key-values */ @XmlAttribute public long getTotalCompactingKVs() { return totalCompactingKVs; }
3.68
flink_AdaptiveScheduler_transitionToState
/** * Transition the scheduler to another state. This method guards against state transitions while * there is already a transition ongoing. This effectively means that you can not call this * method from a State constructor or State#onLeave. * * @param targetState State to transition to * @param <T> Type of the target state * @return A target state instance */ @VisibleForTesting <T extends State> T transitionToState(StateFactory<T> targetState) { Preconditions.checkState( !isTransitioningState, "State transitions must not be triggered while another state transition is in progress."); Preconditions.checkState( state.getClass() != targetState.getStateClass(), "Attempted to transition into the very state the scheduler is already in."); componentMainThreadExecutor.assertRunningInMainThread(); try { isTransitioningState = true; LOG.debug( "Transition from state {} to {}.", state.getClass().getSimpleName(), targetState.getStateClass().getSimpleName()); final JobStatus previousJobStatus = state.getJobStatus(); state.onLeave(targetState.getStateClass()); T targetStateInstance = targetState.getState(); state = targetStateInstance; final JobStatus newJobStatus = state.getJobStatus(); if (previousJobStatus != newJobStatus) { final long timestamp = System.currentTimeMillis(); jobStatusListeners.forEach( listener -> listener.jobStatusChanges( jobInformation.getJobID(), newJobStatus, timestamp)); } return targetStateInstance; } finally { isTransitioningState = false; } }
3.68
hbase_MunkresAssignment_preliminaries
/** * Corresponds to the "preliminaries" step of the original algorithm. Guarantees that the matrix * is an equivalent non-negative matrix with at least one zero in each row. */ private void preliminaries() { for (int r = 0; r < rows; r++) { // Find the minimum cost of each row. float min = Float.POSITIVE_INFINITY; for (int c = 0; c < cols; c++) { min = Math.min(min, cost[r][c]); } // Subtract that minimum cost from each element in the row. for (int c = 0; c < cols; c++) { cost[r][c] -= min; // If the element is now zero and there are no zeroes in the same row // or column which are already starred, then star this one. There // must be at least one zero because of subtracting the min cost. if (cost[r][c] == 0 && !rowsCovered[r] && !colsCovered[c]) { mask[r][c] = STAR; // Cover this row and column so that no other zeroes in them can be // starred. rowsCovered[r] = true; colsCovered[c] = true; } } } // Clear the covered rows and columns. Arrays.fill(rowsCovered, false); Arrays.fill(colsCovered, false); }
3.68
hbase_ScanQueryMatcher_getStartKey
/** Returns the start key */ public Cell getStartKey() { return startKey; }
3.68
hadoop_HostsFileReader_getHostDetails
/** * Retrieve an atomic view of the included and excluded hosts. * * @return the included and excluded hosts */ public HostDetails getHostDetails() { return current.get(); }
3.68