name (string, length 12–178)
code_snippet (string, length 8–36.5k)
score (float64, range 3.26–3.68)
flink_StringUtils_writeString
/** * Writes a String to the given output. The written string can be read with {@link * #readString(DataInputView)}. * * @param str The string to write * @param out The output to write to * @throws IOException Thrown if the writing or the serialization fails. */ public static void writeString(@Nonnull String str, DataOutputView out) throws IOException { checkNotNull(str); StringValue.writeString(str, out); }
3.68
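A minimal round-trip sketch, assuming Flink's DataOutputSerializer and DataInputDeserializer as the DataOutputView/DataInputView implementations:

DataOutputSerializer out = new DataOutputSerializer(64);                      // buffer grows as needed
StringUtils.writeString("hello", out);
DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
String back = StringUtils.readString(in);                                     // "hello"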
hbase_HBaseConfiguration_merge
/** * Merge two configurations. * @param destConf the configuration that will be overwritten with items from the srcConf * @param srcConf the source configuration **/ public static void merge(Configuration destConf, Configuration srcConf) { for (Map.Entry<String, String> e : srcConf) { destConf.set(e.getKey(), e.getValue()); } }
3.68
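A short usage sketch; note that merge copies every entry from srcConf into destConf, overwriting any duplicate keys:

Configuration dest = HBaseConfiguration.create();       // HBase defaults
Configuration src = new Configuration(false);           // empty overlay
src.set("hbase.client.retries.number", "5");
HBaseConfiguration.merge(dest, src);                    // dest now carries the override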
framework_VFilterSelect_updatePopupPositionOnScroll
/** * Make the popup follow the position of the ComboBox when the page is * scrolled. */ private void updatePopupPositionOnScroll() { if (!scrollPending) { AnimationScheduler.get() .requestAnimationFrame(new AnimationCallback() { public void execute(double timestamp) { if (isShowing()) { leftPosition = getDesiredLeftPosition(); topPosition = getDesiredTopPosition(); setPopupPosition(leftPosition, topPosition); } scrollPending = false; } }); scrollPending = true; } }
3.68
flink_StreamExecutionEnvironment_registerJobListener
/** * Register a {@link JobListener} in this environment. The {@link JobListener} will be notified * of specific job status changes. */ @PublicEvolving public void registerJobListener(JobListener jobListener) { checkNotNull(jobListener, "JobListener cannot be null"); jobListeners.add(jobListener); }
3.68
flink_FutureUtils_composeAfterwards
/** * Run the given asynchronous action after the completion of the given future. The given future * can be completed normally or exceptionally. In case of an exceptional completion, the * asynchronous action's exception will be added to the initial exception. * * @param future to wait for its completion * @param composedAction asynchronous action which is triggered after the future's completion * @return Future which is completed after the asynchronous action has completed. This future * can contain an exception if an error occurred in the given future or asynchronous action. */ public static CompletableFuture<Void> composeAfterwards( CompletableFuture<?> future, Supplier<CompletableFuture<?>> composedAction) { final CompletableFuture<Void> resultFuture = new CompletableFuture<>(); future.whenComplete( (Object outerIgnored, Throwable outerThrowable) -> { final CompletableFuture<?> composedActionFuture = composedAction.get(); composedActionFuture.whenComplete( (Object innerIgnored, Throwable innerThrowable) -> { if (innerThrowable != null) { resultFuture.completeExceptionally( ExceptionUtils.firstOrSuppressed( innerThrowable, outerThrowable)); } else if (outerThrowable != null) { resultFuture.completeExceptionally(outerThrowable); } else { resultFuture.complete(null); } }); }); return resultFuture; }
3.68
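A hedged usage sketch: run an asynchronous cleanup once a task future completes, whatever its outcome; startTask and releaseResourcesAsync are hypothetical methods returning futures:

CompletableFuture<?> task = startTask();                                                            // hypothetical
CompletableFuture<Void> done = FutureUtils.composeAfterwards(task, () -> releaseResourcesAsync()); // hypothetical cleanup
done.whenComplete((ignored, t) -> { /* t carries the first failure, with the other one suppressed */ });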
framework_HierarchicalDataProvider_fetch
/** * Fetches data from this HierarchicalDataProvider using given * {@code query}. Only the immediate children of * {@link HierarchicalQuery#getParent()} will be returned. * * @param query * given query to request data with * @return a stream of data objects resulting from the query * * @throws IllegalArgumentException * if the query is not of type HierarchicalQuery */ @Override public default Stream<T> fetch(Query<T, F> query) { if (query instanceof HierarchicalQuery<?, ?>) { return fetchChildren((HierarchicalQuery<T, F>) query); } throw new IllegalArgumentException( "Hierarchical data provider doesn't support non-hierarchical queries"); }
3.68
hbase_RegionSplitter_split2
/** * Returns the midpoint (average) of 2 numbers (for the split algorithm); BigInteger division truncates toward zero. * @param a number #1 * @param b number #2 * @return the midpoint of the 2 numbers */ public BigInteger split2(BigInteger a, BigInteger b) { return a.add(b).divide(BigInteger.valueOf(2)).abs(); }
3.68
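A quick worked example, assuming alg is an instance of the enclosing split-algorithm class; note the truncation on odd sums:

alg.split2(BigInteger.valueOf(10), BigInteger.valueOf(20)); // (10 + 20) / 2 = 15
alg.split2(BigInteger.valueOf(3), BigInteger.valueOf(4));   // (3 + 4) / 2 = 3, truncated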
hudi_ClusteringCommand_runClustering
/** * Run clustering table service. * <p> * Example: * > connect --path {path to hudi table} * > clustering scheduleAndExecute --sparkMaster local --sparkMemory 2g */ @ShellMethod(key = "clustering scheduleAndExecute", value = "Run Clustering. Make a cluster plan first and execute that plan immediately") public String runClustering( @ShellOption(value = "--sparkMaster", defaultValue = SparkUtil.DEFAULT_SPARK_MASTER, help = "Spark master") final String master, @ShellOption(value = "--sparkMemory", help = "Spark executor memory", defaultValue = "4g") final String sparkMemory, @ShellOption(value = "--parallelism", help = "Parallelism for hoodie clustering", defaultValue = "1") final String parallelism, @ShellOption(value = "--retry", help = "Number of retries", defaultValue = "1") final String retry, @ShellOption(value = "--propsFilePath", help = "path to properties file on localfs or dfs with configurations for " + "hoodie client for clustering", defaultValue = "") final String propsFilePath, @ShellOption(value = "--hoodieConfigs", help = "Any configuration that can be set in the properties file can be " + "passed here in the form of an array", defaultValue = "") final String[] configs) throws Exception { HoodieTableMetaClient client = HoodieCLI.getTableMetaClient(); boolean initialized = HoodieCLI.initConf(); HoodieCLI.initFS(initialized); String sparkPropertiesPath = Utils.getDefaultPropertiesFile(JavaConverters.mapAsScalaMapConverter(System.getenv()).asScala()); SparkLauncher sparkLauncher = SparkUtil.initLauncher(sparkPropertiesPath); sparkLauncher.addAppArgs(SparkCommand.CLUSTERING_SCHEDULE_AND_EXECUTE.toString(), master, sparkMemory, client.getBasePath(), client.getTableConfig().getTableName(), parallelism, retry, propsFilePath); UtilHelpers.validateAndAddProperties(configs, sparkLauncher); Process process = sparkLauncher.launch(); InputStreamConsumer.captureOutput(process); int exitCode = process.waitFor(); if (exitCode != 0) { return "Failed to run clustering for scheduleAndExecute."; } return "Succeeded to run clustering for scheduleAndExecute."; }
3.68
querydsl_NumberExpression_divide
/** * Create a {@code this / right} expression * * <p>Get the result of the operation this / right</p> * * @param right the right-hand operand of the division * @return this / right */ public <N extends Number & Comparable<?>> NumberExpression<T> divide(N right) { @SuppressWarnings("unchecked") Class<T> type = (Class<T>) getDivisionType(getType(), right.getClass()); return Expressions.numberOperation(type, Ops.DIV, mixin, ConstantImpl.create(right)); }
3.68
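A brief usage sketch using a Querydsl PathBuilder; the root and property names are illustrative:

PathBuilder<Object> person = new PathBuilder<>(Object.class, "person");     // hypothetical root
NumberExpression<Integer> salary = person.getNumber("salary", Integer.class);
NumberExpression<Integer> half = salary.divide(2);                           // person.salary / 2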
hbase_QuotaUtil_disableTableIfNotDisabled
/** * Method to disable a table, if not already disabled. This method suppresses * {@link TableNotEnabledException} and {@link TableNotFoundException}, if thrown while disabling the table. * @param conn connection to re-use * @param tableName table name which has moved into space quota violation */ public static void disableTableIfNotDisabled(Connection conn, TableName tableName) throws IOException { try { conn.getAdmin().disableTable(tableName); } catch (TableNotEnabledException | TableNotFoundException e) { // ignore } }
3.68
hibernate-validator_ValidationXmlParser_run
/** * Runs the given privileged action, using a privileged block if required. * <p> * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary * privileged actions within HV's protection domain. */ @IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17") private static <T> T run(PrivilegedAction<T> action) { return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run(); }
3.68
hbase_RegionStateNode_offline
/** * Put region into OFFLINE mode (set state and clear location). * @return Last recorded server deploy */ public ServerName offline() { setState(State.OFFLINE); return setRegionLocation(null); }
3.68
flink_TableConfig_setPlannerConfig
/** * Sets the configuration of Planner for Table API and SQL queries. Changing the configuration * has no effect after the first query has been defined. */ public void setPlannerConfig(PlannerConfig plannerConfig) { this.plannerConfig = Preconditions.checkNotNull(plannerConfig); }
3.68
framework_StringToBooleanConverter_getTrueString
/** * Gets the locale-dependent string representation for true. Default is the * locale-independent value provided by {@link #getTrueString()} * * @since 7.5.4 * @param locale * to be used * @return the string representation for true */ protected String getTrueString(Locale locale) { return getTrueString(); }
3.68
hbase_ProcedureExecutor_submitProcedure
// ========================================================================== // Submit/Abort Procedure // ========================================================================== /** * Add a new root-procedure to the executor. * @param proc the new procedure to execute. * @return the procedure id, that can be used to monitor the operation */ public long submitProcedure(Procedure<TEnvironment> proc) { return submitProcedure(proc, null); }
3.68
flink_StreamOperatorWrapper_close
/** Close the operator. */ public void close() throws Exception { closed = true; wrapped.close(); }
3.68
querydsl_StringExpression_trim
/** * Create a {@code this.trim()} expression * * <p>Create a copy of the string, with leading and trailing whitespace * omitted.</p> * * @return this.trim() * @see java.lang.String#trim() */ public StringExpression trim() { if (trim == null) { trim = Expressions.stringOperation(Ops.TRIM, mixin); } return trim; }
3.68
hadoop_FederationMembershipStateStoreInputValidator_checkTimestamp
/** * Validate that the timestamp is not negative. * * @param timestamp the timestamp to be verified * @throws FederationStateStoreInvalidInputException if the timestamp is * invalid */ private static void checkTimestamp(long timestamp) throws FederationStateStoreInvalidInputException { if (timestamp < 0) { String message = "Invalid timestamp information." + " Please try again by specifying valid Timestamp Information."; LOG.warn(message); throw new FederationStateStoreInvalidInputException(message); } }
3.68
rocketmq-connect_WorkerSinkTask_pauseAll
// pause all consumer topic queues private void pauseAll() { consumer.pause(messageQueues); }
3.68
hadoop_NamenodeStatusReport_statsValid
/** * Whether the statistics are valid. * * @return true if the statistics are valid. */ public boolean statsValid() { return this.statsValid; }
3.68
framework_AbsoluteLayoutRelativeSizeContent_createFullOnFixed
/** * Creates an {@link AbsoluteLayout} of fixed size that contains a * full-sized {@link AbsoluteLayout}. * * @return the created layout */ private Component createFullOnFixed() { AbsoluteLayout absoluteLayout = new AbsoluteLayout(); absoluteLayout.setWidth(200, Unit.PIXELS); absoluteLayout.setHeight(200, Unit.PIXELS); absoluteLayout.setId("fullonfixed-outer"); absoluteLayout.addStyleName("green"); absoluteLayout.setCaption("yellow area expected"); AbsoluteLayout absoluteLayout2 = new AbsoluteLayout(); absoluteLayout2.setSizeFull(); absoluteLayout2.setId("fullonfixed-inner"); absoluteLayout2.addStyleName("yellow"); absoluteLayout.addComponent(absoluteLayout2, "top:50px;left:100px;"); return absoluteLayout; }
3.68
hadoop_BaseService_getServiceDependencies
/** * Returns the service dependencies of this service. The service will be * instantiated only if all the service dependencies are already initialized. * <p> * This method returns an empty array (size 0) * * @return an empty array (size 0). */ @Override public Class[] getServiceDependencies() { return new Class[0]; }
3.68
framework_CellReference_set
/** * Sets the identifying information for this cell. * <p> * The difference between {@link #columnIndexDOM} and {@link #columnIndex} * comes from hidden columns. * * @param columnIndexDOM * the index of the column in the DOM * @param columnIndex * the index of the column * @param column * the column object */ public void set(int columnIndexDOM, int columnIndex, Grid.Column<?, T> column) { this.columnIndexDOM = columnIndexDOM; this.columnIndex = columnIndex; this.column = column; }
3.68
flink_CsvBulkWriter_forPojo
/** * Builds a writer based on a POJO class definition. * * @param pojoClass The class of the POJO. * @param stream The output stream. * @param <T> The type of the elements accepted by this writer. */ static <T> CsvBulkWriter<T, T, Void> forPojo(Class<T> pojoClass, FSDataOutputStream stream) { final Converter<T, T, Void> converter = (value, context) -> value; final CsvMapper csvMapper = JacksonMapperFactory.createCsvMapper(); final CsvSchema schema = csvMapper.schemaFor(pojoClass).withoutQuoteChar(); return new CsvBulkWriter<>(csvMapper, schema, converter, null, stream); }
3.68
zxing_PDF417Writer_bitMatrixFromEncoder
/** * Takes encoder, accounts for width/height, and retrieves bit matrix */ private static BitMatrix bitMatrixFromEncoder(PDF417 encoder, String contents, int errorCorrectionLevel, int width, int height, int margin, boolean autoECI) throws WriterException { encoder.generateBarcodeLogic(contents, errorCorrectionLevel, autoECI); int aspectRatio = 4; byte[][] originalScale = encoder.getBarcodeMatrix().getScaledMatrix(1, aspectRatio); boolean rotated = false; if ((height > width) != (originalScale[0].length < originalScale.length)) { originalScale = rotateArray(originalScale); rotated = true; } int scaleX = width / originalScale[0].length; int scaleY = height / originalScale.length; int scale = Math.min(scaleX, scaleY); if (scale > 1) { byte[][] scaledMatrix = encoder.getBarcodeMatrix().getScaledMatrix(scale, scale * aspectRatio); if (rotated) { scaledMatrix = rotateArray(scaledMatrix); } return bitMatrixFromBitArray(scaledMatrix, margin); } return bitMatrixFromBitArray(originalScale, margin); }
3.68
flink_WindowSavepointReader_process
/** * Reads window state generated without any preaggregation such as {@code WindowedStream#apply} * and {@code WindowedStream#process}. * * @param uid The uid of the operator. * @param readerFunction The window reader function. * @param keyType The key type of the window. * @param stateType The type of records stored in state. * @param outputType The output type of the reader function. * @param <K> The type of the key. * @param <T> The type of the records stored in state. * @param <OUT> The output type of the reader function. * @return A {@code DataStream} of objects read from keyed state. * @throws IOException If the savepoint does not contain the specified uid. */ public <K, T, OUT> DataStream<OUT> process( String uid, WindowReaderFunction<T, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> stateType, TypeInformation<OUT> outputType) throws IOException { WindowReaderOperator<?, K, T, W, OUT> operator = WindowReaderOperator.process(readerFunction, keyType, windowSerializer, stateType); return readWindowOperator(uid, outputType, operator); }
3.68
framework_VTree_getNextSibling
/** * Gets the next sibling in the tree * * @param node * The node to get the sibling for * @return The sibling node or null if the node is the last sibling */ private TreeNode getNextSibling(TreeNode node) { TreeNode parent = node.getParentNode(); List<TreeNode> children; if (parent == null) { children = getRootNodes(); } else { children = parent.getChildren(); } int idx = children.indexOf(node); if (idx < children.size() - 1) { return children.get(idx + 1); } return null; }
3.68
flink_RemoteInputChannel_notifyBufferAvailable
/** * The unannounced credit is increased by the given amount, and the producer might be notified * of the increased credit. */ @Override public void notifyBufferAvailable(int numAvailableBuffers) throws IOException { if (numAvailableBuffers > 0 && unannouncedCredit.getAndAdd(numAvailableBuffers) == 0) { notifyCreditAvailable(); } }
3.68
flink_OrcLegacyTimestampColumnVector_createFromConstant
// creates a Hive ColumnVector of constant timestamp value public static ColumnVector createFromConstant(int batchSize, Object value) { LongColumnVector res = new LongColumnVector(batchSize); if (value == null) { res.noNulls = false; res.isNull[0] = true; res.isRepeating = true; } else { Timestamp timestamp = value instanceof LocalDateTime ? Timestamp.valueOf((LocalDateTime) value) : (Timestamp) value; res.fill(fromTimestamp(timestamp)); res.isNull[0] = false; } return res; }
3.68
framework_AbstractColorPicker_getSwatchesVisibility
/** * Gets the visibility of the Swatches Tab. * * @since 7.5.0 * @return visibility of the swatches tab */ public boolean getSwatchesVisibility() { return swatchesVisible; }
3.68
flink_AbstractKeyedStateBackend_getOrCreateKeyedState
/** @see KeyedStateBackend */ @Override @SuppressWarnings("unchecked") public <N, S extends State, V> S getOrCreateKeyedState( final TypeSerializer<N> namespaceSerializer, StateDescriptor<S, V> stateDescriptor) throws Exception { checkNotNull(namespaceSerializer, "Namespace serializer"); checkNotNull( keySerializer, "State key serializer has not been configured in the config. " + "This operation cannot use partitioned state."); InternalKvState<K, ?, ?> kvState = keyValueStatesByName.get(stateDescriptor.getName()); if (kvState == null) { if (!stateDescriptor.isSerializerInitialized()) { stateDescriptor.initializeSerializerUnlessSet(executionConfig); } kvState = LatencyTrackingStateFactory.createStateAndWrapWithLatencyTrackingIfEnabled( TtlStateFactory.createStateAndWrapWithTtlIfEnabled( namespaceSerializer, stateDescriptor, this, ttlTimeProvider), stateDescriptor, latencyTrackingStateConfig); keyValueStatesByName.put(stateDescriptor.getName(), kvState); publishQueryableStateIfEnabled(stateDescriptor, kvState); } return (S) kvState; }
3.68
graphhopper_MiniPerfTest_getMin
/** * @return minimum time of every call, in ms */ public double getMin() { return min / NS_PER_MS; }
3.68
hadoop_BufferData_setDone
/** * Indicates that this block is no longer of use and can be reclaimed. */ public synchronized void setDone() { if (this.checksum != 0) { if (getChecksum(this.buffer) != this.checksum) { throw new IllegalStateException("checksum changed after setReady()"); } } this.state = State.DONE; this.action = null; }
3.68
framework_VTabsheet_setFocusedTab
/** * Sets the tab that has the focus currently. * * @param focusedTab * the focused tab or {@code null} if no tab should be * focused anymore */ private void setFocusedTab(Tab focusedTab) { this.focusedTab = focusedTab; }
3.68
flink_BinarySegmentUtils_readBinary
/** * Gets binary data; if the length is less than 8, the bytes are included directly in variablePartOffsetAndLen. * * <p>Note: the ByteOrder needs to be considered. * * @param baseOffset base offset of composite binary format. * @param fieldOffset absolute start offset of 'variablePartOffsetAndLen'. * @param variablePartOffsetAndLen a long value, real data or offset and len. */ public static byte[] readBinary( MemorySegment[] segments, int baseOffset, int fieldOffset, long variablePartOffsetAndLen) { long mark = variablePartOffsetAndLen & HIGHEST_FIRST_BIT; if (mark == 0) { final int subOffset = (int) (variablePartOffsetAndLen >> 32); final int len = (int) variablePartOffsetAndLen; return BinarySegmentUtils.copyToBytes(segments, baseOffset + subOffset, len); } else { int len = (int) ((variablePartOffsetAndLen & HIGHEST_SECOND_TO_EIGHTH_BIT) >>> 56); if (BinarySegmentUtils.LITTLE_ENDIAN) { return BinarySegmentUtils.copyToBytes(segments, fieldOffset, len); } else { // fieldOffset + 1 to skip header. return BinarySegmentUtils.copyToBytes(segments, fieldOffset + 1, len); } } }
3.68
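A self-contained illustration of the word layout this decodes (an illustration of the scheme, not Flink API; the mask constants are assumed to mirror HIGHEST_FIRST_BIT and HIGHEST_SECOND_TO_EIGHTH_BIT):

long word = (5L << 32) | 12L;                                   // non-inlined layout: offset 5, length 12
boolean inlined = (word & 0x8000000000000000L) != 0;            // highest bit set => bytes live inside the word
int offset = (int) (word >> 32);                                // 5, relative to the row's base offset
int len = inlined ? (int) ((word & 0x7F00000000000000L) >>> 56) // inlined: 7-bit length in bits 56-62
                  : (int) word;                                 // non-inlined: lower 32 bits, 12 here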
framework_FocusableFlowPanel_addKeyPressHandler
/* * (non-Javadoc) * * @see * com.google.gwt.event.dom.client.HasKeyPressHandlers#addKeyPressHandler * (com.google.gwt.event.dom.client.KeyPressHandler) */ @Override public HandlerRegistration addKeyPressHandler(KeyPressHandler handler) { return addDomHandler(handler, KeyPressEvent.getType()); }
3.68
hadoop_TextView_echoWithoutEscapeHtml
/** * Print strings as is (no newline, a la php echo). * @param args the strings to print */ public void echoWithoutEscapeHtml(Object... args) { PrintWriter out = writer(); for (Object s : args) { out.print(s); } }
3.68
hbase_SegmentFactory_createImmutableSegmentByCompaction
// create new flat immutable segment from compacting old immutable segments // for compaction public ImmutableSegment createImmutableSegmentByCompaction(final Configuration conf, final CellComparator comparator, MemStoreSegmentsIterator iterator, int numOfCells, CompactingMemStore.IndexType idxType, MemStoreCompactionStrategy.Action action) throws IOException { MemStoreLAB memStoreLAB = MemStoreLAB.newInstance(conf); return createImmutableSegment(conf, comparator, iterator, memStoreLAB, numOfCells, action, idxType); }
3.68
flink_ProjectOperator_projectTuple8
/** * Projects a {@link Tuple} {@link DataSet} to the previously selected fields. * * @return The projected DataSet. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7> ProjectOperator<T, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> projectTuple8() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType()); TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> tType = new TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(fTypes); return new ProjectOperator<T, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>( this.ds, this.fieldIndexes, tType); }
3.68
hadoop_ExitUtil_getFirstExitException
/** * @return the first {@code ExitException} thrown, null if none thrown yet. */ public static ExitException getFirstExitException() { return FIRST_EXIT_EXCEPTION.get(); }
3.68
flink_SplitFetcher_pauseOrResumeSplits
/** * Called when some splits of this source instance progressed too much beyond the global * watermark of all subtasks. If the split reader implements {@link SplitReader}, it will relay * the information asynchronously through the split fetcher thread. * * @param splitsToPause the splits to pause * @param splitsToResume the splits to resume */ public void pauseOrResumeSplits( Collection<SplitT> splitsToPause, Collection<SplitT> splitsToResume) { lock.lock(); try { enqueueTaskUnsafe( new PauseOrResumeSplitsTask<>( splitReader, splitsToPause, splitsToResume, allowUnalignedSourceSplits)); wakeUpUnsafe(true); } finally { lock.unlock(); } }
3.68
framework_UIEvents_getUI
/** * Get the {@link UI} instance that received the poll request. * * @return the {@link UI} that received the poll request. Never * <code>null</code>. */ public UI getUI() { /* * This cast is safe to make, since this class' constructor * constrains the source to be a UI instance. */ return (UI) getComponent(); }
3.68
flink_FlinkContainersSettings_jobManagerHostname
/** * Sets the job manager hostname and returns a reference to this Builder enabling method * chaining. * * @param jobManagerHostname The job manager hostname to set. * @return A reference to this Builder. */ public Builder jobManagerHostname(String jobManagerHostname) { return setConfigOption(JobManagerOptions.ADDRESS, jobManagerHostname); }
3.68
framework_ComponentRendererConnector_createConnectorHierarchyChangeHandler
/** * Adds a listener for grid hierarchy changes in order to find detached connectors * previously handled by this renderer, so that their widgets can be detached from the DOM * before {@link AbstractComponentConnector#onUnregister()} is invoked; otherwise an error * message is logged. */ private void createConnectorHierarchyChangeHandler() { if (handlerRegistration == null) { handlerRegistration = getGridConnector() .addConnectorHierarchyChangeHandler(event -> { Iterator<String> iterator = knownConnectors.iterator(); while (iterator.hasNext()) { ComponentConnector connector = (ComponentConnector) ConnectorMap .get(getConnection()) .getConnector(iterator.next()); if (connector != null && connector.getParent() == null) { connector.getWidget().removeFromParent(); iterator.remove(); } } }); } }
3.68
hadoop_StartupProgress_beginPhase
/** * Begins execution of the specified phase. * * @param phase Phase to begin */ public void beginPhase(Phase phase) { if (!isComplete()) { phases.get(phase).beginTime = monotonicNow(); } LOG.debug("Beginning of the phase: {}", phase); }
3.68
dubbo_SerializingExecutor_execute
/** * Runs the given runnable strictly after all Runnables that were submitted * before it, and using the {@code executor} passed to the constructor. */ @Override public void execute(Runnable r) { runQueue.add(r); schedule(r); }
3.68
hbase_TaskMonitor_getTasks
/** * Produces a list containing copies of the current state of all non-expired MonitoredTasks * handled by this TaskMonitor. * @param filter type of wanted tasks * @return A filtered list of MonitoredTasks. */ public synchronized List<MonitoredTask> getTasks(String filter) { purgeExpiredTasks(); TaskFilter taskFilter = createTaskFilter(filter); ArrayList<MonitoredTask> results = Lists.newArrayListWithCapacity(tasks.size() + rpcTasks.size()); processTasks(tasks, taskFilter, results); processTasks(rpcTasks, taskFilter, results); return results; }
3.68
flink_TieredStorageConfiguration_getMinReserveDiskSpaceFraction
/** * Minimum reserved disk space fraction in disk tier. * * @return the fraction. */ public float getMinReserveDiskSpaceFraction() { return minReserveDiskSpaceFraction; }
3.68
hadoop_RemoteSASKeyGeneratorImpl_makeRemoteRequest
/** * Helper method to make a remote request. * * @param urls - Urls to use for the remote request * @param path - path to use for the remote request * @param queryParams - queryParams to be used. * @return RemoteSASKeyGenerationResponse */ private RemoteSASKeyGenerationResponse makeRemoteRequest(String[] urls, String path, List<NameValuePair> queryParams) throws SASKeyGenerationException { try { String responseBody = remoteCallHelper .makeRemoteRequest(urls, path, queryParams, HttpGet.METHOD_NAME); return RESPONSE_READER.readValue(responseBody); } catch (WasbRemoteCallException remoteCallEx) { throw new SASKeyGenerationException("Encountered RemoteCallException" + " while retrieving SAS key from remote service", remoteCallEx); } catch (JsonParseException jsonParserEx) { throw new SASKeyGenerationException("Encountered JsonParseException " + "while parsing the response from remote" + " service into RemoteSASKeyGenerationResponse object", jsonParserEx); } catch (JsonMappingException jsonMappingEx) { throw new SASKeyGenerationException("Encountered JsonMappingException" + " while mapping the response from remote service into " + "RemoteSASKeyGenerationResponse object", jsonMappingEx); } catch (IOException ioEx) { throw new SASKeyGenerationException("Encountered IOException while " + "accessing remote service to retrieve SAS Key", ioEx); } }
3.68
flink_AbstractBlobCache_setBlobServerAddress
/** * Sets the address of the {@link BlobServer}. * * @param blobServerAddress address of the {@link BlobServer}. */ public void setBlobServerAddress(InetSocketAddress blobServerAddress) { serverAddress = checkNotNull(blobServerAddress); }
3.68
morf_SelectStatement_useImplicitJoinOrder
/** * If supported by the dialect, hints to the database that joins should be applied in the order * they are written in the SQL statement. * * <p>This is supported to a greater or lesser extent on different SQL dialects. For instance, * MySQL has no means to force ordering on anything except inner joins, but we do our best. As * a result, this is not a panacea and may need to be combined with * {@link #useIndex(TableReference, String)} to achieve a consistent effect across * platforms.</p> * * <p>In general, as with all query plan modification, <strong>do not use this unless you know * exactly what you are doing</strong>.</p> * * <p>As for all query plan modification (see also {@link #optimiseForRowCount(int)} * and {@link #useIndex(TableReference, String)}): where supported on the target database, these directives are * applied in the SQL in the order they are called on {@link SelectStatement}. This usually * affects their precedence or relative importance, depending on the platform.</p> * * @return a new select statement with the change applied. */ public SelectStatement useImplicitJoinOrder() { return copyOnWriteOrMutate( (SelectStatementBuilder b) -> b.useImplicitJoinOrder(), () -> this.hints.add(new UseImplicitJoinOrder()) ); }
3.68
morf_Criterion_like
/** * Helper method to create a new "LIKE" expression. * * <blockquote><pre> * Criterion.like(new Field("agreementnumber"), "A%");</pre></blockquote> * * <p>Note the escape character is set to '\' (backslash) by the underlying system.</p> * * @param field the field to evaluate in the expression (the left hand side of the expression) * @param value the value to evaluate in the expression (the right hand side) * @return a new Criterion object */ public static Criterion like(AliasedField field, Object value) { return new Criterion(Operator.LIKE, field, value); }
3.68
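A usage sketch echoing the Javadoc's own example:

Criterion startsWithA = Criterion.like(new Field("agreementnumber"), "A%"); // matches values beginning with "A"; '\' escapes wildcards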
pulsar_SubscriptionStatsImpl_add
// if the stats are added for the 1st time, we will need to make a copy of these stats and add it to the current // stats public SubscriptionStatsImpl add(SubscriptionStatsImpl stats) { Objects.requireNonNull(stats); this.msgRateOut += stats.msgRateOut; this.msgThroughputOut += stats.msgThroughputOut; this.bytesOutCounter += stats.bytesOutCounter; this.msgOutCounter += stats.msgOutCounter; this.msgRateRedeliver += stats.msgRateRedeliver; this.messageAckRate += stats.messageAckRate; this.chunkedMessageRate += stats.chunkedMessageRate; this.msgBacklog += stats.msgBacklog; this.backlogSize += stats.backlogSize; this.msgBacklogNoDelayed += stats.msgBacklogNoDelayed; this.msgDelayed += stats.msgDelayed; this.unackedMessages += stats.unackedMessages; this.type = stats.type; this.msgRateExpired += stats.msgRateExpired; this.totalMsgExpired += stats.totalMsgExpired; this.isReplicated |= stats.isReplicated; this.isDurable |= stats.isDurable; if (this.consumers.size() != stats.consumers.size()) { for (int i = 0; i < stats.consumers.size(); i++) { ConsumerStatsImpl consumerStats = new ConsumerStatsImpl(); this.consumers.add(consumerStats.add(stats.consumers.get(i))); } } else { for (int i = 0; i < stats.consumers.size(); i++) { this.consumers.get(i).add(stats.consumers.get(i)); } } this.allowOutOfOrderDelivery |= stats.allowOutOfOrderDelivery; this.consumersAfterMarkDeletePosition.putAll(stats.consumersAfterMarkDeletePosition); this.nonContiguousDeletedMessagesRanges += stats.nonContiguousDeletedMessagesRanges; this.nonContiguousDeletedMessagesRangesSerializedSize += stats.nonContiguousDeletedMessagesRangesSerializedSize; if (this.earliestMsgPublishTimeInBacklog != 0 && stats.earliestMsgPublishTimeInBacklog != 0) { this.earliestMsgPublishTimeInBacklog = Math.min( this.earliestMsgPublishTimeInBacklog, stats.earliestMsgPublishTimeInBacklog ); } else { this.earliestMsgPublishTimeInBacklog = Math.max( this.earliestMsgPublishTimeInBacklog, stats.earliestMsgPublishTimeInBacklog ); } this.delayedMessageIndexSizeInBytes += stats.delayedMessageIndexSizeInBytes; this.subscriptionProperties.putAll(stats.subscriptionProperties); this.filterProcessedMsgCount += stats.filterProcessedMsgCount; this.filterAcceptedMsgCount += stats.filterAcceptedMsgCount; this.filterRejectedMsgCount += stats.filterRejectedMsgCount; this.filterRescheduledMsgCount += stats.filterRescheduledMsgCount; stats.bucketDelayedIndexStats.forEach((k, v) -> { TopicMetricBean topicMetricBean = this.bucketDelayedIndexStats.computeIfAbsent(k, __ -> new TopicMetricBean()); topicMetricBean.name = v.name; topicMetricBean.labelsAndValues = v.labelsAndValues; topicMetricBean.value += v.value; }); return this; }
3.68
hadoop_DomainColumn_getColumnQualifier
/** * @return the column qualifier value */ private String getColumnQualifier() { return columnQualifier; }
3.68
hbase_MasterRpcServices_checkMasterProcedureExecutor
/** * @throws ServiceException If no MasterProcedureExecutor */ private void checkMasterProcedureExecutor() throws ServiceException { if (this.server.getMasterProcedureExecutor() == null) { throw new ServiceException("Master's ProcedureExecutor not initialized; retry later"); } }
3.68
flink_GatewayRetriever_getNow
/** * Returns the currently retrieved gateway if there is such an object. Otherwise it returns an * empty optional. * * @return Optional object to retrieve */ default Optional<T> getNow() { CompletableFuture<T> leaderFuture = getFuture(); if (leaderFuture != null) { if (leaderFuture.isCompletedExceptionally() || leaderFuture.isCancelled()) { return Optional.empty(); } else if (leaderFuture.isDone()) { try { return Optional.of(leaderFuture.get()); } catch (Exception e) { // this should never happen throw new FlinkRuntimeException( "Unexpected error while accessing the retrieved gateway.", e); } } else { return Optional.empty(); } } else { return Optional.empty(); } }
3.68
flink_SharedBufferAccessor_releaseNode
/** * Decreases the reference counter for the given entry so that it can be removed once the * reference counter reaches 0. * * @param node id of the entry * @param version dewey number of the (potential) edge that locked the given node * @throws Exception Thrown if the system cannot access the state. */ public void releaseNode(final NodeId node, final DeweyNumber version) throws Exception { // the stack used to detect all nodes that needs to be released. Stack<NodeId> nodesToExamine = new Stack<>(); Stack<DeweyNumber> versionsToExamine = new Stack<>(); nodesToExamine.push(node); versionsToExamine.push(version); while (!nodesToExamine.isEmpty()) { NodeId curNode = nodesToExamine.pop(); Lockable<SharedBufferNode> curBufferNode = sharedBuffer.getEntry(curNode); if (curBufferNode == null) { break; } DeweyNumber currentVersion = versionsToExamine.pop(); List<Lockable<SharedBufferEdge>> edges = curBufferNode.getElement().getEdges(); Iterator<Lockable<SharedBufferEdge>> edgesIterator = edges.iterator(); while (edgesIterator.hasNext()) { Lockable<SharedBufferEdge> sharedBufferEdge = edgesIterator.next(); SharedBufferEdge edge = sharedBufferEdge.getElement(); if (currentVersion.isCompatibleWith(edge.getDeweyNumber())) { if (sharedBufferEdge.release()) { edgesIterator.remove(); NodeId targetId = edge.getTarget(); if (targetId != null) { nodesToExamine.push(targetId); versionsToExamine.push(edge.getDeweyNumber()); } } } } if (curBufferNode.release()) { // first release the current node sharedBuffer.removeEntry(curNode); releaseEvent(curNode.getEventId()); } else { sharedBuffer.upsertEntry(curNode, curBufferNode); } } }
3.68
querydsl_AbstractCollQuery_bind
/** * Bind the given collection to an already existing query source * * @param <A> type of expression * @param entity Path for the source * @param col content of the source * @return current object */ public <A> Q bind(Path<A> entity, Iterable<? extends A> col) { iterables.put(entity, col); return queryMixin.getSelf(); }
3.68
hbase_HRegionServer_getRegions
/** * Gets the online regions of the specified table. This method looks at the in-memory * onlineRegions. It does not go to <code>hbase:meta</code>. Only returns <em>online</em> regions. * If a region on this table has been closed during a disable, etc., it will not be included in * the returned list. So, the returned list may not necessarily be ALL regions in this table, its * all the ONLINE regions in the table. * @param tableName table to limit the scope of the query * @return Online regions from <code>tableName</code> */ @Override public List<HRegion> getRegions(TableName tableName) { List<HRegion> tableRegions = new ArrayList<>(); synchronized (this.onlineRegions) { for (HRegion region : this.onlineRegions.values()) { RegionInfo regionInfo = region.getRegionInfo(); if (regionInfo.getTable().equals(tableName)) { tableRegions.add(region); } } } return tableRegions; }
3.68
hbase_MetricsHeapMemoryManager_setCurBlockCacheSizeGauge
/** * Set the current blockcache size used gauge * @param blockCacheSize the current memory usage in blockcache, in bytes. */ public void setCurBlockCacheSizeGauge(final long blockCacheSize) { source.setCurBlockCacheSizeGauge(blockCacheSize); }
3.68
flink_Costs_addHeuristicCpuCost
/** * Adds the given heuristic CPU cost to the current heuristic CPU cost for this Costs object. * * @param cost The heuristic CPU cost to add. */ public void addHeuristicCpuCost(double cost) { if (cost <= 0) { throw new IllegalArgumentException("Heuristic costs must be positive."); } this.heuristicCpuCost += cost; // check for overflow if (this.heuristicCpuCost < 0) { this.heuristicCpuCost = Double.MAX_VALUE; } }
3.68
framework_VDragAndDropManager_executeWhenReady
/** * Method to execute commands when all existing dd related tasks are * completed (some may require server visit). * <p> * Using this method may be handy if criteria that use lazy initialization * are used. * <p> * TODO Optimization: consider if we actually only need to keep the last * command in queue here. * * @param command * the command to execute */ public void executeWhenReady(Command command) { if (isBusy()) { defer(command); } else { command.execute(); } }
3.68
morf_OracleDialect_getSqlForNow
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForNow(org.alfasoftware.morf.sql.element.Function) */ @Override protected String getSqlForNow(Function function) { return "SYSTIMESTAMP AT TIME ZONE 'UTC'"; }
3.68
flink_FlinkDatabaseMetaData_storesMixedCaseIdentifiers
/** * Flink SQL stores mixed-case identifiers and treats them as case sensitive. */ @Override public boolean storesMixedCaseIdentifiers() throws SQLException { return true; }
3.68
flink_KubernetesEntrypointUtils_loadConfiguration
/** * For a non-HA cluster, {@link JobManagerOptions#ADDRESS} has to be set to the Kubernetes service name * on the client side. See {@link KubernetesClusterDescriptor#deployClusterInternal}. So the * TaskManager will use the service address to contact the JobManager. For an HA cluster, {@link * JobManagerOptions#ADDRESS} will be set to the pod ip address. The TaskManager uses ZooKeeper * or another high-availability service to find the address of the JobManager. * * @return Updated configuration */ static Configuration loadConfiguration(Configuration dynamicParameters) { final String configDir = System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR); Preconditions.checkNotNull( configDir, "Flink configuration directory (%s) in environment should not be null!", ConfigConstants.ENV_FLINK_CONF_DIR); final Configuration configuration = GlobalConfiguration.loadConfiguration(configDir, dynamicParameters); if (KubernetesUtils.isHostNetwork(configuration)) { configuration.setString(RestOptions.BIND_PORT, "0"); configuration.setInteger(JobManagerOptions.PORT, 0); configuration.setString(BlobServerOptions.PORT, "0"); configuration.setString(HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE, "0"); configuration.setString(TaskManagerOptions.RPC_PORT, "0"); } if (HighAvailabilityMode.isHighAvailabilityModeActivated(configuration)) { final String ipAddress = System.getenv().get(Constants.ENV_FLINK_POD_IP_ADDRESS); Preconditions.checkState( ipAddress != null, "JobManager ip address environment variable %s not set", Constants.ENV_FLINK_POD_IP_ADDRESS); configuration.setString(JobManagerOptions.ADDRESS, ipAddress); configuration.setString(RestOptions.ADDRESS, ipAddress); } return configuration; }
3.68
querydsl_PathBuilder_getBoolean
/** * Create a new Boolean typed path * * @param propertyName property name * @return property path */ public BooleanPath getBoolean(String propertyName) { validate(propertyName, Boolean.class); return super.createBoolean(propertyName); }
3.68
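A brief usage sketch; the entity root and property name are illustrative:

PathBuilder<Object> user = new PathBuilder<>(Object.class, "user"); // hypothetical root
BooleanPath active = user.getBoolean("active");                     // user.active
// BooleanPath is a BooleanExpression, so it works directly as a predicate, e.g. active.isTrue()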
pulsar_Commands_peekAndCopyMessageMetadata
/** * Peek the message metadata from the buffer and return a deep copy of the metadata. * * If you want to hold multiple {@link MessageMetadata} instances from multiple buffers, you must call this method * rather than {@link Commands#peekMessageMetadata(ByteBuf, String, long)}, which returns a thread local reference, * see {@link Commands#LOCAL_MESSAGE_METADATA}. */ public static MessageMetadata peekAndCopyMessageMetadata( ByteBuf metadataAndPayload, String subscription, long consumerId) { final MessageMetadata localMetadata = peekMessageMetadata(metadataAndPayload, subscription, consumerId); if (localMetadata == null) { return null; } final MessageMetadata metadata = new MessageMetadata(); metadata.copyFrom(localMetadata); return metadata; }
3.68
flink_UnorderedStreamElementQueue_emitCompleted
/** * Pops one completed element into the given output. Because an input element may produce * an arbitrary number of output elements, there is no correlation between the size of the * collection and the popped elements. * * @return the number of popped input elements. */ int emitCompleted(TimestampedCollector<OUT> output) { final StreamElementQueueEntry<OUT> completedEntry = completedElements.poll(); if (completedEntry == null) { return 0; } completedEntry.emitResult(output); return 1; }
3.68
open-banking-gateway_PathHeadersBodyMapperTemplate_forExecution
/** * Converts context object into object that can be used for ASPSP API call. * @param context Context to convert * @return Object that can be used with {@code Xs2aAdapter} to perform ASPSP API calls */ public ValidatedPathHeadersBody<P, H, B> forExecution(C context) { return new ValidatedPathHeadersBody<>( toPath.map(context), toHeaders.map(context), toBody.map(toValidatableBody.map(context)) ); }
3.68
flink_RestServerEndpoint_shutDownInternal
/** * Stops this REST server endpoint. * * @return Future which is completed once the shut down has been finished. */ protected CompletableFuture<Void> shutDownInternal() { synchronized (lock) { CompletableFuture<?> channelFuture = new CompletableFuture<>(); if (serverChannel != null) { serverChannel .close() .addListener( finished -> { if (finished.isSuccess()) { channelFuture.complete(null); } else { channelFuture.completeExceptionally(finished.cause()); } }); serverChannel = null; } final CompletableFuture<Void> channelTerminationFuture = new CompletableFuture<>(); channelFuture.thenRun( () -> { CompletableFuture<?> groupFuture = new CompletableFuture<>(); CompletableFuture<?> childGroupFuture = new CompletableFuture<>(); final Time gracePeriod = Time.seconds(10L); if (bootstrap != null) { final ServerBootstrapConfig config = bootstrap.config(); final EventLoopGroup group = config.group(); if (group != null) { group.shutdownGracefully( 0L, gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS) .addListener( finished -> { if (finished.isSuccess()) { groupFuture.complete(null); } else { groupFuture.completeExceptionally( finished.cause()); } }); } else { groupFuture.complete(null); } final EventLoopGroup childGroup = config.childGroup(); if (childGroup != null) { childGroup .shutdownGracefully( 0L, gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS) .addListener( finished -> { if (finished.isSuccess()) { childGroupFuture.complete(null); } else { childGroupFuture.completeExceptionally( finished.cause()); } }); } else { childGroupFuture.complete(null); } bootstrap = null; } else { // complete the group futures since there is nothing to stop groupFuture.complete(null); childGroupFuture.complete(null); } CompletableFuture<Void> combinedFuture = FutureUtils.completeAll( Arrays.asList(groupFuture, childGroupFuture)); combinedFuture.whenComplete( (Void ignored, Throwable throwable) -> { if (throwable != null) { channelTerminationFuture.completeExceptionally(throwable); } else { channelTerminationFuture.complete(null); } }); }); return channelTerminationFuture; } }
3.68
hbase_RequestConverter_buildGetServerInfoRequest
/** * Create a new GetServerInfoRequest * @return a GetServerInfoRequest */ public static GetServerInfoRequest buildGetServerInfoRequest() { return GetServerInfoRequest.getDefaultInstance(); }
3.68
hbase_HBaseTestingUtility_getOtherRegionServer
/** * Finds any region server other than the one passed in. * @return another region server, or null if there is none */ public HRegionServer getOtherRegionServer(HRegionServer rs) { for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) { if (!(rst.getRegionServer() == rs)) { return rst.getRegionServer(); } } return null; }
3.68
hbase_CellCodec_write
/** * Write int length followed by array bytes. */ private void write(final byte[] bytes, final int offset, final int length) throws IOException { // TODO add BB backed os check and do for write. Pass Cell this.out.write(Bytes.toBytes(length)); this.out.write(bytes, offset, length); }
3.68
hadoop_WriteOperationHelper_select
/** * Execute an S3 Select operation. * On a failure, the request is only logged at debug to avoid the * select exception being printed. * * @param source source for selection * @param request Select request to issue. * @param action the action for use in exception creation * @return response * @throws IOException failure */ @Retries.RetryTranslated public SelectEventStreamPublisher select( final Path source, final SelectObjectContentRequest request, final String action) throws IOException { // no setting of span here as the select binding is (statically) created // without any span. String bucketName = request.bucket(); Preconditions.checkArgument(bucket.equals(bucketName), "wrong bucket: %s", bucketName); if (LOG.isDebugEnabled()) { LOG.debug("Initiating select call {} {}", source, request.expression()); LOG.debug(SelectBinding.toString(request)); } return invoker.retry( action, source.toString(), true, withinAuditSpan(getAuditSpan(), () -> { try (DurationInfo ignored = new DurationInfo(LOG, "S3 Select operation")) { try { return SelectObjectContentHelper.select( writeOperationHelperCallbacks, source, request, action); } catch (Throwable e) { LOG.error("Failure of S3 Select request against {}", source); LOG.debug("S3 Select request against {}:\n{}", source, SelectBinding.toString(request), e); throw e; } } })); }
3.68
pulsar_Schema_generic
/** * Returns a generic schema of existing schema info. * * <p>Only supports AVRO and JSON. * * @param schemaInfo schema info * @return a generic schema instance */ static GenericSchema<GenericRecord> generic(SchemaInfo schemaInfo) { return DefaultImplementation.getDefaultImplementation().getGenericSchema(schemaInfo); }
3.68
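A hedged usage sketch for decoding payloads whose schema is only known at runtime; fetchSchemaInfo and payloadBytes are hypothetical:

SchemaInfo info = fetchSchemaInfo();                    // hypothetical lookup; must be AVRO or JSON
GenericSchema<GenericRecord> schema = Schema.generic(info);
GenericRecord record = schema.decode(payloadBytes);     // payloadBytes: hypothetical byte[]
Object value = record.getField("someField");            // field name illustrative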
hbase_RegionLocations_mergeLocations
/** * Merges this RegionLocations list with the given list assuming same range, and keeping the most * up-to-date version of the HRegionLocation entries from either list according to seqNum. If * seqNums are equal, the location from the argument (other) is taken. * @param other the locations to merge with * @return a RegionLocations object with merged locations or the same object if nothing is merged */ @SuppressWarnings("ReferenceEquality") public RegionLocations mergeLocations(RegionLocations other) { assert other != null; HRegionLocation[] newLocations = null; // Use the length from other, since it is coming from meta. Otherwise, // in case of region replication going down, we might have a leak here. int max = other.locations.length; RegionInfo regionInfo = null; for (int i = 0; i < max; i++) { HRegionLocation thisLoc = this.getRegionLocation(i); HRegionLocation otherLoc = other.getRegionLocation(i); if (regionInfo == null && otherLoc != null && otherLoc.getRegion() != null) { // regionInfo is the first non-null HRI from other RegionLocations. We use it to ensure that // all replica region infos belong to the same region with same region id. regionInfo = otherLoc.getRegion(); } HRegionLocation selectedLoc = selectRegionLocation(thisLoc, otherLoc, true, false); if (selectedLoc != thisLoc) { if (newLocations == null) { newLocations = new HRegionLocation[max]; System.arraycopy(locations, 0, newLocations, 0, i); } } if (newLocations != null) { newLocations[i] = selectedLoc; } } // ensure that all replicas share the same start code. Otherwise delete them if (newLocations != null && regionInfo != null) { for (int i = 0; i < newLocations.length; i++) { if (newLocations[i] != null) { if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, newLocations[i].getRegion())) { newLocations[i] = null; } } } } return newLocations == null ? this : new RegionLocations(newLocations); }
3.68
hibernate-validator_AnnotationTypeMemberCheck_checkMessageAttribute
/** * Checks that the given type element * <p/> * <ul> * <li>has a method with name "message",</li> * <li>the return type of this method is {@link String}.</li> * </ul> * * @param element The element of interest. * * @return A possibly non-empty set of constraint check errors, never null. */ private Set<ConstraintCheckIssue> checkMessageAttribute(TypeElement element) { ExecutableElement messageMethod = getMethod( element, "message" ); if ( messageMethod == null ) { return CollectionHelper.asSet( ConstraintCheckIssue.error( element, null, "CONSTRAINT_TYPE_MUST_DECLARE_MESSAGE_MEMBER" ) ); } if ( !typeUtils.isSameType( annotationApiHelper.getMirrorForType( String.class ), messageMethod.getReturnType() ) ) { return CollectionHelper.asSet( ConstraintCheckIssue.error( messageMethod, null, "RETURN_TYPE_MUST_BE_STRING" ) ); } return Collections.emptySet(); }
3.68
dubbo_MetadataResolver_resolveConsumerServiceMetadata
/** * Resolves service REST metadata for the consumer side. * * @param targetClass target service class * @param url consumer url * @return rest metadata * @throws CodeStyleNotSupportException if the code style is not supported */ public static ServiceRestMetadata resolveConsumerServiceMetadata( Class<?> targetClass, URL url, String contextPathFromUrl) { ExtensionLoader<ServiceRestMetadataResolver> extensionLoader = url.getOrDefaultApplicationModel().getExtensionLoader(ServiceRestMetadataResolver.class); for (ServiceRestMetadataResolver serviceRestMetadataResolver : extensionLoader.getSupportedExtensionInstances()) { if (serviceRestMetadataResolver.supports(targetClass, true)) { ServiceRestMetadata serviceRestMetadata = new ServiceRestMetadata(url.getServiceInterface(), url.getVersion(), url.getGroup(), true); serviceRestMetadata.setContextPathFromUrl(contextPathFromUrl); ServiceRestMetadata resolve = serviceRestMetadataResolver.resolve(targetClass, serviceRestMetadata); return resolve; } } // TODO support Dubbo style service throw new CodeStyleNotSupportException("service is: " + targetClass + ", only support " + extensionLoader.getSupportedExtensions() + " annotation"); }
3.68
framework_UIDL_getIntAttribute
/** * Gets the named attribute as an int. * * @param name * the name of the attribute to get * @return the attribute value */ public int getIntAttribute(String name) { return attr().getInt(name); }
3.68
hudi_S3EventsMetaSelector_createSourceSelector
/** * Factory method for creating custom CloudObjectsMetaSelector. Default selector to use is {@link * S3EventsMetaSelector} */ public static S3EventsMetaSelector createSourceSelector(TypedProperties props) { String sourceSelectorClass = getStringWithAltKeys( props, DFSPathSelectorConfig.SOURCE_INPUT_SELECTOR, S3EventsMetaSelector.class.getName()); try { S3EventsMetaSelector selector = (S3EventsMetaSelector) ReflectionUtils.loadClass( sourceSelectorClass, new Class<?>[] {TypedProperties.class}, props); log.info("Using path selector " + selector.getClass().getName()); return selector; } catch (Exception e) { throw new HoodieException("Could not load source selector class " + sourceSelectorClass, e); } }
3.68
framework_UIDL_getStringArrayAttribute
/** * Gets the named attribute as an array of Strings. * * @param name * the name of the attribute to get * @return the attribute value */ public String[] getStringArrayAttribute(String name) { return attr().getStringArray(name); }
3.68
flink_CheckpointProperties_discardOnJobFailed
/** * Returns whether the checkpoint should be discarded when the owning job reaches the {@link * JobStatus#FAILED} state. * * @return <code>true</code> if the checkpoint should be discarded when the owning job reaches * the {@link JobStatus#FAILED} state; <code>false</code> otherwise. * @see CompletedCheckpointStore */ boolean discardOnJobFailed() { return discardFailed; }
3.68
flink_IOUtils_skipFully
/** * Similar to readFully(). Skips bytes in a loop. * * @param in The InputStream to skip bytes from * @param len number of bytes to skip * @throws IOException if it could not skip the requested number of bytes for any reason (including * EOF) */ public static void skipFully(final InputStream in, long len) throws IOException { while (len > 0) { final long ret = in.skip(len); if (ret < 0) { throw new IOException("Premature EOF from InputStream"); } len -= ret; } }
3.68
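A short usage sketch: skip a fixed-size header before reading the body; the file name and header size are illustrative:

try (InputStream in = new FileInputStream("data.bin")) {
    IOUtils.skipFully(in, 16);   // skip a 16-byte header; throws IOException on premature EOF
    // ... read the remainder ...
}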
graphhopper_VectorTile_addValues
/** * <pre> * Dictionary encoding for values * </pre> * * <code>repeated .vector_tile.Tile.Value values = 4;</code> */ public Builder addValues( int index, vector_tile.VectorTile.Tile.Value.Builder builderForValue) { if (valuesBuilder_ == null) { ensureValuesIsMutable(); values_.add(index, builderForValue.build()); onChanged(); } else { valuesBuilder_.addMessage(index, builderForValue.build()); } return this; }
3.68
querydsl_Expressions_list
/** * Combine the given expressions into a list expression * * @param exprs list elements * @return list expression */ public static Expression<Tuple> list(Expression<?>... exprs) { return list(Tuple.class, exprs); }
3.68
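A brief usage sketch; the person root is a hypothetical PathBuilder:

PathBuilder<Object> person = new PathBuilder<>(Object.class, "person"); // hypothetical root
Expression<Tuple> name = Expressions.list(person.getString("firstName"), person.getString("lastName"));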
hbase_PrivateCellUtil_writeRow
/** * Writes the row from the given cell to the output stream * @param out The output stream to which the data has to be written * @param cell The cell whose contents have to be written * @param rlength the row length */ public static void writeRow(OutputStream out, Cell cell, short rlength) throws IOException { if (cell instanceof ByteBufferExtendedCell) { ByteBufferUtils.copyBufferToStream(out, ((ByteBufferExtendedCell) cell).getRowByteBuffer(), ((ByteBufferExtendedCell) cell).getRowPosition(), rlength); } else { out.write(cell.getRowArray(), cell.getRowOffset(), rlength); } }
3.68
hadoop_HdfsFileStatus_isEmptyLocalName
/** * Check if the local name is empty. * @return true if the name is empty */ default boolean isEmptyLocalName() { return getLocalNameInBytes().length == 0; }
3.68
hudi_HoodieAvroUtils_generateProjectionSchema
/** * Generate a reader schema off the provided writeSchema, to just project out the provided columns. */ public static Schema generateProjectionSchema(Schema originalSchema, List<String> fieldNames) { Map<String, Field> schemaFieldsMap = originalSchema.getFields().stream() .map(r -> Pair.of(r.name().toLowerCase(), r)).collect(Collectors.toMap(Pair::getLeft, Pair::getRight)); List<Schema.Field> projectedFields = new ArrayList<>(); for (String fn : fieldNames) { Schema.Field field = schemaFieldsMap.get(fn.toLowerCase()); if (field == null) { throw new HoodieException("Field " + fn + " not found in log schema. Query cannot proceed! " + "Derived Schema Fields: " + new ArrayList<>(schemaFieldsMap.keySet())); } else { projectedFields.add(new Schema.Field(field.name(), field.schema(), field.doc(), field.defaultVal())); } } Schema projectedSchema = Schema.createRecord(originalSchema.getName(), originalSchema.getDoc(), originalSchema.getNamespace(), originalSchema.isError()); projectedSchema.setFields(projectedFields); return projectedSchema; }
3.68
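A hedged usage sketch; writeSchema is an existing Avro schema and the field names are illustrative (note the lookup is case-insensitive, per the code above):

Schema projected = HoodieAvroUtils.generateProjectionSchema(writeSchema, Arrays.asList("_hoodie_record_key", "ts"));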
hbase_Query_setReplicaId
/** * Specify region replica id where Query will fetch data from. Use this together with * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a * specific replicaId. <br> * <b> Expert: </b>This is an advanced API. Only use it if you know what you are doing. */ public Query setReplicaId(int id) { this.targetReplicaId = id; return this; }
3.68
hbase_StorageClusterStatusModel_setMemStoreSizeMB
/** * @param memstoreSizeMB memstore size, in MB */ public void setMemStoreSizeMB(int memstoreSizeMB) { this.memstoreSizeMB = memstoreSizeMB; }
3.68
morf_WindowFunction_getPartitionBys
/** * @return the fields to partition by. */ public ImmutableList<AliasedField> getPartitionBys() { return partitionBys; }
3.68
flink_JaasModule_generateDefaultConfigFile
/** Generate the default JAAS config file. */ private static File generateDefaultConfigFile(String workingDir) { checkArgument(workingDir != null, "working directory should not be null."); final File jaasConfFile; try { Path path = Paths.get(workingDir); if (Files.notExists(path)) { // We intentionally favored Path.toRealPath over Files.readSymbolicLinks as the // latter one might return a // relative path if the symbolic link refers to it. Path.toRealPath resolves the // relative path instead. Path parent = path.getParent().toRealPath(); Path resolvedPath = Paths.get(parent.toString(), path.getFileName().toString()); path = Files.createDirectories(resolvedPath); } Path jaasConfPath = Files.createTempFile(path, "jaas-", ".conf"); try (InputStream resourceStream = JaasModule.class .getClassLoader() .getResourceAsStream(JAAS_CONF_RESOURCE_NAME)) { Files.copy(resourceStream, jaasConfPath, StandardCopyOption.REPLACE_EXISTING); } jaasConfFile = new File(workingDir, jaasConfPath.getFileName().toString()); jaasConfFile.deleteOnExit(); } catch (IOException e) { throw new RuntimeException("unable to generate a JAAS configuration file", e); } return jaasConfFile; }
3.68
hbase_SimpleRpcServer_getNumOpenConnections
/** * The number of open RPC connections * @return the number of open rpc connections */ @Override public int getNumOpenConnections() { return connectionManager.size(); }
3.68
pulsar_ReaderInterceptor_onPartitionsChange
/** * This method is called when partitions of the topic (partitioned-topic) changes. * * @param topicName topic name * @param partitions new updated number of partitions */ default void onPartitionsChange(String topicName, int partitions) { }
3.68
hbase_CompactingMemStore_snapshot
/** * Push the current active memstore segment into the pipeline and create a snapshot of the tail of * the current compaction pipeline. The snapshot must be cleared by a call to * {@link #clearSnapshot(long)}. * @return {@link MemStoreSnapshot} */ @Override public MemStoreSnapshot snapshot() { // If snapshot currently has entries, then flusher failed or didn't call // cleanup. Log a warning. if (!this.snapshot.isEmpty()) { LOG.warn("Snapshot called again without clearing previous. " + "Doing nothing. Another ongoing flush or did we fail last attempt?"); } else { LOG.debug("FLUSHING TO DISK {}, store={}", getRegionServices().getRegionInfo().getEncodedName(), getFamilyName()); stopCompaction(); // region level lock ensures pushing active to pipeline is done in isolation // no concurrent update operations trying to flush the active segment pushActiveToPipeline(getActive(), true); resetTimeOfOldestEdit(); snapshotId = EnvironmentEdgeManager.currentTime(); // in both cases whatever is pushed to snapshot is cleared from the pipeline if (compositeSnapshot) { pushPipelineToSnapshot(); } else { pushTailToSnapshot(); } compactor.resetStats(); } return new MemStoreSnapshot(snapshotId, this.snapshot); }
3.68
flink_QueryableStateUtils_createKvStateServer
/** * Initializes the {@link KvStateServer server} responsible for sending the requested internal * state to the {@link KvStateClientProxy client proxy}. * * @param address the address to bind to. * @param ports the range of ports the state server will attempt to listen to (see {@link * org.apache.flink.configuration.QueryableStateOptions#SERVER_PORT_RANGE * QueryableStateOptions.SERVER_PORT_RANGE}). * @param eventLoopThreads the number of threads to be used to process incoming requests. * @param queryThreads the number of threads to be used to send the actual state. * @param kvStateRegistry the registry with the queryable state. * @param stats statistics to be gathered about the incoming requests. * @return the {@link KvStateServer state server}. */ public static KvStateServer createKvStateServer( final String address, final Iterator<Integer> ports, final int eventLoopThreads, final int queryThreads, final KvStateRegistry kvStateRegistry, final KvStateRequestStats stats) { Preconditions.checkNotNull(address, "address"); Preconditions.checkNotNull(kvStateRegistry, "registry"); Preconditions.checkNotNull(stats, "stats"); Preconditions.checkArgument(eventLoopThreads >= 1); Preconditions.checkArgument(queryThreads >= 1); try { String classname = "org.apache.flink.queryablestate.server.KvStateServerImpl"; Class<? extends KvStateServer> clazz = Class.forName(classname).asSubclass(KvStateServer.class); Constructor<? extends KvStateServer> constructor = clazz.getConstructor( String.class, Iterator.class, Integer.class, Integer.class, KvStateRegistry.class, KvStateRequestStats.class); return constructor.newInstance( address, ports, eventLoopThreads, queryThreads, kvStateRegistry, stats); } catch (ClassNotFoundException e) { final String msg = "Could not load Queryable State Server. " + ERROR_MESSAGE_ON_LOAD_FAILURE; if (LOG.isDebugEnabled()) { LOG.debug(msg + " Cause: " + e.getMessage()); } else { LOG.info(msg); } return null; } catch (InvocationTargetException e) { LOG.error("Queryable State Server could not be created: ", e.getTargetException()); return null; } catch (Throwable t) { LOG.error("Failed to instantiate the Queryable State Server.", t); return null; } }
3.68
hbase_ReplicationSourceManager_releaseBufferQuota
/** * To release the buffer quota which was acquired by * {@link ReplicationSourceManager#acquireBufferQuota}. */ void releaseBufferQuota(long size) { if (size < 0) { throw new IllegalArgumentException("size should not be less than 0"); } addTotalBufferUsed(-size); }
3.68
hadoop_RouterObserverReadProxyProvider_isRead
/** * Check if a method is read-only. * * @return whether the 'method' is a read-only operation. */ private static boolean isRead(Method method) { if (!method.isAnnotationPresent(ReadOnly.class)) { return false; } return !method.getAnnotationsByType(ReadOnly.class)[0].activeOnly(); }
3.68
hadoop_EntityTypeReader_getNextRowKey
/** * Gets the possibly next row key prefix given current prefix and type. * * @param currRowKeyPrefix The current prefix that contains user, cluster, * flow, run, and application id. * @param entityType Current entity type. * @return A new prefix for the possibly immediately next row key. */ private static byte[] getNextRowKey(byte[] currRowKeyPrefix, String entityType) { if (currRowKeyPrefix == null || entityType == null) { return null; } byte[] entityTypeEncoded = Separator.QUALIFIERS.join( Separator.encode(entityType, Separator.SPACE, Separator.TAB, Separator.QUALIFIERS), Separator.EMPTY_BYTES); byte[] currRowKey = new byte[currRowKeyPrefix.length + entityTypeEncoded.length]; System.arraycopy(currRowKeyPrefix, 0, currRowKey, 0, currRowKeyPrefix.length); System.arraycopy(entityTypeEncoded, 0, currRowKey, currRowKeyPrefix.length, entityTypeEncoded.length); return HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix( currRowKey); }
3.68
framework_AbstractOrderedLayoutConnector_needsExpand
/** * Does the layout need to expand? */ private boolean needsExpand() { return needsExpand; }
3.68