Dataset columns: name (string, length 12 to 178) · code_snippet (string, length 8 to 36.5k) · score (float64, range 3.26 to 3.68)
hadoop_AMRMClientAsyncImpl_unregisterApplicationMaster
/** * Unregister the application master. This must be called in the end. * @param appStatus Success/Failure status of the master * @param appMessage Diagnostics message on failure * @param appTrackingUrl New URL to get master info * @throws YarnException * @throws IOException */ public void unregisterApplicationMaster(FinalApplicationStatus appStatus, String appMessage, String appTrackingUrl) throws YarnException, IOException { synchronized (unregisterHeartbeatLock) { keepRunning = false; client.unregisterApplicationMaster(appStatus, appMessage, appTrackingUrl); } }
3.68
hudi_ImmutableTriple_getLeft
/** * {@inheritDoc} */ @Override public L getLeft() { return left; }
3.68
querydsl_SimpleExpression_in
/** * Create a {@code this in right} expression * * @param right rhs of the comparison * @return this in right */ public BooleanExpression in(Expression<? extends T>... right) { return Expressions.booleanOperation(Ops.IN, mixin, Expressions.set(right)); }
3.68
morf_FieldReference_getName
/** * Gets the name of the field. * * @return the name */ public String getName() { return name; }
3.68
flink_SerializedJobExecutionResult_getNetRuntime
/** * Gets the net execution time of the job, i.e., the execution time in the parallel system, * without the pre-flight steps like the optimizer, in a desired time unit. * * @param desiredUnit the unit of the <tt>NetRuntime</tt> * @return The net execution time in the desired unit. */ public long getNetRuntime(TimeUnit desiredUnit) { return desiredUnit.convert(getNetRuntime(), TimeUnit.MILLISECONDS); }
3.68
framework_Table_getConverter
/** * Returns the converter used to format the given propertyId. * * @param propertyId * The propertyId to check * @return The converter used to format the propertyId or null if no * converter has been set */ public Converter<String, Object> getConverter(Object propertyId) { return propertyValueConverters.get(propertyId); }
3.68
flink_ExecutionConfig_setMaxParallelism
/** * Sets the maximum degree of parallelism defined for the program. * * <p>The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also * defines the number of key groups used for partitioned state. * * @param maxParallelism Maximum degree of parallelism to be used for the program. */ @PublicEvolving public void setMaxParallelism(int maxParallelism) { checkArgument(maxParallelism > 0, "The maximum parallelism must be greater than 0."); configuration.set(PipelineOptions.MAX_PARALLELISM, maxParallelism); }
3.68
framework_DragSourceExtension_addDragEndListener
/** * Attaches dragend listener for the current drag source. * {@link DragEndListener#dragEnd(DragEndEvent)} is called when dragend * event happens on the client side. * * @param listener * Listener to handle dragend event. * @return Handle to be used to remove this listener. */ public Registration addDragEndListener(DragEndListener<T> listener) { return addListener(DragSourceState.EVENT_DRAGEND, DragEndEvent.class, listener, DragEndListener.DRAGEND_METHOD); }
3.68
flink_SplitFetcherManager_maybeShutdownFinishedFetchers
/** * Check and shutdown the fetchers that have completed their work. * * @return true if all the fetchers have completed the work, false otherwise. */ public boolean maybeShutdownFinishedFetchers() { Iterator<Map.Entry<Integer, SplitFetcher<E, SplitT>>> iter = fetchers.entrySet().iterator(); while (iter.hasNext()) { Map.Entry<Integer, SplitFetcher<E, SplitT>> entry = iter.next(); SplitFetcher<E, SplitT> fetcher = entry.getValue(); if (fetcher.isIdle()) { LOG.info("Closing splitFetcher {} because it is idle.", entry.getKey()); fetcher.shutdown(); iter.remove(); } } return fetchers.isEmpty(); }
3.68
flink_PekkoRpcActor_handleRunAsync
/** * Handle asynchronous {@link Runnable}. This method simply executes the given {@link Runnable} * in the context of the actor thread. * * @param runAsync Run async message */ private void handleRunAsync(RunAsync runAsync) { final long timeToRun = runAsync.getTimeNanos(); final long delayNanos; if (timeToRun == 0 || (delayNanos = timeToRun - System.nanoTime()) <= 0) { // run immediately try { runWithContextClassLoader(() -> runAsync.getRunnable().run(), flinkClassLoader); } catch (Throwable t) { log.error("Caught exception while executing runnable in main thread.", t); ExceptionUtils.rethrowIfFatalErrorOrOOM(t); } } else { // schedule for later. send a new message after the delay, which will then be // immediately executed FiniteDuration delay = new FiniteDuration(delayNanos, TimeUnit.NANOSECONDS); RunAsync message = new RunAsync(runAsync.getRunnable(), timeToRun); final Object envelopedSelfMessage = envelopeSelfMessage(message); getContext() .system() .scheduler() .scheduleOnce( delay, getSelf(), envelopedSelfMessage, getContext().dispatcher(), ActorRef.noSender()); } }
3.68
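The handleRunAsync row above runs a task immediately when its System.nanoTime() deadline is absent or already passed, and schedules it otherwise. Below is a minimal sketch of the same now-or-later dispatch, with a plain ScheduledExecutorService standing in for the actor scheduler; the zero-means-run-now convention is taken from the snippet, everything else is illustrative:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch of the "run immediately or schedule for later" dispatch, with a plain
// ScheduledExecutorService standing in for the actor's scheduler.
public class RunAsyncDemo {
    static final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    // timeToRunNanos == 0 means "no deadline: run now"; otherwise it is an
    // absolute System.nanoTime() deadline.
    static void handleRunAsync(Runnable task, long timeToRunNanos) {
        long delayNanos;
        if (timeToRunNanos == 0 || (delayNanos = timeToRunNanos - System.nanoTime()) <= 0) {
            task.run(); // deadline absent or already passed
        } else {
            scheduler.schedule(task, delayNanos, TimeUnit.NANOSECONDS);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        handleRunAsync(() -> System.out.println("now"), 0);
        handleRunAsync(() -> System.out.println("later"),
                System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(100));
        Thread.sleep(200);
        scheduler.shutdown();
    }
}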
flink_SinkContextUtil_forTimestamp
/** * Creates a {@link SinkFunction.Context} that throws an exception when trying to access the * current watermark or processing time. */ public static SinkFunction.Context forTimestamp(long timestamp) { return new SinkFunction.Context() { @Override public long currentProcessingTime() { throw new RuntimeException("Not implemented"); } @Override public long currentWatermark() { throw new RuntimeException("Not implemented"); } @Override public Long timestamp() { return timestamp; } }; }
3.68
framework_VaadinService_isAtmosphereAvailable
/** * Checks whether Atmosphere is available for use. * * @since 7.6 * @return true if Atmosphere is available, false otherwise */ protected boolean isAtmosphereAvailable() { if (atmosphereAvailable == null) { atmosphereAvailable = checkAtmosphereSupport(); } return atmosphereAvailable; }
3.68
framework_Escalator_scrollToSpacer
/** * Scrolls the body vertically so that the spacer at the given row index is * visible and there is at least {@literal padding} pixels to the given * scroll destination. * * @since 7.5.0 * @param spacerIndex * the row index of the spacer to scroll to * @param destination * where the spacer should be aligned visually after scrolling * @param padding * the number of pixels to place between the scrolled-to spacer * and the viewport edge * @throws IllegalArgumentException * if {@code spacerIndex} is not an opened spacer; or if * {@code destination} is {@link ScrollDestination#MIDDLE} and * padding is nonzero; or if {@code destination == null} * @see #scrollToRow(int, ScrollDestination, int) * @see #scrollToRowAndSpacer(int, ScrollDestination, int) */ public void scrollToSpacer(final int spacerIndex, ScrollDestination destination, final int padding) throws IllegalArgumentException { validateScrollDestination(destination, padding); body.scrollToSpacer(spacerIndex, destination, padding); }
3.68
hbase_ServerRpcConnection_setupCellBlockCodecs
/** * Set up cell block codecs */ private void setupCellBlockCodecs() throws FatalConnectionException { // TODO: Plug in other supported decoders. if (!connectionHeader.hasCellBlockCodecClass()) { return; } String className = connectionHeader.getCellBlockCodecClass(); if (className == null || className.length() == 0) { return; } try { this.codec = (Codec) Class.forName(className).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new UnsupportedCellCodecException(className, e); } if (!connectionHeader.hasCellBlockCompressorClass()) { return; } className = connectionHeader.getCellBlockCompressorClass(); try { this.compressionCodec = (CompressionCodec) Class.forName(className).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new UnsupportedCompressionCodecException(className, e); } }
3.68
hbase_ByteBuff_readCompressedInt
/** Read integer from ByteBuff coded in 7 bits and increment position. */ public static int readCompressedInt(ByteBuff buf) { byte b = buf.get(); if ((b & ByteBufferUtils.NEXT_BIT_MASK) != 0) { return (b & ByteBufferUtils.VALUE_MASK) + (readCompressedInt(buf) << ByteBufferUtils.NEXT_BIT_SHIFT); } return b & ByteBufferUtils.VALUE_MASK; }
3.68
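readCompressedInt above decodes a little-endian 7-bit varint: each byte contributes 7 value bits, and the high bit marks a continuation. Here is a self-contained sketch of the scheme with a matching encoder; the concrete masks (0x80 continuation bit, 0x7f value mask, shift of 7) are assumed in place of the ByteBufferUtils constants:

import java.nio.ByteBuffer;

// Standalone sketch of the 7-bit varint scheme decoded by readCompressedInt above.
public class CompressedIntDemo {
    static int readCompressedInt(ByteBuffer buf) {
        byte b = buf.get();
        if ((b & 0x80) != 0) {
            // low 7 bits now, higher-order bits follow in subsequent bytes
            return (b & 0x7f) + (readCompressedInt(buf) << 7);
        }
        return b & 0x7f;
    }

    static void writeCompressedInt(ByteBuffer buf, int value) {
        while ((value & ~0x7f) != 0) {
            buf.put((byte) ((value & 0x7f) | 0x80)); // set continuation bit: more bytes follow
            value >>>= 7;
        }
        buf.put((byte) value);
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(5);
        writeCompressedInt(buf, 300); // encodes as bytes 0xAC, 0x02
        buf.flip();
        System.out.println(readCompressedInt(buf)); // prints 300
    }
}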
hbase_StoreFileTrackerValidationUtils_checkForNewFamily
// should not use MigrationStoreFileTracker for new family private static void checkForNewFamily(Configuration conf, TableDescriptor table, ColumnFamilyDescriptor family) throws IOException { Configuration mergedConf = StoreUtils.createStoreConfiguration(conf, table, family); Class<? extends StoreFileTracker> tracker = StoreFileTrackerFactory.getTrackerClass(mergedConf); if (MigrationStoreFileTracker.class.isAssignableFrom(tracker)) { throw new DoNotRetryIOException( "Should not use " + Trackers.MIGRATION + " as store file tracker for new family " + family.getNameAsString() + " of table " + table.getTableName()); } }
3.68
flink_HiveParallelismInference_infer
/** * Infer parallelism by number of files and number of splits. If {@link * HiveOptions#TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM} is not set this method does nothing. */ HiveParallelismInference infer( SupplierWithException<Integer, IOException> numFiles, SupplierWithException<Integer, IOException> numSplits) { if (!infer) { return this; } try { // `createInputSplits` is costly, // so we try to avoid calling it by first checking the number of files // which is the lower bound of the number of splits int lowerBound = logRunningTime("getNumFiles", numFiles); if (lowerBound >= inferMaxParallelism) { parallelism = inferMaxParallelism; return this; } int splitNum = logRunningTime("createInputSplits", numSplits); parallelism = Math.min(splitNum, inferMaxParallelism); } catch (IOException e) { throw new FlinkHiveException(e); } return this; }
3.68
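The infer method above avoids the costly createInputSplits call by first checking a cheap lower bound (the file count) against the parallelism cap. A stripped-down sketch of that short-circuit, with hypothetical suppliers in place of the Hive-specific ones:

import java.util.function.IntSupplier;

// Sketch of the short-circuit in the row above: consult a cheap lower bound
// (file count) first, and only pay for the expensive computation (split
// creation) when the bound does not already reach the parallelism cap.
public class InferParallelismDemo {
    static int infer(IntSupplier cheapLowerBound, IntSupplier expensiveExact, int maxParallelism) {
        int lowerBound = cheapLowerBound.getAsInt();
        if (lowerBound >= maxParallelism) {
            return maxParallelism; // cap already reached; skip the costly call entirely
        }
        return Math.min(expensiveExact.getAsInt(), maxParallelism);
    }

    public static void main(String[] args) {
        // 2000 files with a cap of 1000: the expensive supplier is never invoked.
        int p = infer(() -> 2000, () -> { throw new AssertionError("not called"); }, 1000);
        System.out.println(p); // 1000
    }
}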
framework_ValoColorPickerTestUI_updateDisplay
// This is called whenever a colorpicker popup is closed /** * Update the display. * * @param fg * the foreground color * @param bg * the background color */ public void updateDisplay(Color fg, Color bg) { java.awt.Color awtFg = new java.awt.Color(fg.getRed(), fg.getGreen(), fg.getBlue()); java.awt.Color awtBg = new java.awt.Color(bg.getRed(), bg.getGreen(), bg.getBlue()); StreamResource.StreamSource imagesource = new MyImageSource(awtFg, awtBg); Date now = new Date(); SimpleDateFormat format = new SimpleDateFormat("hhmmss"); StreamResource imageresource = new StreamResource(imagesource, "myimage" + format.format(now) + ".png"); imageresource.setCacheTime(0); display.setSource(imageresource); }
3.68
streampipes_DataStreamApi_subscribe
/** * Subscribe to a data stream * * @param stream The data stream to subscribe to * @param kafkaConfig Additional kafka settings which will override the default value (see docs) * @param callback The callback where events will be received */ @Override public ISubscription subscribe(SpDataStream stream, IBrokerConfigOverride kafkaConfig, EventProcessor callback) { return new SubscriptionManager(kafkaConfig, stream.getEventGrounding(), callback).subscribe(); }
3.68
dubbo_DynamicConfiguration_getDefaultGroup
/** * Get the default group for the operations * * @return The default value is {@link #DEFAULT_GROUP "dubbo"} * @since 2.7.5 */ default String getDefaultGroup() { return DEFAULT_GROUP; }
3.68
hbase_TerminatedWrapper_skip
/** * Skip {@code src}'s position forward over one encoded value. * @param src the buffer containing the encoded value. * @return number of bytes skipped. * @throws IllegalArgumentException when the terminator sequence is not found. */ @Override public int skip(PositionedByteRange src) { if (wrapped.isSkippable()) { int ret = wrapped.skip(src); src.setPosition(src.getPosition() + term.length); return ret + term.length; } else { // find the terminator position final int start = src.getPosition(); int skipped = terminatorPosition(src); if (-1 == skipped) { throw new IllegalArgumentException("Terminator sequence not found."); } skipped += term.length; src.setPosition(skipped); return skipped - start; } }
3.68
framework_AbstractComponent_getDefaultAttributes
/** * Returns a collection of attributes that do not require custom handling * when reading or writing design. These are typically attributes of some * primitive type. The default implementation searches setters with * primitive values * * @return a collection of attributes that can be read and written using the * default approach. */ private Collection<String> getDefaultAttributes() { Collection<String> attributes = DesignAttributeHandler .getSupportedAttributes(this.getClass()); attributes.removeAll(getCustomAttributes()); return attributes; }
3.68
flink_FileCatalogStore_removeCatalog
/** * Removes the specified catalog from the catalog store. * * @param catalogName the name of the catalog to remove * @param ignoreIfNotExists whether to ignore if the catalog does not exist in the catalog store * @throws CatalogException if the catalog store is not open or if there is an error removing * the catalog */ @Override public void removeCatalog(String catalogName, boolean ignoreIfNotExists) throws CatalogException { checkOpenState(); Path catalogPath = getCatalogPath(catalogName); try { FileSystem fs = catalogPath.getFileSystem(); if (fs.exists(catalogPath)) { fs.delete(catalogPath, false); } else if (!ignoreIfNotExists) { throw new CatalogException( String.format( "Catalog %s's store file %s does not exist.", catalogName, catalogPath)); } } catch (CatalogException e) { throw e; } catch (Exception e) { throw new CatalogException( String.format("Failed to remove catalog %s's store file.", catalogName), e); } }
3.68
hadoop_BalanceProcedureScheduler_findJob
/** * Find job in scheduler. * * @return the job in scheduler. Null if the scheduler has no job with the * same id. */ public BalanceJob findJob(BalanceJob job) { BalanceJob found = null; for (BalanceJob j : jobSet.keySet()) { if (j.getId().equals(job.getId())) { found = j; break; } } return found; }
3.68
pulsar_IOUtils_confirmPrompt
/** * Confirm prompt for the console operations. * * @param prompt * Prompt message to be displayed on console * @return Returns true if confirmed as 'Y', returns false if confirmed as 'N' * @throws IOException */ public static boolean confirmPrompt(String prompt) throws IOException { while (true) { System.out.print(prompt + " (Y or N) "); StringBuilder responseBuilder = new StringBuilder(); while (true) { int c = System.in.read(); if (c == -1 || c == '\r' || c == '\n') { break; } responseBuilder.append((char) c); } String response = responseBuilder.toString(); if (response.equalsIgnoreCase("y") || response.equalsIgnoreCase("yes")) { return true; } else if (response.equalsIgnoreCase("n") || response.equalsIgnoreCase("no")) { return false; } System.out.println("Invalid input: " + response); // else ask them again } }
3.68
hadoop_HsController_task
/* * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#task() */ @Override public void task() { super.task(); }
3.68
flink_IOUtils_closeAll
/** * Closes all {@link AutoCloseable} objects in the parameter, suppressing exceptions. Collected * exceptions are thrown after close() has been called on every object. * * @param closeables iterable with closeables to close. * @param suppressedException class of exceptions which should be suppressed during the closing. * @throws Exception collected exceptions that occurred during closing */ public static <T extends Throwable> void closeAll( Iterable<? extends AutoCloseable> closeables, Class<T> suppressedException) throws Exception { if (null != closeables) { Exception collectedExceptions = null; for (AutoCloseable closeable : closeables) { try { if (null != closeable) { closeable.close(); } } catch (Throwable e) { if (!suppressedException.isAssignableFrom(e.getClass())) { throw e; } Exception ex = e instanceof Exception ? (Exception) e : new Exception(e); collectedExceptions = ExceptionUtils.firstOrSuppressed(ex, collectedExceptions); } } if (null != collectedExceptions) { throw collectedExceptions; } } }
3.68
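closeAll above defers matching exceptions until close() has run on every object. A minimal sketch of the same collect-and-rethrow pattern, using the JDK's Throwable#addSuppressed in place of Flink's ExceptionUtils.firstOrSuppressed (an assumed substitution to keep the example self-contained):

import java.util.Arrays;

public class CloseAllDemo {
    // Close every resource, deferring failures until all close() calls have run.
    static void closeAll(Iterable<? extends AutoCloseable> closeables) throws Exception {
        Exception collected = null;
        for (AutoCloseable c : closeables) {
            if (c == null) continue;
            try {
                c.close();
            } catch (Exception e) {
                if (collected == null) collected = e;   // first failure wins
                else collected.addSuppressed(e);        // later failures ride along
            }
        }
        if (collected != null) throw collected;
    }

    public static void main(String[] args) throws Exception {
        AutoCloseable ok = () -> System.out.println("closed");
        AutoCloseable bad = () -> { throw new IllegalStateException("boom"); };
        // Both "closed" lines print before "boom" propagates.
        closeAll(Arrays.asList(ok, bad, ok));
    }
}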
flink_MemorySegment_getDouble
/** * Reads a double-precision floating point value (64bit, 8 bytes) from the given position, in * the system's native byte order. This method offers the best speed for double reading and * should be used unless a specific byte order is required. In most cases, it suffices to know * that the byte order in which the value is written is the same as the one in which it is read * (such as transient storage in memory, or serialization for I/O and network), making this * method the preferable choice. * * @param index The position from which the value will be read. * @return The double value at the given position. * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the * segment size minus 8. */ public double getDouble(int index) { return Double.longBitsToDouble(getLong(index)); }
3.68
hbase_ScannerModel_getCaching
/** Returns the number of rows that the scanner will fetch at once */ @XmlAttribute public int getCaching() { return caching; }
3.68
querydsl_CurveExpression_length
/** * The length of this Curve in its associated spatial reference. * * @return length */ public NumberExpression<Double> length() { if (length == null) { length = Expressions.numberOperation(Double.class, SpatialOps.LENGTH, mixin); } return length; }
3.68
hadoop_OBSFileSystem_isObsClientDFSListEnable
/** * Return a flag that indicates if OBS client specific depth first search * (DFS) list is enabled. * * @return the flag */ boolean isObsClientDFSListEnable() { return obsClientDFSListEnable; }
3.68
pulsar_ProducerConfiguration_getMessageRouter
/** * Get the message router set by {@link #setMessageRouter(MessageRouter)}. * * @return message router set by {@link #setMessageRouter(MessageRouter)}. */ public MessageRouter getMessageRouter() { return conf.getCustomMessageRouter(); }
3.68
framework_VScrollTable_disableBrowserIntelligence
/** * Disable browser measurement of the table width. */ public void disableBrowserIntelligence() { hTableContainer.getStyle().setWidth(WRAPPER_WIDTH, Unit.PX); }
3.68
hudi_FlinkTables_createTable
/** * Creates the hoodie flink table. * * <p>This expects to be used by driver. */ public static HoodieFlinkTable<?> createTable(Configuration conf) { HoodieWriteConfig writeConfig = FlinkWriteClients.getHoodieClientConfig(conf, true, false); return HoodieFlinkTable.create(writeConfig, HoodieFlinkEngineContext.DEFAULT); }
3.68
hudi_HoodieHeartbeatUtils_isHeartbeatExpired
/** * Whether a heartbeat is expired. * * @param instantTime Instant time. * @param maxAllowableHeartbeatIntervalInMs Heartbeat timeout in milliseconds. * @param fs {@link FileSystem} instance. * @param basePath Base path of the table. * @return {@code true} if expired; {@code false} otherwise. * @throws IOException upon errors. */ public static boolean isHeartbeatExpired(String instantTime, long maxAllowableHeartbeatIntervalInMs, FileSystem fs, String basePath) throws IOException { Long currentTime = System.currentTimeMillis(); Long lastHeartbeatTime = getLastHeartbeatTime(fs, basePath, instantTime); if (currentTime - lastHeartbeatTime > maxAllowableHeartbeatIntervalInMs) { LOG.warn("Heartbeat expired, for instant: " + instantTime); return true; } return false; }
3.68
AreaShop_RentRegion_setRenter
/** * Set the renter of this region. * @param renter The UUID of the player that should be set as the renter */ public void setRenter(UUID renter) { if(renter == null) { setSetting("rent.renter", null); setSetting("rent.renterName", null); } else { setSetting("rent.renter", renter.toString()); setSetting("rent.renterName", Utils.toName(renter)); } }
3.68
hadoop_OBSBlockOutputStream_write
/** * Writes a range of bytes to the memory buffer. If this causes the * buffer to reach its limit, the actual upload is submitted to the threadpool * and the remainder of the array is written to memory (recursively). * * @param source byte array containing the data to write * @param offset offset in array where to start * @param len number of bytes to be written * @throws IOException on any problem */ @Override public synchronized void write(@NotNull final byte[] source, final int offset, final int len) throws IOException { if (hasException.get()) { String closeWarning = String.format( "write has error. bs : pre upload obs[%s] has error.", key); LOG.warn(closeWarning); throw new IOException(closeWarning); } OBSDataBlocks.validateWriteArgs(source, offset, len); checkOpen(); if (len == 0) { return; } OBSDataBlocks.DataBlock block = createBlockIfNeeded(); int written = block.write(source, offset, len); int remainingCapacity = block.remainingCapacity(); try { innerWrite(source, offset, len, written, remainingCapacity); } catch (IOException e) { LOG.error( "Write data for key {} of bucket {} error, error message {}", key, fs.getBucket(), e.getMessage()); throw e; } }
3.68
hbase_AuthResult_concatenateExtraParams
/** Returns extra parameter key/value string */ private String concatenateExtraParams() { final StringBuilder sb = new StringBuilder(); boolean first = true; for (Entry<String, String> entry : extraParams.entrySet()) { if (entry.getKey() != null && entry.getValue() != null) { if (!first) { sb.append(','); } first = false; sb.append(entry.getKey() + '='); sb.append(entry.getValue()); } } return sb.toString(); }
3.68
flink_SlotProfile_getPhysicalSlotResourceProfile
/** Returns the desired resource profile for the physical slot to host this task slot. */ public ResourceProfile getPhysicalSlotResourceProfile() { return physicalSlotResourceProfile; }
3.68
hadoop_BalanceProcedureScheduler_recoverAllJobs
/** * Search all jobs and add them to recoverQueue. It's called once after the * scheduler starts. */ private void recoverAllJobs() throws IOException { BalanceJob[] jobs = journal.listAllJobs(); for (BalanceJob job : jobs) { recoverQueue.add(job); jobSet.put(job, job); LOG.info("Recover federation balance job {}.", job); } }
3.68
flink_CopyOnWriteStateMap_releaseSnapshot
/** * Releases a snapshot for this {@link CopyOnWriteStateMap}. This method should be called once a * snapshot is no longer needed, so that the {@link CopyOnWriteStateMap} can stop considering this * snapshot for copy-on-write, thus avoiding unnecessary object creation. * * @param snapshotToRelease the snapshot to release, which was previously created by this state * map. */ @Override public void releaseSnapshot( StateMapSnapshot<K, N, S, ? extends StateMap<K, N, S>> snapshotToRelease) { CopyOnWriteStateMapSnapshot<K, N, S> copyOnWriteStateMapSnapshot = (CopyOnWriteStateMapSnapshot<K, N, S>) snapshotToRelease; Preconditions.checkArgument( copyOnWriteStateMapSnapshot.isOwner(this), "Cannot release snapshot which is owned by a different state map."); releaseSnapshot(copyOnWriteStateMapSnapshot.getSnapshotVersion()); }
3.68
morf_UpdateStatement_getFields
/** * Gets the list of fields * * @return the fields */ public List<AliasedField> getFields() { return fields; }
3.68
hudi_InternalSchemaChangeApplier_applyColumnNullabilityChange
/** * Update col nullability for hudi table. * * @param colName col name to be changed. if we want to change a col inside a nested field, the full name should be specified * @param nullable whether the column should be nullable. */ public InternalSchema applyColumnNullabilityChange(String colName, boolean nullable) { TableChanges.ColumnUpdateChange updateChange = TableChanges.ColumnUpdateChange.get(latestSchema); updateChange.updateColumnNullability(colName, nullable); return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, updateChange); }
3.68
hudi_ExpressionPredicates_getFunctionDefinition
/** * Returns function definition of predicate. * * @return A function definition of predicate. */ public FunctionDefinition getFunctionDefinition() { return null; }
3.68
hbase_AsyncTableRegionLocator_getRegionLocations
/** * Find all the replicas for the region on which the given row is being served. * @param row Row to find. * @return Locations for all the replicas of the row. */ default CompletableFuture<List<HRegionLocation>> getRegionLocations(byte[] row) { return getRegionLocations(row, false); }
3.68
flink_ThrowingRunnable_unchecked
/** * Converts a {@link ThrowingRunnable} into a {@link Runnable} which throws all checked * exceptions as unchecked. * * @param throwingRunnable to convert into a {@link Runnable} * @return {@link Runnable} which throws all checked exceptions as unchecked. */ static Runnable unchecked(ThrowingRunnable<?> throwingRunnable) { return () -> { try { throwingRunnable.run(); } catch (Throwable t) { ExceptionUtils.rethrow(t); } }; }
3.68
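ThrowingRunnable.unchecked above relies on ExceptionUtils.rethrow to let checked exceptions escape a Runnable. A self-contained sketch of the underlying trick, assuming the usual generic "sneaky throw" idiom (the real ExceptionUtils implementation may differ):

public class UncheckedDemo {
    @FunctionalInterface
    interface ThrowingRunnable<E extends Throwable> {
        void run() throws E;
    }

    // "Sneaky throw": generic erasure lets a checked Throwable escape unchecked,
    // because the compiler infers T = RuntimeException at the call site.
    @SuppressWarnings("unchecked")
    static <T extends Throwable> void sneakyThrow(Throwable t) throws T {
        throw (T) t;
    }

    static Runnable unchecked(ThrowingRunnable<?> r) {
        return () -> {
            try {
                r.run();
            } catch (Throwable t) {
                sneakyThrow(t); // no throws clause needed on the returned Runnable
            }
        };
    }

    public static void main(String[] args) {
        Runnable r = unchecked(() -> { throw new java.io.IOException("checked, but escapes"); });
        r.run(); // throws the IOException despite Runnable declaring no checked exceptions
    }
}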
framework_Calendar_isEventCaptionAsHtml
/** * Checks whether event captions are rendered as HTML * <p> * The default is false, i.e. to render that caption as plain text. * * @return true if the captions are rendered as HTML, false if rendered as * plain text */ public boolean isEventCaptionAsHtml() { return getState(false).eventCaptionAsHtml; }
3.68
morf_Function_leftPad
/** * Convenience helper method to create an instance of the <code>LPAD</code> SQL function. * <p>Pads the <code>character</code> on the left of <code>field</code> so that the size equals <code>length</code></p> * <p>The field should be of type {@link org.alfasoftware.morf.metadata.DataType#STRING}</p> * * @param field String field to pad. * @param length target length. * @param character character to pad. * @return an instance of LPAD function. */ public static Function leftPad(AliasedField field, int length, String character) { return new Function(FunctionType.LEFT_PAD, field, literal(length), literal(character)); }
3.68
zilla_ManyToOneRingBuffer_commit
/** * {@inheritDoc} */ public void commit(final int index) { final int recordIndex = computeRecordIndex(index); final AtomicBuffer buffer = this.buffer; final int recordLength = verifyClaimedSpaceNotReleased(buffer, recordIndex); buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength); }
3.68
hadoop_RollingWindow_isStaleNow
/** * Check whether the last time that the bucket was updated is no longer * covered by rolling window. * * @param time the current time * @return true if the bucket state is stale */ boolean isStaleNow(long time) { long utime = updateTime.get(); return (utime == -1) || (time - utime >= windowLenMs); }
3.68
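isStaleNow above treats a bucket as stale when it was never updated (sentinel -1) or its last update fell outside the rolling window. A small runnable sketch of the rule with a hypothetical 60-second window:

import java.util.concurrent.atomic.AtomicLong;

// Minimal sketch of the staleness rule above: stale when never updated (-1)
// or when the last update is at least one window length in the past.
public class StaleCheckDemo {
    static final long WINDOW_LEN_MS = 60_000; // hypothetical window length
    static final AtomicLong updateTime = new AtomicLong(-1);

    static boolean isStaleNow(long now) {
        long utime = updateTime.get();
        return utime == -1 || now - utime >= WINDOW_LEN_MS;
    }

    public static void main(String[] args) {
        System.out.println(isStaleNow(100_000)); // true: never updated
        updateTime.set(100_000);
        System.out.println(isStaleNow(130_000)); // false: 30s old, inside the 60s window
        System.out.println(isStaleNow(170_000)); // true: 70s old, outside the window
    }
}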
hadoop_OBSLoginHelper_extractLoginDetailsWithWarnings
/** * Extract the login details from a URI, logging a warning if the URI contains * these. * * @param name URI of the filesystem * @return a login tuple, possibly empty. */ public static Login extractLoginDetailsWithWarnings(final URI name) { Login login = extractLoginDetails(name); if (login.hasLogin()) { LOG.warn(LOGIN_WARNING); } return login; }
3.68
flink_ExecutionFailureHandler_getGlobalFailureHandlingResult
/** * Returns the result of handling a global failure: either a set of task vertices to * restart together with a restart delay, or, if the failure is not recoverable, the reason * for it. * * @param cause of the task failure * @param timestamp of the task failure * @return result of the failure handling */ public FailureHandlingResult getGlobalFailureHandlingResult( final Throwable cause, long timestamp) { return handleFailure( null, cause, timestamp, IterableUtils.toStream(schedulingTopology.getVertices()) .map(SchedulingExecutionVertex::getId) .collect(Collectors.toSet()), true); }
3.68
hbase_HFileBlock_getBufferReadOnly
/** * Returns a read-only duplicate of the buffer this block stores internally ready to be read. * Clients must not modify the buffer object though they may set position and limit on the * returned buffer since we pass back a duplicate. This method has to be public because it is used * in {@link CompoundBloomFilter} to avoid object creation on every Bloom filter lookup, but has * to be used with caution. Buffer holds header, block content, and any follow-on checksums if * present. * @return the buffer of this block for read-only operations, the buffer includes header, but not * checksum. */ public ByteBuff getBufferReadOnly() { // TODO: ByteBuf does not support asReadOnlyBuffer(). Fix. ByteBuff dup = this.bufWithoutChecksum.duplicate(); assert dup.position() == 0; return dup; }
3.68
zxing_CaptureActivity_handleDecodeInternally
// Put up our own UI for how to handle the decoded contents. private void handleDecodeInternally(Result rawResult, ResultHandler resultHandler, Bitmap barcode) { maybeSetClipboard(resultHandler); SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(this); if (resultHandler.getDefaultButtonID() != null && prefs.getBoolean(PreferencesActivity.KEY_AUTO_OPEN_WEB, false)) { resultHandler.handleButtonPress(resultHandler.getDefaultButtonID()); return; } statusView.setVisibility(View.GONE); viewfinderView.setVisibility(View.GONE); resultView.setVisibility(View.VISIBLE); ImageView barcodeImageView = (ImageView) findViewById(R.id.barcode_image_view); if (barcode == null) { barcodeImageView.setImageBitmap(BitmapFactory.decodeResource(getResources(), R.drawable.launcher_icon)); } else { barcodeImageView.setImageBitmap(barcode); } TextView formatTextView = (TextView) findViewById(R.id.format_text_view); formatTextView.setText(rawResult.getBarcodeFormat().toString()); TextView typeTextView = (TextView) findViewById(R.id.type_text_view); typeTextView.setText(resultHandler.getType().toString()); DateFormat formatter = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT); TextView timeTextView = (TextView) findViewById(R.id.time_text_view); timeTextView.setText(formatter.format(rawResult.getTimestamp())); TextView metaTextView = (TextView) findViewById(R.id.meta_text_view); View metaTextViewLabel = findViewById(R.id.meta_text_view_label); metaTextView.setVisibility(View.GONE); metaTextViewLabel.setVisibility(View.GONE); Map<ResultMetadataType,Object> metadata = rawResult.getResultMetadata(); if (metadata != null) { StringBuilder metadataText = new StringBuilder(20); for (Map.Entry<ResultMetadataType,Object> entry : metadata.entrySet()) { if (DISPLAYABLE_METADATA_TYPES.contains(entry.getKey())) { metadataText.append(entry.getValue()).append('\n'); } } if (metadataText.length() > 0) { metadataText.setLength(metadataText.length() - 1); metaTextView.setText(metadataText); metaTextView.setVisibility(View.VISIBLE); metaTextViewLabel.setVisibility(View.VISIBLE); } } CharSequence displayContents = resultHandler.getDisplayContents(); TextView contentsTextView = (TextView) findViewById(R.id.contents_text_view); contentsTextView.setText(displayContents); int scaledSize = Math.max(22, 32 - displayContents.length() / 4); contentsTextView.setTextSize(TypedValue.COMPLEX_UNIT_SP, scaledSize); TextView supplementTextView = (TextView) findViewById(R.id.contents_supplement_text_view); supplementTextView.setText(""); supplementTextView.setOnClickListener(null); if (PreferenceManager.getDefaultSharedPreferences(this).getBoolean( PreferencesActivity.KEY_SUPPLEMENTAL, true)) { SupplementalInfoRetriever.maybeInvokeRetrieval(supplementTextView, resultHandler.getResult(), historyManager, this); } int buttonCount = resultHandler.getButtonCount(); ViewGroup buttonView = (ViewGroup) findViewById(R.id.result_button_view); buttonView.requestFocus(); for (int x = 0; x < ResultHandler.MAX_BUTTON_COUNT; x++) { TextView button = (TextView) buttonView.getChildAt(x); if (x < buttonCount) { button.setVisibility(View.VISIBLE); button.setText(resultHandler.getButtonText(x)); button.setOnClickListener(new ResultButtonListener(resultHandler, x)); } else { button.setVisibility(View.GONE); } } }
3.68
morf_H2Dialect_indexDeploymentStatements
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#indexDeploymentStatements(org.alfasoftware.morf.metadata.Table, * org.alfasoftware.morf.metadata.Index) */ @Override protected Collection<String> indexDeploymentStatements(Table table, Index index) { StringBuilder statement = new StringBuilder(); statement.append("CREATE "); if (index.isUnique()) { statement.append("UNIQUE "); } statement.append("INDEX ").append(index.getName()).append(" ON ").append(schemaNamePrefix()).append(table.getName()).append(" (") .append(Joiner.on(',').join(index.columnNames())).append(")"); return Collections.singletonList(statement.toString()); }
3.68
hbase_FilterBase_filterAllRemaining
/** * Filters that never filter all remaining can inherit this implementation that never stops the * filter early. {@inheritDoc} */ @Override public boolean filterAllRemaining() throws IOException { return false; }
3.68
framework_StringToLongConverter_convertToModel
/* * (non-Javadoc) * * @see * com.vaadin.data.util.converter.Converter#convertToModel(java.lang.Object, * java.lang.Class, java.util.Locale) */ @Override public Long convertToModel(String value, Class<? extends Long> targetType, Locale locale) throws ConversionException { Number n = convertToNumber(value, targetType, locale); return n == null ? null : n.longValue(); }
3.68
framework_AbstractOrderedLayoutConnector_hasTooltip
/* * (non-Javadoc) * * @see com.vaadin.client.ui.AbstractComponentConnector#hasTooltip() */ @Override public boolean hasTooltip() { /* * Tooltips are fetched from child connectors -> there's no quick way of * checking whether there might be a tooltip hiding somewhere */ return true; }
3.68
rocketmq-connect_MemoryClusterManagementServiceImpl_registerListener
/** * Register a worker status listener to listen for changes of alive workers. * * @param listener the listener to register */ @Override public void registerListener(WorkerStatusListener listener) { }
3.68
hibernate-validator_UUIDValidator_extractVersion
/** * Get the 4 bit UUID version from the current value * * @param version The old version (in case the version has already been extracted) * @param index The index of the current value to find the version to extract * @param value The numeric value at the character position */ private static int extractVersion(int version, int index, int value) { if ( index == 14 ) { return value; } return version; }
3.68
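extractVersion above keys on character index 14 because, in the canonical textual form xxxxxxxx-xxxx-Mxxx-Nxxx-xxxxxxxxxxxx, that position holds the version nibble M. A tiny sketch confirming the index arithmetic:

// The version nibble of a canonical UUID string sits at character index 14
// ("xxxxxxxx-xxxx-Mxxx-..."), which is why the validator above keys on index 14.
public class UuidVersionDemo {
    static int version(String uuid) {
        return Character.digit(uuid.charAt(14), 16);
    }

    public static void main(String[] args) {
        System.out.println(version("123e4567-e89b-12d3-a456-426614174000")); // 1
        System.out.println(version(java.util.UUID.randomUUID().toString())); // 4
    }
}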
pulsar_LoadManagerShared_shouldAntiAffinityNamespaceUnload
/** * * It checks if the given anti-affinity namespace should be unloaded by the broker due to load-shedding. If all the brokers * own the same number of anti-affinity namespaces, then unloading this namespace again ends up at the same broker * from which it was unloaded. So, this util checks that the given namespace should be unloaded only if it can be loaded * by a different broker. * * @param namespace * @param bundle * @param currentBroker * @param pulsar * @param brokerToNamespaceToBundleRange * @param candidateBrokers * @return * @throws Exception */ public static boolean shouldAntiAffinityNamespaceUnload( String namespace, String bundle, String currentBroker, final PulsarService pulsar, final ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> brokerToNamespaceToBundleRange, Set<String> candidateBrokers) throws Exception { Map<String, Integer> brokerNamespaceCount = getAntiAffinityNamespaceOwnedBrokers(pulsar, namespace, brokerToNamespaceToBundleRange).get(10, TimeUnit.SECONDS); return shouldAntiAffinityNamespaceUnload(currentBroker, candidateBrokers, brokerNamespaceCount); }
3.68
flink_MapNode_computeOperatorSpecificDefaultEstimates
/** * Computes the estimates for the Map operator. We assume that by default, Map takes one value * and transforms it into another value. The cardinality consequently stays the same. */ @Override protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) { this.estimatedNumRecords = getPredecessorNode().getEstimatedNumRecords(); }
3.68
flink_SqlFunctionUtils_sround
/** SQL <code>ROUND</code> operator applied to DecimalData values. */ public static DecimalData sround(DecimalData b0, int b1) { return DecimalDataUtils.sround(b0, b1); }
3.68
flink_ObjectIdentifier_toObjectPath
/** * Convert this {@link ObjectIdentifier} to {@link ObjectPath}. * * @throws TableException if the identifier cannot be converted */ public ObjectPath toObjectPath() throws TableException { if (catalogName == null) { throw new TableException( "This ObjectIdentifier instance refers to an anonymous object, " + "hence it cannot be converted to ObjectPath and cannot be serialized."); } return new ObjectPath(databaseName, objectName); }
3.68
morf_SqlDialect_addTableFromStatementsWithCasting
/** * Generates the SQL to create a table and insert the data specified in the {@link SelectStatement}. * * For supported dialects, this method casts each field in the provided select using the column definition of the provided table. * * Validation is performed to confirm that the fields included in the select statement correspond to the table columns. * * @param table The table to create. * @param selectStatement The {@link SelectStatement} * @return A collection of SQL statements */ public Collection<String> addTableFromStatementsWithCasting(Table table, SelectStatement selectStatement) { return addTableFromStatements(table, selectStatement); }
3.68
hbase_FilterBase_filterRow
/** * Filters that never filter by rows based on previously gathered state from * {@link #filterCell(Cell)} can inherit this implementation that never filters a row. * {@inheritDoc} */ @Override public boolean filterRow() throws IOException { return false; }
3.68
flink_UpsertTestSinkBuilder_setOutputFile
/** * Sets the output {@link File} to write to. * * @param outputFile * @return {@link UpsertTestSinkBuilder} */ public UpsertTestSinkBuilder<IN> setOutputFile(File outputFile) { this.outputFile = checkNotNull(outputFile); return this; }
3.68
flink_PythonConfigUtil_extractPythonConfiguration
/** Extract the configurations which are used in the Python operators. */ public static Configuration extractPythonConfiguration( List<Tuple2<String, DistributedCache.DistributedCacheEntry>> cachedFiles, ReadableConfig config) { final Configuration pythonDependencyConfig = PythonDependencyUtils.configurePythonDependencies(cachedFiles, config); final PythonConfig pythonConfig = new PythonConfig(config, pythonDependencyConfig); return pythonConfig.toConfiguration(); }
3.68
hbase_PermissionStorage_getUserTablePermissions
/** * Returns the currently granted permissions for a given table as the specified user plus * associated permissions. */ public static List<UserPermission> getUserTablePermissions(Configuration conf, TableName tableName, byte[] cf, byte[] cq, String userName, boolean hasFilterUser) throws IOException { return getUserPermissions(conf, tableName == null ? null : tableName.getName(), cf, cq, userName, hasFilterUser); }
3.68
hbase_CellUtil_isPut
/** Returns True if this cell is a Put. */ @SuppressWarnings("deprecation") public static boolean isPut(Cell cell) { return cell.getTypeByte() == KeyValue.Type.Put.getCode(); }
3.68
flink_Tuple6_toString
/** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5), where the * individual fields are the value returned by calling {@link Object#toString} on that field. * * @return The string representation of the tuple. */ @Override public String toString() { return "(" + StringUtils.arrayAwareToString(this.f0) + "," + StringUtils.arrayAwareToString(this.f1) + "," + StringUtils.arrayAwareToString(this.f2) + "," + StringUtils.arrayAwareToString(this.f3) + "," + StringUtils.arrayAwareToString(this.f4) + "," + StringUtils.arrayAwareToString(this.f5) + ")"; }
3.68
hadoop_WriteOperationHelper_finalizeMultipartUpload
/** * Finalize a multipart PUT operation. * This completes the upload, and, if that works, calls * {@link S3AFileSystem#finishedWrite(String, long, String, String, org.apache.hadoop.fs.s3a.impl.PutObjectOptions)} * to update the filesystem. * Retry policy: retrying, translated. * @param destKey destination of the commit * @param uploadId multipart operation Id * @param partETags list of partial uploads * @param length length of the upload * @param putOptions put object options * @param retrying retrying callback * @return the result of the operation. * @throws IOException on problems. */ @Retries.RetryTranslated private CompleteMultipartUploadResponse finalizeMultipartUpload( String destKey, String uploadId, List<CompletedPart> partETags, long length, PutObjectOptions putOptions, Retried retrying) throws IOException { if (partETags.isEmpty()) { throw new PathIOException(destKey, "No upload parts in multipart upload"); } try (AuditSpan span = activateAuditSpan()) { CompleteMultipartUploadResponse uploadResult; uploadResult = invoker.retry("Completing multipart upload", destKey, true, retrying, () -> { final CompleteMultipartUploadRequest.Builder requestBuilder = getRequestFactory().newCompleteMultipartUploadRequestBuilder( destKey, uploadId, partETags); return writeOperationHelperCallbacks.completeMultipartUpload(requestBuilder.build()); }); owner.finishedWrite(destKey, length, uploadResult.eTag(), uploadResult.versionId(), putOptions); return uploadResult; } }
3.68
pulsar_BrokerInterceptor_txnOpened
/** * Intercept when a transaction begins. * * @param tcId Transaction Coordinator Id * @param txnID Transaction ID */ default void txnOpened(long tcId, String txnID) { }
3.68
framework_Window_getTabStopBottomAssistiveText
/** * Gets the message that is provided to users of assistive devices when the * user reaches the bottom of the window, in the case where leaving the window with the Tab * key is prevented. * * @return the bottom message */ public String getTabStopBottomAssistiveText() { return getState(false).assistiveTabStopBottomText; }
3.68
hbase_KeyValue_toStringMap
/** * Produces a string map for this key/value pair. Useful for programmatic use and manipulation of * the data stored in a WALKey, for example, printing as JSON. Values are left out due to their * tendency to be large. If needed, they can be added manually. * @return the Map&lt;String,?&gt; containing data from this key */ public Map<String, Object> toStringMap() { Map<String, Object> stringMap = new HashMap<>(); stringMap.put("row", Bytes.toStringBinary(getRowArray(), getRowOffset(), getRowLength())); stringMap.put("family", Bytes.toStringBinary(getFamilyArray(), getFamilyOffset(), getFamilyLength())); stringMap.put("qualifier", Bytes.toStringBinary(getQualifierArray(), getQualifierOffset(), getQualifierLength())); stringMap.put("timestamp", getTimestamp()); stringMap.put("vlen", getValueLength()); Iterator<Tag> tags = getTags(); if (tags != null) { List<String> tagsString = new ArrayList<String>(); while (tags.hasNext()) { tagsString.add(tags.next().toString()); } stringMap.put("tag", tagsString); } return stringMap; }
3.68
hadoop_NoopAuditManagerS3A_checkAccess
/** * Forward to the auditor. * @param path path to check * @param status status of the path. * @param mode access mode. * @throws IOException failure */ @Override public boolean checkAccess(final Path path, final S3AFileStatus status, final FsAction mode) throws IOException { return auditor.checkAccess(path, status, mode); }
3.68
framework_Escalator_recalculateScrollbarsForVirtualViewport
/** * Recalculates the virtual viewport represented by the scrollbars, so * that the sizes of the scroll handles appear correct in the browser */ public void recalculateScrollbarsForVirtualViewport() { double scrollContentHeight = body.calculateTotalRowHeight() + body.spacerContainer.getSpacerHeightsSum(); double scrollContentWidth = columnConfiguration.calculateRowWidth(); double tableWrapperHeight = heightOfEscalator; double tableWrapperWidth = widthOfEscalator; boolean verticalScrollNeeded = scrollContentHeight > tableWrapperHeight + WidgetUtil.PIXEL_EPSILON - header.getHeightOfSection() - footer.getHeightOfSection(); boolean horizontalScrollNeeded = scrollContentWidth > tableWrapperWidth + WidgetUtil.PIXEL_EPSILON; // One dimension got scrollbars, but not the other. Recheck time! if (verticalScrollNeeded != horizontalScrollNeeded) { if (!verticalScrollNeeded && horizontalScrollNeeded) { verticalScrollNeeded = scrollContentHeight > tableWrapperHeight + WidgetUtil.PIXEL_EPSILON - header.getHeightOfSection() - footer.getHeightOfSection() - horizontalScrollbar.getScrollbarThickness(); } else { horizontalScrollNeeded = scrollContentWidth > tableWrapperWidth + WidgetUtil.PIXEL_EPSILON - verticalScrollbar.getScrollbarThickness(); } } // let's fix the table wrapper size, since it's now stable. if (verticalScrollNeeded) { tableWrapperWidth -= verticalScrollbar.getScrollbarThickness(); tableWrapperWidth = Math.max(0, tableWrapperWidth); } if (horizontalScrollNeeded) { tableWrapperHeight -= horizontalScrollbar .getScrollbarThickness(); tableWrapperHeight = Math.max(0, tableWrapperHeight); } tableWrapper.getStyle().setHeight(tableWrapperHeight, Unit.PX); tableWrapper.getStyle().setWidth(tableWrapperWidth, Unit.PX); double footerHeight = footer.getHeightOfSection(); double headerHeight = header.getHeightOfSection(); double vScrollbarHeight = Math.max(0, tableWrapperHeight - footerHeight - headerHeight); verticalScrollbar.setOffsetSize(vScrollbarHeight); verticalScrollbar.setScrollSize(scrollContentHeight); /* * If decreasing the amount of frozen columns, and scrolled to the * right, the scroll position might reset. So we need to remember * the scroll position, and re-apply it once the scrollbar size has * been adjusted. */ double prevScrollPos = horizontalScrollbar.getScrollPos(); double unfrozenPixels = columnConfiguration .getCalculatedColumnsWidth(Range.between( columnConfiguration.getFrozenColumnCount(), columnConfiguration.getColumnCount())); double frozenPixels = scrollContentWidth - unfrozenPixels; double hScrollOffsetWidth = tableWrapperWidth - frozenPixels; horizontalScrollbar.setOffsetSize(hScrollOffsetWidth); horizontalScrollbar.setScrollSize(unfrozenPixels); horizontalScrollbar.getElement().getStyle().setLeft(frozenPixels, Unit.PX); horizontalScrollbar.setScrollPos(prevScrollPos); /* * only show the scrollbar wrapper if the scrollbar itself is * visible. */ if (horizontalScrollbar.showsScrollHandle()) { horizontalScrollbarDeco.getStyle().clearDisplay(); } else { horizontalScrollbarDeco.getStyle().setDisplay(Display.NONE); } /* * only show corner background divs if the vertical scrollbar is * visible. */ Style hCornerStyle = headerDeco.getStyle(); Style fCornerStyle = footerDeco.getStyle(); if (verticalScrollbar.showsScrollHandle()) { hCornerStyle.clearDisplay(); fCornerStyle.clearDisplay(); if (horizontalScrollbar.showsScrollHandle()) { double offset = horizontalScrollbar.getScrollbarThickness(); fCornerStyle.setBottom(offset, Unit.PX); } else { fCornerStyle.clearBottom(); } } else { hCornerStyle.setDisplay(Display.NONE); fCornerStyle.setDisplay(Display.NONE); } }
3.68
flink_AbstractMultipleInputTransformation_getOperatorFactory
/** Returns the {@code StreamOperatorFactory} of this Transformation. */ public StreamOperatorFactory<OUT> getOperatorFactory() { return operatorFactory; }
3.68
zxing_PDF417_setEncoding
/** * @param encoding sets character encoding to use */ public void setEncoding(Charset encoding) { this.encoding = encoding; }
3.68
graphhopper_Helper_intToEle
/** * Converts the integer value retrieved from storage into elevation (in meters). Do not expect * more precision than meters, although the stored value currently has more! */ public static double intToEle(int integEle) { if (integEle == Integer.MAX_VALUE) return Double.MAX_VALUE; return integEle / ELE_FACTOR; }
3.68
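intToEle above is one half of a fixed-point codec for elevations, with Integer.MAX_VALUE as a "no data" sentinel. A round-trip sketch of both directions; the factor of 10 (0.1 m resolution) is an assumption for illustration, not necessarily GraphHopper's actual ELE_FACTOR:

// Round-trip sketch of the fixed-point elevation encoding above. The real
// ELE_FACTOR lives in GraphHopper's Helper class; the value 10 here is an
// assumed stand-in.
public class EleCodecDemo {
    static final double ELE_FACTOR = 10.0; // hypothetical resolution: decimeters

    static int eleToInt(double ele) {
        if (ele == Double.MAX_VALUE) return Integer.MAX_VALUE; // sentinel for "no data"
        return (int) Math.round(ele * ELE_FACTOR);
    }

    static double intToEle(int integEle) {
        if (integEle == Integer.MAX_VALUE) return Double.MAX_VALUE;
        return integEle / ELE_FACTOR;
    }

    public static void main(String[] args) {
        System.out.println(intToEle(eleToInt(447.3)));            // 447.3
        System.out.println(intToEle(eleToInt(Double.MAX_VALUE))); // sentinel survives the round trip
    }
}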
flink_ZooKeeperUtils_createCheckpointIDCounter
/** * Creates a {@link ZooKeeperCheckpointIDCounter} instance. * * @param client The {@link CuratorFramework} ZooKeeper client to use * @return {@link ZooKeeperCheckpointIDCounter} instance */ public static ZooKeeperCheckpointIDCounter createCheckpointIDCounter(CuratorFramework client) { return new ZooKeeperCheckpointIDCounter( client, new DefaultLastStateConnectionStateListener()); }
3.68
framework_FilesystemContainer_removeAllItems
/* * (non-Javadoc) * * @see com.vaadin.data.Container#removeAllItems() */ @Override public boolean removeAllItems() throws UnsupportedOperationException { throw new UnsupportedOperationException( "File system container does not support this operation"); }
3.68
morf_AbstractSqlDialectTest_testAlterRemoveColumnFromCompositeKey
/** * Test changing a column to remove it from a composite primary key. */ @Test public void testAlterRemoveColumnFromCompositeKey() { testAlterTableColumn(COMPOSITE_PRIMARY_KEY_TABLE, AlterationType.ALTER, getColumn(COMPOSITE_PRIMARY_KEY_TABLE, SECOND_PRIMARY_KEY), column(SECOND_PRIMARY_KEY, DataType.STRING, 5).nullable(), expectedAlterRemoveColumnFromCompositeKeyStatements()); }
3.68
hadoop_AbstractDelegationTokenBinding_deployUnbonded
/** * Perform any actions when deploying unbonded, and return a list * of credential providers. * @return non-empty list of AWS credential providers to use for * authenticating this client with AWS services. * @throws IOException any failure. * @throws UnsupportedOperationException in the base implementation. */ public AWSCredentialProviderList deployUnbonded() throws IOException { throw new UnsupportedOperationException("unimplemented"); }
3.68
hibernate-validator_ExecutableMetaData_assertCorrectnessOfConfiguration
/** * <p> * Checks the configuration of this method for correctness as per the * rules outlined in the Bean Validation specification, section 4.5.5 * ("Method constraints in inheritance hierarchies"). * </p> * <p> * In particular, overriding methods in sub-types may not add parameter * constraints and the return value of an overriding method may not be * marked as cascaded if the return value is marked as cascaded already * on the overridden method. * </p> * * @throws jakarta.validation.ConstraintDeclarationException In case any of the rules mandated by the * specification are violated. */ private void assertCorrectnessOfConfiguration() { for ( Entry<Class<?>, ConstrainedExecutable> entry : executablesByDeclaringType.entrySet() ) { for ( Entry<Class<?>, ConstrainedExecutable> otherEntry : executablesByDeclaringType.entrySet() ) { for ( MethodConfigurationRule rule : rules ) { rule.apply( entry.getValue(), otherEntry.getValue() ); } } } }
3.68
hadoop_JobBase_getReport
/** * Get a report of the counters, one key/value pair per line. * * @return the report as a string */ protected String getReport() { StringBuffer sb = new StringBuffer(); Iterator iter = this.longCounters.entrySet().iterator(); while (iter.hasNext()) { Entry e = (Entry) iter.next(); sb.append(e.getKey().toString()).append("\t").append(e.getValue()) .append("\n"); } iter = this.doubleCounters.entrySet().iterator(); while (iter.hasNext()) { Entry e = (Entry) iter.next(); sb.append(e.getKey().toString()).append("\t").append(e.getValue()) .append("\n"); } return sb.toString(); }
3.68
hadoop_AbfsConfiguration_getAccountSpecificClass
/** * Returns the account-specific class if it exists, else returns default value. * @param name Account-agnostic configuration key * @param defaultValue Class returned if none is configured * @param xface Interface shared by all possible values * @param <U> Interface class type * @return Account specific Class object that was found */ public <U> Class<? extends U> getAccountSpecificClass(String name, Class<? extends U> defaultValue, Class<U> xface) { return rawConfig.getClass(accountConf(name), defaultValue, xface); }
3.68
morf_SchemaUtils_autoNumbered
/** * @see org.alfasoftware.morf.metadata.SchemaUtils.ColumnBuilder#autoNumbered(int) */ @Override public ColumnBuilder autoNumbered(int from) { return new ColumnBuilderImpl(this, isNullable(), getDefaultValue(), isPrimaryKey(), true, from); }
3.68
flink_SourceReader_pauseOrResumeSplits
/** * Pauses or resumes reading of individual source splits. * * <p>Note that no other methods can be called in parallel, so updating subscriptions can be * done atomically. This method is simply providing connectors with more expressive APIs the * opportunity to update all subscriptions at once. * * <p>This is currently used to align the watermarks of splits, if watermark alignment is used * and the source reads from more than one split. * * <p>The default implementation throws an {@link UnsupportedOperationException}; the * default implementation will be removed in future releases. To be compatible with future * releases, it is recommended to implement this method and override the default implementation. * * @param splitsToPause the splits to pause * @param splitsToResume the splits to resume */ @PublicEvolving default void pauseOrResumeSplits( Collection<String> splitsToPause, Collection<String> splitsToResume) { throw new UnsupportedOperationException( "This source reader does not support pausing or resuming splits which can lead to unaligned splits.\n" + "Unaligned splits are splits where the output watermarks of the splits have diverged more than the allowed limit.\n" + "It is highly discouraged to use unaligned source splits, as this leads to unpredictable\n" + "watermark alignment if there is more than a single split per reader. It is recommended to implement pausing splits\n" + "for this source. At your own risk, you can allow unaligned source splits by setting the\n" + "configuration parameter `pipeline.watermark-alignment.allow-unaligned-source-splits' to true.\n" + "Beware that this configuration parameter will be dropped in a future Flink release."); }
3.68
flink_InputChannel_getChannelInfo
/** * Returns the info of this channel, which uniquely identifies the channel in respect to its * operator instance. */ public InputChannelInfo getChannelInfo() { return channelInfo; }
3.68
flink_ResourceCounter_getResources
/** * Gets all stored {@link ResourceProfile ResourceProfiles}. * * @return collection of stored {@link ResourceProfile ResourceProfiles} */ public Set<ResourceProfile> getResources() { return resources.keySet(); }
3.68
morf_MySqlDialect_changePrimaryKeyColumns
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#changePrimaryKeyColumns(Table, List, List) */ @Override public Collection<String> changePrimaryKeyColumns(Table table, List<String> oldPrimaryKeyColumns, List<String> newPrimaryKeyColumns) { ArrayList<String> result = Lists.newArrayList(); if (!oldPrimaryKeyColumns.isEmpty()) { result.add(dropPrimaryKey(table.getName())); } if (!newPrimaryKeyColumns.isEmpty()) { result.add(new StringBuilder() .append("ALTER TABLE `") .append(table.getName()) .append("` ADD ") .append(buildPrimaryKeyConstraint(table.getName(), newPrimaryKeyColumns)).toString()); } return result; }
3.68
flink_QueryableStateConfiguration_disabled
/** Gets the configuration describing the queryable state as deactivated. */ public static QueryableStateConfiguration disabled() { final Iterator<Integer> proxyPorts = NetUtils.getPortRangeFromString( QueryableStateOptions.PROXY_PORT_RANGE.defaultValue()); final Iterator<Integer> serverPorts = NetUtils.getPortRangeFromString( QueryableStateOptions.SERVER_PORT_RANGE.defaultValue()); return new QueryableStateConfiguration(proxyPorts, serverPorts, 0, 0, 0, 0); }
3.68
hbase_SyncReplicationReplayWALManager_addUsedPeerWorker
/** * Will only be called when loading procedures, where we need to construct the used worker set for * each peer. */ public void addUsedPeerWorker(String peerId, ServerName worker) { usedWorkersByPeer.get(peerId).used(worker); }
3.68
flink_RpcEndpoint_start
/** * Triggers start of the rpc endpoint. This tells the underlying rpc server that the rpc * endpoint is ready to process remote procedure calls. */ public final void start() { rpcServer.start(); }
3.68
hbase_ReplicationPeer_trackPeerConfigChanges
/** * @deprecated since 2.1.0 and will be removed in 4.0.0. Use * {@link #registerPeerConfigListener(ReplicationPeerConfigListener)} instead. * @see #registerPeerConfigListener(ReplicationPeerConfigListener) * @see <a href="https://issues.apache.org/jira/browse/HBASE-19573">HBASE-19573</a> */ @Deprecated default void trackPeerConfigChanges(ReplicationPeerConfigListener listener) { registerPeerConfigListener(listener); }
3.68
flink_TumblingWindowAssigner_of
/** * Creates a new {@code TumblingWindowAssigner} {@link WindowAssigner} that assigns elements to * time windows based on the element timestamp. * * @param size The size of the generated windows. * @return The time policy. */ public static TumblingWindowAssigner of(Duration size) { return new TumblingWindowAssigner(size.toMillis(), 0, true); }
3.68
framework_AbstractOrderedLayout_getComponentCount
/** * Gets the number of contained components. Consistent with the iterator * returned by {@link #getComponentIterator()}. * * @return the number of contained components */ @Override public int getComponentCount() { return components.size(); }
3.68
zxing_BitMatrix_setRow
/** * @param y row to set * @param row {@link BitArray} to copy from */ public void setRow(int y, BitArray row) { System.arraycopy(row.getBitArray(), 0, bits, y * rowSize, rowSize); }
3.68
morf_AbstractSqlDialectTest_testSelectSpecificFields
/** * Tests a simple select with fields specified */ @Test public void testSelectSpecificFields() { SelectStatement stmt = new SelectStatement(new FieldReference(STRING_FIELD), new FieldReference(INT_FIELD), new FieldReference(DATE_FIELD).as("aliasDate")) .from(new TableReference(TEST_TABLE)); String expectedSql = "SELECT stringField, intField, dateField AS aliasDate FROM " + tableName(TEST_TABLE); assertEquals("Select specific fields", expectedSql, testDialect.convertStatementToSQL(stmt)); }
3.68