name: string (lengths 12–178)
code_snippet: string (lengths 8–36.5k)
score: float64 (min 3.26, max 3.68)
pulsar_KeyStoreSSLContext_createClientSslContext
// for web client public static SSLContext createClientSslContext(String keyStoreTypeString, String keyStorePath, String keyStorePassword, String trustStoreTypeString, String trustStorePath, String trustStorePassword) throws GeneralSecurityException, IOException { KeyStoreSSLContext keyStoreSSLContext = new KeyStoreSSLContext(Mode.CLIENT, null, keyStoreTypeString, keyStorePath, keyStorePassword, false, trustStoreTypeString, trustStorePath, trustStorePassword, false, null, null); return keyStoreSSLContext.createSSLContext(); }
3.68
framework_Form_setFormFieldFactory
/** * Sets the field factory used by this Form to generate Fields for * properties. * * {@link FormFieldFactory} is used to create fields for form properties. * {@link DefaultFieldFactory} is used by default. * * @param fieldFactory * the new factory used to create the fields. * @see Field * @see FormFieldFactory */ public void setFormFieldFactory(FormFieldFactory fieldFactory) { this.fieldFactory = fieldFactory; }
3.68
flink_CheckedThread_sync
/** * Waits with timeout until the thread is completed and checks whether any error occurred during * the execution. In case of timeout an {@link Exception} is thrown. * * <p>This method blocks like {@link #join()}, but performs an additional check for exceptions * thrown from the {@link #go()} method. */ public void sync(long timeout) throws Exception { trySync(timeout); checkFinished(); }
3.68
cron-utils_Preconditions_checkNotNull
/** * Ensures that an object reference passed as a parameter to the calling method is not null. * * @param reference an object reference * @param errorMessage the exception message to use if the check fails; will be converted to a * string using {@link String#valueOf(Object)} * @return the non-null reference that was validated * @throws NullPointerException if {@code reference} is null */ @SuppressWarnings(value = "NullPointerException") public static <T> T checkNotNull(final T reference, final Object errorMessage) { if (reference == null) { throw new NullPointerException(String.valueOf(errorMessage)); } return reference; }
3.68
framework_DesignContext_shouldWriteChildren
/** * Helper method for component write implementors to determine whether their * children should be written out or not. * * @param c * The component being written * @param defaultC * The default instance for the component * @return whether the children of c should be written */ public boolean shouldWriteChildren(Component c, Component defaultC) { if (c == getRootComponent()) { // The root component should always write its children - otherwise // the result is empty return true; } if (defaultC instanceof HasComponents && ((HasComponents) defaultC).iterator().hasNext()) { // Easy version which assumes that this is a custom component if the // constructor adds children return false; } return true; }
3.68
hadoop_HsAboutPage_preHead
/* * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ @Override protected void preHead(Page.HTML<__> html) { commonPreHead(html); //override the nav config from commonPreHead set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}"); setTitle("About History Server"); }
3.68
framework_Page_removeUriFragmentChangedListener
/** * Removes a URI fragment listener that was previously added to this page. * * @param listener * the URI fragment listener to remove * * @see Page#addUriFragmentChangedListener(UriFragmentChangedListener) * * @deprecated As of 8.0, replaced by {@link Registration#remove()} in the * registration object returned from * {@link #addUriFragmentChangedListener(UriFragmentChangedListener)}. */ @Deprecated public void removeUriFragmentChangedListener( Page.UriFragmentChangedListener listener) { removeListener(UriFragmentChangedEvent.class, listener, URI_FRAGMENT_CHANGED_METHOD); }
3.68
framework_TableElement_getFooterCell
/** * Gets the footer cell at the given column index. * * @param column * 0-based column index * @return TestBenchElement containing the wanted footer cell */ public TestBenchElement getFooterCell(int column) { TestBenchElement footerCell = wrapElement( findElement(By.vaadin("#footer[" + column + "]")), getCommandExecutor()); return footerCell; }
3.68
hudi_BaseHoodieWriteClient_updateColumnType
/** * Updates the column type for a Hudi table. * Only updating a primitive type to another primitive type is supported. * A nested type cannot be updated to another nested type or to a primitive type, e.g. RecordType -> MapType or MapType -> LongType. * * @param colName name of the column to be changed. If the column is a nested field, the full name must be specified * @param newType the new type for the column */ public void updateColumnType(String colName, Type newType) { Pair<InternalSchema, HoodieTableMetaClient> pair = getInternalSchemaAndMetaClient(); InternalSchema newSchema = new InternalSchemaChangeApplier(pair.getLeft()).applyColumnTypeChange(colName, newType); commitTableChange(newSchema, pair.getRight()); }
3.68
graphhopper_CHStorageBuilder_addShortcutEdgeBased
/** * @param origKeyFirst The first original edge key that is skipped by this shortcut *in the direction of the shortcut*. * This definition assumes that edge-based shortcuts are one-directional, and they are. * For example, for the following shortcut edge from x to y: x->u->v->w->y, * which skips the shortcuts x->v and v->y, the first original edge key would be that of the edge x->u * @param origKeyLast like origKeyFirst, but the last orig edge key, i.e. the key of w->y in the above example */ public int addShortcutEdgeBased(int a, int b, int accessFlags, double weight, int skippedEdge1, int skippedEdge2, int origKeyFirst, int origKeyLast) { checkNewShortcut(a, b); int shortcut = storage.shortcutEdgeBased(a, b, accessFlags, weight, skippedEdge1, skippedEdge2, origKeyFirst, origKeyLast); setLastShortcut(a, shortcut); return shortcut; }
3.68
hbase_BlockIOUtils_readWithExtraOnHeap
/** * Read from an input stream at least <code>necessaryLen</code> and if possible, * <code>extraLen</code> also if available. Analogous to * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but specifies a number of "extra" * bytes to also optionally read. * @param in the input stream to read from * @param buf the buffer to read into * @param bufOffset the destination offset in the buffer * @param necessaryLen the number of bytes that are absolutely necessary to read * @param extraLen the number of extra bytes that would be nice to read * @return true if succeeded reading the extra bytes * @throws IOException if failed to read the necessary bytes */ private static boolean readWithExtraOnHeap(InputStream in, byte[] buf, int bufOffset, int necessaryLen, int extraLen) throws IOException { int heapBytesRead = 0; int bytesRemaining = necessaryLen + extraLen; try { while (bytesRemaining > 0) { int ret = in.read(buf, bufOffset, bytesRemaining); if (ret < 0) { if (bytesRemaining <= extraLen) { // We could not read the "extra data", but that is OK. break; } throw new IOException("Premature EOF from inputStream (read " + "returned " + ret + ", was trying to read " + necessaryLen + " necessary bytes and " + extraLen + " extra bytes, " + "successfully read " + (necessaryLen + extraLen - bytesRemaining)); } bufOffset += ret; bytesRemaining -= ret; heapBytesRead += ret; } } finally { final Span span = Span.current(); final AttributesBuilder attributesBuilder = builderFromContext(Context.current()); annotateHeapBytesRead(attributesBuilder, heapBytesRead); span.addEvent("BlockIOUtils.readWithExtra", attributesBuilder.build()); } return bytesRemaining <= 0; }
3.68
framework_VaadinPortletSession_sendPortletEvent
/** * Sends a portlet event to the indicated destination. * * Internally, an action may be created and opened, as an event cannot be * sent directly from all types of requests. * * Sending portlet events from background threads is not supported. * * The event destinations and values need to be kept in the context until * sent. If the action fails, any memory leaks are limited to the session. * * Event names for events sent and received by a portlet need to be declared * in portlet.xml. * * @param uI * a window in which a temporary action URL can be opened if * necessary * @param name * event name * @param value * event value object that is Serializable and, if appropriate, * has a valid JAXB annotation */ public void sendPortletEvent(UI uI, QName name, Serializable value) throws IllegalStateException { PortletResponse response = getCurrentResponse(); if (response instanceof MimeResponse) { String actionKey = "" + System.currentTimeMillis(); while (eventActionDestinationMap.containsKey(actionKey)) { actionKey += "."; } PortletURL actionUrl = generateActionURL(actionKey); if (actionUrl != null) { eventActionDestinationMap.put(actionKey, name); eventActionValueMap.put(actionKey, value); uI.getPage().setLocation(actionUrl.toString()); } else { // this should never happen as we already know the response is a // MimeResponse throw new IllegalStateException( "Portlet events can only be sent from a portlet request"); } } else if (response instanceof StateAwareResponse) { ((StateAwareResponse) response).setEvent(name, value); } else { throw new IllegalStateException( "Portlet events can only be sent from a portlet request"); } }
3.68
hadoop_CombinedFileRange_getUnderlying
/** * Get the list of ranges that were merged together to form this one. * @return the list of input ranges */ public List<FileRange> getUnderlying() { return underlying; }
3.68
hadoop_DateSplitter_longToDate
/** Parse the long-valued timestamp into the appropriate SQL date type. */ private Date longToDate(long val, int sqlDataType) { switch (sqlDataType) { case Types.DATE: return new java.sql.Date(val); case Types.TIME: return new java.sql.Time(val); case Types.TIMESTAMP: return new java.sql.Timestamp(val); default: // Shouldn't ever hit this case. return null; } }
3.68
hbase_PreemptiveFastFailException_getFirstFailureAt
/** Returns time of the first failure */ public long getFirstFailureAt() { return timeOfFirstFailureMilliSec; }
3.68
flink_ImmutableMapState_values
/** * Returns all the values in the state in a {@link * Collections#unmodifiableCollection(Collection)}. * * @return A read-only iterable view of all the values in the state. */ @Override public Iterable<V> values() { return Collections.unmodifiableCollection(state.values()); }
3.68
dubbo_LFUCache_addLast
/** * Puts the node with the specified key and value at the end of the deque * and returns the node. * * @param key key * @param value value * @return added node */ CacheNode<K, V> addLast(final K key, final V value) { CacheNode<K, V> node = new CacheNode<>(key, value); node.owner = this; node.next = last.next; node.prev = last; node.next.prev = node; last.next = node; return node; }
3.68
flink_RocksDBCachingPriorityQueueSet_flushWriteBatch
/** Ensures that recent writes are flushed and reflected in the RocksDB instance. */ private void flushWriteBatch() { try { batchWrapper.flush(); } catch (RocksDBException e) { throw new FlinkRuntimeException(e); } }
3.68
graphhopper_ConditionalExpressionVisitor_isValidIdentifier
// allow only methods and other identifiers (constants and encoded values) boolean isValidIdentifier(String identifier) { if (variableValidator.isValid(identifier)) { if (!Character.isUpperCase(identifier.charAt(0))) result.guessedVariables.add(identifier); return true; } return false; }
3.68
morf_MySqlDialect_getColumnRepresentation
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getColumnRepresentation(org.alfasoftware.morf.metadata.DataType, * int, int) */ @Override protected String getColumnRepresentation(DataType dataType, int width, int scale) { switch (dataType) { case STRING: return String.format("VARCHAR(%d)", width); case DECIMAL: return String.format("DECIMAL(%d,%d)", width, scale); case DATE: return "DATE"; case BOOLEAN: // See http://www.xaprb.com/blog/2006/04/11/bit-values-in-mysql/ return "TINYINT(1)"; case INTEGER: return "INTEGER"; case BIG_INTEGER: return "BIGINT"; case BLOB: return "LONGBLOB"; case CLOB: return "LONGTEXT"; default: throw new UnsupportedOperationException("Cannot map column with type [" + dataType + "]"); } }
3.68
framework_Component_setStyleName
/** * Adds or removes a style name. Multiple styles can be specified as a * space-separated list of style names. * * If the {@code add} parameter is true, the style name is added to the * component. If the {@code add} parameter is false, the style name is * removed from the component. * <p> * Functionally this is equivalent to using {@link #addStyleName(String)} or * {@link #removeStyleName(String)} * * @since 8.5 * @param style * the style name to be added or removed * @param add * <code>true</code> to add the given style, <code>false</code> * to remove it * @see #addStyleName(String) * @see #removeStyleName(String) */ public default void setStyleName(String style, boolean add) { if (add) { addStyleName(style); } else { removeStyleName(style); } }
3.68
hadoop_FederationStateStoreClientMetrics_getNumFailedCallsForMethod
// Getters for unit testing @VisibleForTesting static long getNumFailedCallsForMethod(String methodName) { return API_TO_FAILED_CALLS.get(methodName).value(); }
3.68
framework_Tree_setMultiselectMode
/** * Sets the behavior of the multiselect mode. * * @param mode * The mode to set */ public void setMultiselectMode(MultiSelectMode mode) { if (multiSelectMode != mode && mode != null) { multiSelectMode = mode; markAsDirty(); } }
3.68
framework_Page_reload
/** * Reloads the page in the browser. */ public void reload() { uI.getRpcProxy(PageClientRpc.class).reload(); }
3.68
rocketmq-connect_WorkerTask_cleanup
/** * clean up */ public void cleanup() { log.info("Cleaning a task, current state {}, destination state {}", state.get().name(), WorkerTaskState.TERMINATED.name()); if (state.compareAndSet(WorkerTaskState.STOPPED, WorkerTaskState.TERMINATED) || state.compareAndSet(WorkerTaskState.ERROR, WorkerTaskState.TERMINATED)) { log.info("Cleaning a task success"); } else { log.error("[BUG] cleaning a task but it's not in STOPPED or ERROR state"); } }
3.68
framework_Page_removeBrowserWindowResizeListener
/** * Removes a {@link BrowserWindowResizeListener} from this UI. The listener * will no longer be notified when the browser window is resized. * * @param resizeListener * the listener to remove * * @deprecated As of 8.0, replaced by {@link Registration#remove()} in the * registration object returned from * {@link #addBrowserWindowResizeListener(BrowserWindowResizeListener)} * . */ @Deprecated public void removeBrowserWindowResizeListener( BrowserWindowResizeListener resizeListener) { removeListener(BrowserWindowResizeEvent.class, resizeListener, BROWSER_RESIZE_METHOD); getState(true).hasResizeListeners = hasEventRouter() && eventRouter.hasListeners(BrowserWindowResizeEvent.class); }
3.68
framework_MouseEvents_getRelativeY
/** * Returns the relative mouse position (y coordinate) when the click * took place. The position is relative to the clicked component. * * @return The mouse cursor y position relative to the clicked layout * component or -1 if no y coordinate available */ public int getRelativeY() { return details.getRelativeY(); }
3.68
hbase_ModeStrategyUtils_applyFilterAndGet
/** * Filter records as per the supplied filters. * @param records records to be processed * @param filters List of filters * @return filtered records */ public static List<Record> applyFilterAndGet(List<Record> records, List<RecordFilter> filters) { if (filters != null && !filters.isEmpty()) { return records.stream().filter(r -> filters.stream().allMatch(f -> f.execute(r))) .collect(Collectors.toList()); } return records; }
3.68
morf_CreateDeployedViews_getDescription
/** * @see org.alfasoftware.morf.upgrade.UpgradeStep#getDescription() */ @Override public String getDescription() { return "Allow database views to be managed"; }
3.68
hbase_HRegion_isFlushSucceeded
/** * Convenience method, the equivalent of checking if result is FLUSHED_NO_COMPACTION_NEEDED or * FLUSHED_COMPACTION_NEEDED. * @return true if the memstores were flushed, else false. */ @Override public boolean isFlushSucceeded() { return result == Result.FLUSHED_NO_COMPACTION_NEEDED || result == Result.FLUSHED_COMPACTION_NEEDED; }
3.68
flink_ChangelogMode_all
/** Shortcut for a changelog that can contain all {@link RowKind}s. */ public static ChangelogMode all() { return ALL; }
3.68
hibernate-validator_Configuration_getVerboseOption
/** * Retrieves the value for the "verbose" property from the options. */ private boolean getVerboseOption(Map<String, String> options, Messager messager) { boolean theValue = Boolean.parseBoolean( options.get( VERBOSE_PROCESSOR_OPTION ) ); if ( theValue ) { messager.printMessage( Kind.NOTE, StringHelper.format( "Verbose reporting is activated. Some processing information will be displayed using diagnostic kind %1$s.", Kind.NOTE ) ); } return theValue; }
3.68
framework_AbstractInMemoryContainer_removeListener
/** * @deprecated As of 7.0, replaced by * {@link #removeItemSetChangeListener(Container.ItemSetChangeListener)} */ @Deprecated @Override public void removeListener(Container.ItemSetChangeListener listener) { removeItemSetChangeListener(listener); }
3.68
flink_UnsafeMemoryBudget_reserveMemory
/** * Reserve memory of certain size if it is available. * * <p>Adjusted version of {@link java.nio.Bits#reserveMemory(long, int)} taken from Java 11. */ @SuppressWarnings({"OverlyComplexMethod", "JavadocReference", "NestedTryStatement"}) void reserveMemory(long size) throws MemoryReservationException { long availableOrReserved = tryReserveMemory(size); // optimist! if (availableOrReserved >= size) { return; } // no luck throw new MemoryReservationException( String.format( "Could not allocate %d bytes, only %d bytes are remaining. This usually indicates " + "that you are requesting more memory than you have reserved. " + "However, when running an old JVM version it can also be caused by slow garbage collection. " + "Try to upgrade to Java 8u72 or higher if running on an old Java version.", size, availableOrReserved)); }
3.68
framework_BrowserInfo_isSafariOrIOS
/** * Returns true if the browser is Safari or is a browser that is running on * iOS and using the Safari rendering engine. * * @return true if the browser is using the Safari rendering engine * @since 8.1 */ public boolean isSafariOrIOS() { return browserDetails.isSafariOrIOS(); }
3.68
flink_JobVertex_setParallelism
/** * Sets the parallelism for the task. * * @param parallelism The parallelism for the task. */ public void setParallelism(int parallelism) { if (parallelism < 1 && parallelism != ExecutionConfig.PARALLELISM_DEFAULT) { throw new IllegalArgumentException( "The parallelism must be at least one, or " + ExecutionConfig.PARALLELISM_DEFAULT + " (unset)."); } this.parallelism = parallelism; }
3.68
hudi_KeyRangeNode_compareTo
/** * Compares the min record key of two nodes, followed by max record key. * * @param that the {@link KeyRangeNode} to be compared with * @return the result of comparison. 0 if both min and max are equal in both. 1 if this {@link KeyRangeNode} is * greater than the {@code that} keyRangeNode. -1 if {@code that} keyRangeNode is greater than this {@link * KeyRangeNode} */ @Override public int compareTo(KeyRangeNode that) { int compareValue = minRecordKey.compareTo(that.minRecordKey); if (compareValue == 0) { return maxRecordKey.compareTo(that.maxRecordKey); } else { return compareValue; } }
3.68
hbase_AsyncAdmin_hasUserPermissions
/** * Check if the calling user has the specific permissions. * @param permissions the specific permission list * @return a list of booleans indicating, for each permission, whether the caller has it */ default CompletableFuture<List<Boolean>> hasUserPermissions(List<Permission> permissions) { return hasUserPermissions(null, permissions); }
3.68
hadoop_NormalizedResourceEvent_getTaskType
/** * Gets the task type for the event. * @return the task type for the event. */ public TaskType getTaskType() { return this.taskType; }
3.68
rocketmq-connect_ColumnDefinition_asPartOfPrimaryKey
/** * Obtain a {@link ColumnDefinition} that has all the same characteristics as this column except * that it does or does not belong to the table's primary key * * @param isPrimaryKey true if the resulting column definition should be part of the table's * primary key, or false otherwise * @return a definition that is the same as this definition except it is or is not part of the * table's primary key, or may be this object if {@link #isPrimaryKey()} already matches the * supplied parameter; never null */ public ColumnDefinition asPartOfPrimaryKey(boolean isPrimaryKey) { if (isPrimaryKey == isPrimaryKey()) { return this; } return new ColumnDefinition(id, jdbcType, typeName, classNameForType, nullability, mutability, precision, scale, signedNumbers, displaySize, autoIncremented, caseSensitive, searchable, currency, isPrimaryKey ); }
3.68
flink_RocksDBIncrementalCheckpointUtils_stateHandleEvaluator
/** * Evaluates a state handle's "score" with respect to the target range when choosing the best state * handle to init the initial db for recovery. If the overlap fraction is less than * overlapFractionThreshold, {@code Score.MIN} is returned to mean the handle has no chance * to be the initial handle. */ private static Score stateHandleEvaluator( KeyedStateHandle stateHandle, KeyGroupRange targetKeyGroupRange, double overlapFractionThreshold) { final KeyGroupRange handleKeyGroupRange = stateHandle.getKeyGroupRange(); final KeyGroupRange intersectGroup = handleKeyGroupRange.getIntersection(targetKeyGroupRange); final double overlapFraction = (double) intersectGroup.getNumberOfKeyGroups() / handleKeyGroupRange.getNumberOfKeyGroups(); if (overlapFraction < overlapFractionThreshold) { return Score.MIN; } return new Score(intersectGroup.getNumberOfKeyGroups(), overlapFraction); }
3.68
flink_FileCatalogStore_listCatalogs
/** * Returns a set of all catalog names in the catalog store. * * @return a set of all catalog names in the catalog store * @throws CatalogException if the catalog store is not open or if there is an error retrieving * the list of catalog names */ @Override public Set<String> listCatalogs() throws CatalogException { checkOpenState(); try { FileStatus[] statusArr = catalogStorePath.getFileSystem().listStatus(catalogStorePath); return Arrays.stream(statusArr) .filter(status -> !status.isDir()) .map(FileStatus::getPath) .map(Path::getName) .map(filename -> filename.replace(FILE_EXTENSION, "")) .collect(Collectors.toSet()); } catch (Exception e) { throw new CatalogException( String.format( "Failed to list file catalog store directory %s.", catalogStorePath), e); } }
3.68
framework_Window_removeCloseListener
/** * Removes the CloseListener from the window. * * <p> * For more information on CloseListeners see {@link CloseListener}. * </p> * * @param listener * the CloseListener to remove. */ @Deprecated public void removeCloseListener(CloseListener listener) { removeListener(CloseEvent.class, listener, WINDOW_CLOSE_METHOD); }
3.68
hbase_HRegion_getWalFileSystem
/** Returns the WAL {@link FileSystem} being used by this region */ FileSystem getWalFileSystem() throws IOException { if (walFS == null) { walFS = CommonFSUtils.getWALFileSystem(conf); } return walFS; }
3.68
hudi_TimelineBasedCkpMetadata_sendRefreshRequest
/** * Refresh the ckp messages that are cached in the timeline server. */ private void sendRefreshRequest() { try { boolean success = httpRequestClient.executeRequestWithRetry( InstantStateHandler.REFRESH_INSTANT_STATE, getRequestParams(path.toString()), new TypeReference<Boolean>() { }, RequestMethod.POST); if (!success) { LOG.warn("Timeline server responses with failed refresh"); } } catch (Exception e) { // Do not propagate the exception because the server will also do auto refresh LOG.error("Failed to execute refresh", e); } }
3.68
zxing_BitArray_clear
/** * Clears all bits (sets to false). */ public void clear() { int max = bits.length; for (int i = 0; i < max; i++) { bits[i] = 0; } }
3.68
hbase_HFileInfo_isReservedFileInfoKey
/** Return true if the given file info key is reserved for internal use. */ public static boolean isReservedFileInfoKey(byte[] key) { return Bytes.startsWith(key, HFileInfo.RESERVED_PREFIX_BYTES); }
3.68
hbase_SnapshotInfo_addLogFile
/** * Add the specified log file to the stats * @param server server name * @param logfile log file name * @return the log information */ FileInfo addLogFile(final String server, final String logfile) throws IOException { WALLink logLink = new WALLink(conf, server, logfile); long size = -1; try { size = logLink.getFileStatus(fs).getLen(); logSize.addAndGet(size); logsCount.incrementAndGet(); } catch (FileNotFoundException e) { logsMissing.incrementAndGet(); } return new FileInfo(false, size, false); }
3.68
framework_ColorPickerHistory_removeColorChangeListener
/** * Removes a color change listener. * * @param listener * The listener */ @Override public void removeColorChangeListener(ColorChangeListener listener) { removeListener(ColorChangeEvent.class, listener); }
3.68
hadoop_IOStatisticsBinding_invokeTrackingDuration
/** * Given an IOException raising callable/lambda expression, * execute it, updating the tracker on success/failure. * @param tracker duration tracker. * @param input input callable. * @param <B> return type. * @return the result of the invocation * @throws IOException on failure. */ public static <B> B invokeTrackingDuration( final DurationTracker tracker, final CallableRaisingIOE<B> input) throws IOException { try { // exec the input function and return its value return input.apply(); } catch (IOException | RuntimeException e) { // input function failed: note it tracker.failed(); // and rethrow throw e; } finally { // update the tracker. // this is called after the catch() call would have // set the failed flag. tracker.close(); } }
3.68
framework_Escalator_paintRemoveRow
/** * Removes a row element from the DOM, invoking * {@link #getEscalatorUpdater()} * {@link EscalatorUpdater#preDetach(Row, Iterable) preDetach} and * {@link EscalatorUpdater#postDetach(Row, Iterable) postDetach} before * and after removing the row, respectively. * <p> * This method must be called for each removed DOM row by any * {@link #paintRemoveRows(int, int)} implementation. * * @param tr * the row element to remove. */ protected void paintRemoveRow(final TableRowElement tr, final int logicalRowIndex) { flyweightRow.setup(tr, logicalRowIndex, columnConfiguration.getCalculatedColumnWidths()); getEscalatorUpdater().preDetach(flyweightRow, flyweightRow.getCells()); tr.removeFromParent(); getEscalatorUpdater().postDetach(flyweightRow, flyweightRow.getCells()); /* * the "assert" guarantees that this code is run only during * development/debugging. */ assert flyweightRow.teardown(); }
3.68
hbase_HBaseMetrics2HadoopMetricsAdapter_addHistogram
/** * Add Histogram value-distribution data to a Hadoop-Metrics2 record builder. * @param name A base name for this record. * @param histogram A histogram to measure distribution of values. * @param builder A Hadoop-Metrics2 record builder. */ private void addHistogram(String name, Histogram histogram, MetricsRecordBuilder builder) { MutableHistogram.snapshot(name, EMPTY_STRING, histogram, builder, true); }
3.68
hbase_MasterObserver_preSetUserQuota
/** * Called before the quota for the user on the specified namespace is stored. * @param ctx the environment to interact with the framework and master * @param userName the name of user * @param namespace the name of the namespace * @param quotas the current quota for the user on the namespace */ default void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx, final String userName, final String namespace, final GlobalQuotaSettings quotas) throws IOException { }
3.68
framework_Overlay_isFitInWindow
/** * Checks whether the overlay should be moved or shrunk to fit inside the * window. * * @see #setFitInWindow(boolean) * * @since 7.6.6 * @return <code>true</code> if the popup will be moved and/or shrunk to fit * inside the window, <code>false</code> otherwise */ public boolean isFitInWindow() { return fitInWindow; }
3.68
hadoop_CorruptFileBlockIterator_getCallsMade
/** * @return the number of calls made to the DFSClient. * This is for debugging and testing purposes. */ public int getCallsMade() { return callsMade; }
3.68
hadoop_TimelineEntity_addRelatedEntity
/** * Add an entity to the existing related entity map * * @param entityType * the entity type * @param entityId * the entity Id */ public void addRelatedEntity(String entityType, String entityId) { Set<String> thisRelatedEntity = relatedEntities.get(entityType); if (thisRelatedEntity == null) { thisRelatedEntity = new HashSet<String>(); relatedEntities.put(entityType, thisRelatedEntity); } thisRelatedEntity.add(entityId); }
3.68
framework_MenuBar_closeAllParents
/* * Closes all parent menu popups. */ void closeAllParents() { MenuBar curMenu = this; while (curMenu != null) { curMenu.close(); if ((curMenu.parentMenu == null) && (curMenu.selectedItem != null)) { curMenu.selectedItem.setSelectionStyle(false); curMenu.selectedItem = null; } curMenu = curMenu.parentMenu; } }
3.68
framework_VComboBox_setPlaceholder
/** * Set or reset the placeholder attribute for the text field. * * @param placeholder * new placeholder string or null for none */ public void setPlaceholder(String placeholder) { inputPrompt = placeholder; updatePlaceholder(); }
3.68
flink_DefaultConfigurableOptionsFactory_setMaxLogFileSize
/** * The maximum size of RocksDB's file used for logging. * * <p>If the log file becomes larger than this, a new file will be created. If 0, all logs will * be written to one log file. * * @param maxLogFileSize max file size limit * @return this options factory */ public DefaultConfigurableOptionsFactory setMaxLogFileSize(String maxLogFileSize) { Preconditions.checkArgument( MemorySize.parseBytes(maxLogFileSize) >= 0, "Invalid configuration " + maxLogFileSize + " for max log file size."); setInternal(LOG_MAX_FILE_SIZE.key(), maxLogFileSize); return this; }
3.68
hadoop_LogAggregationWebUtils_getLogEndIndex
/** * Parse end index from html. * @param html the html * @param endStr the end index string * @return the endIndex */ public static long getLogEndIndex(Block html, String endStr) throws NumberFormatException { long end = Long.MAX_VALUE; if (endStr != null && !endStr.isEmpty()) { end = Long.parseLong(endStr); } return end; }
3.68
hbase_MiniHBaseCluster_stopRegionServer
/** * Shut down the specified region server cleanly * @param serverNumber Used as index into a list. * @param shutdownFS True if we are to shut down the filesystem as part of this regionserver's * shutdown. Usually we do, but you do not want to do this if you are running * multiple regionservers in a test and you shut down one before the end of the * test. * @return the region server that was stopped */ public JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber, final boolean shutdownFS) { JVMClusterUtil.RegionServerThread server = hbaseCluster.getRegionServers().get(serverNumber); LOG.info("Stopping " + server.toString()); server.getRegionServer().stop("Stopping rs " + serverNumber); return server; }
3.68
flink_HsSubpartitionFileReaderImpl_getNextOffsetToLoad
/** Returns Long.MAX_VALUE if it shouldn't load. */ private long getNextOffsetToLoad() { int bufferIndex = bufferIndexManager.getNextToLoad(); if (bufferIndex < 0) { return Long.MAX_VALUE; } else { return cachedRegionManager.getFileOffset(bufferIndex); } }
3.68
framework_GridSingleSelect_getFirstSelectedItem
/** * Get first selected data item. * * @return the first selected item. */ public Optional<T> getFirstSelectedItem() { return model.getFirstSelectedItem(); }
3.68
framework_VGridLayout_calcColumnExpandRatio
/** * Calculates column expand ratio. */ private float[] calcColumnExpandRatio() { float[] actualExpandRatio = new float[minColumnWidths.length]; for (int i = 0; i < minColumnWidths.length; i++) { if (!hiddenEmptyColumn(i)) { actualExpandRatio[i] = colExpandRatioArray[i]; } else { actualExpandRatio[i] = 0; } } return actualExpandRatio; }
3.68
flink_WindowOperatorBuilder_withInputCountIndex
/** * The index of COUNT(*) in the aggregates. -1 when the input doesn't * contain COUNT(*), i.e. * doesn't contain retraction messages. We make sure there is a * COUNT(*) if the input stream * contains retractions. */ public WindowOperatorBuilder withInputCountIndex(int inputCountIndex) { this.inputCountIndex = inputCountIndex; return this; }
3.68
hbase_MetricsConnection_getHedgedReadOps
/** hedgedReadOps metric */ public Counter getHedgedReadOps() { return hedgedReadOps; }
3.68
morf_OracleDialect_truncatedTableNameWithSuffix
/** * Truncate table names to 27 characters, then add a 3 character suffix since 30 is the maximum supported by Oracle. */ private String truncatedTableNameWithSuffix(String tableName, String suffix) { return StringUtils.substring(tableName, 0, 27) + StringUtils.substring(suffix, 0, 3); }
3.68
hadoop_ZookeeperClient_checkNotNull
// Preconditions could be imported from hadoop-common, but that results // in a circular dependency private void checkNotNull(Object reference, String errorMessage) { if (reference == null) { throw new NullPointerException(errorMessage); } }
3.68
flink_BinaryStringDataUtil_concatWs
/** * Concatenates input strings together into a single string using the separator. Returns NULL If * the separator is NULL. * * <p>Note: CONCAT_WS() does not skip any empty strings, however it does skip any NULL values * after the separator. For example, concat_ws(",", "a", null, "c") would yield "a,c". */ public static BinaryStringData concatWs( BinaryStringData separator, BinaryStringData... inputs) { return concatWs(separator, Arrays.asList(inputs)); }
3.68
hadoop_RLESparseResourceAllocation_merge
/** * Merges the range start to end of two {@code RLESparseResourceAllocation} * using a given {@code RLEOperator}. * * @param resCalc the resource calculator * @param clusterResource the total cluster resources (for DRF) * @param a the left operand * @param b the right operand * @param operator the operator to be applied during merge * @param start the start-time of the range to be considered * @param end the end-time of the range to be considered * @return the merged RLESparseResourceAllocation, produced by applying * "operator" to "a" and "b" * @throws PlanningException in case the operator is subtractTestPositive and * the result would contain a negative value */ public static RLESparseResourceAllocation merge(ResourceCalculator resCalc, Resource clusterResource, RLESparseResourceAllocation a, RLESparseResourceAllocation b, RLEOperator operator, long start, long end) throws PlanningException { NavigableMap<Long, Resource> cumA = a.getRangeOverlapping(start, end).getCumulative(); NavigableMap<Long, Resource> cumB = b.getRangeOverlapping(start, end).getCumulative(); NavigableMap<Long, Resource> out = merge(resCalc, clusterResource, cumA, cumB, start, end, operator); return new RLESparseResourceAllocation(out, resCalc); }
3.68
open-banking-gateway_DatasafeMetadataStorage_update
/** * Updates user profile data * @param id Entity id * @param data New entry value */ @Override @Transactional public void update(String id, byte[] data) { T toSave = repository.findById(getIdValue(id)).get(); setData.accept(toSave, data); repository.save(toSave); }
3.68
querydsl_SimpleExpression_count
/** * Get the {@code count(this)} expression * * @return count(this) */ public NumberExpression<Long> count() { if (count == null) { count = Expressions.numberOperation(Long.class, Ops.AggOps.COUNT_AGG, mixin); } return count; }
3.68
rocketmq-connect_JdbcSourceTask_buildAndAddQuerier
/** * build and add querier * * @param loadMode * @param querySuffix * @param incrementingColumn * @param timestampColumns * @param timestampDelayInterval * @param timeZone * @param tableOrQuery * @param offset */ private void buildAndAddQuerier(TableLoadMode loadMode, String querySuffix, String incrementingColumn, List<String> timestampColumns, Long timestampDelayInterval, TimeZone timeZone, String tableOrQuery, Map<String, Object> offset) { String topicPrefix = config.getTopicPrefix(); QueryMode queryMode = !StringUtils.isEmpty(config.getQuery()) ? QueryMode.QUERY : QueryMode.TABLE; Querier querier; switch (loadMode) { case MODE_BULK: querier = new BulkQuerier( dialect, getContext(querySuffix, tableOrQuery, topicPrefix, queryMode) ); tableQueue.add(querier); break; case MODE_INCREMENTING: querier = new TimestampIncrementingQuerier( dialect, this.getIncrementContext(querySuffix, tableOrQuery, topicPrefix, queryMode, null, incrementingColumn, offset, timestampDelayInterval, timeZone) ); tableQueue.add(querier); break; case MODE_TIMESTAMP: querier = new TimestampIncrementingQuerier( dialect, this.getIncrementContext(querySuffix, tableOrQuery, topicPrefix, queryMode, timestampColumns, null, offset, timestampDelayInterval, timeZone) ); tableQueue.add(querier); break; case MODE_TIMESTAMP_INCREMENTING: querier = new TimestampIncrementingQuerier( dialect, this.getIncrementContext(querySuffix, tableOrQuery, topicPrefix, queryMode, timestampColumns, incrementingColumn, offset, timestampDelayInterval, timeZone) ); tableQueue.add(querier); break; } }
3.68
pulsar_SchemaUtils_convertKeyValueDataStringToSchemaInfoSchema
/** * Convert the key/value schema info data json bytes to key/value schema info data bytes. * * @param keyValueSchemaInfoDataJsonBytes the key/value schema info data json bytes * @return the key/value schema info data bytes */ public static byte[] convertKeyValueDataStringToSchemaInfoSchema( byte[] keyValueSchemaInfoDataJsonBytes) throws IOException { JsonObject jsonObject = (JsonObject) toJsonElement(new String(keyValueSchemaInfoDataJsonBytes, UTF_8)); byte[] keyBytes = getKeyOrValueSchemaBytes(jsonObject.get("key")); byte[] valueBytes = getKeyOrValueSchemaBytes(jsonObject.get("value")); int dataLength = 4 + keyBytes.length + 4 + valueBytes.length; byte[] schema = new byte[dataLength]; //record the key value schema respective length ByteBuf byteBuf = PulsarByteBufAllocator.DEFAULT.heapBuffer(dataLength); byteBuf.writeInt(keyBytes.length).writeBytes(keyBytes).writeInt(valueBytes.length).writeBytes(valueBytes); byteBuf.readBytes(schema); return schema; }
3.68
zxing_GridSampler_checkAndNudgePoints
/** * <p>Checks a set of points that have been transformed to sample points on an image against * the image's dimensions to see if the points are even within the image.</p> * * <p>This method will actually "nudge" the endpoints back onto the image if they are found to be * barely (less than 1 pixel) off the image. This accounts for imperfect detection of finder * patterns in an image where the QR Code runs all the way to the image border.</p> * * <p>For efficiency, the method will check points from either end of the line until one is found * to be within the image. Because the set of points is assumed to be linear, this is valid.</p> * * @param image image into which the points should map * @param points actual points in x1,y1,...,xn,yn form * @throws NotFoundException if an endpoint lies outside the image boundaries */ protected static void checkAndNudgePoints(BitMatrix image, float[] points) throws NotFoundException { int width = image.getWidth(); int height = image.getHeight(); // Check and nudge points from start until we see some that are OK: boolean nudged = true; int maxOffset = points.length - 1; // points.length must be even for (int offset = 0; offset < maxOffset && nudged; offset += 2) { int x = (int) points[offset]; int y = (int) points[offset + 1]; if (x < -1 || x > width || y < -1 || y > height) { throw NotFoundException.getNotFoundInstance(); } nudged = false; if (x == -1) { points[offset] = 0.0f; nudged = true; } else if (x == width) { points[offset] = width - 1; nudged = true; } if (y == -1) { points[offset + 1] = 0.0f; nudged = true; } else if (y == height) { points[offset + 1] = height - 1; nudged = true; } } // Check and nudge points from end: nudged = true; for (int offset = points.length - 2; offset >= 0 && nudged; offset -= 2) { int x = (int) points[offset]; int y = (int) points[offset + 1]; if (x < -1 || x > width || y < -1 || y > height) { throw NotFoundException.getNotFoundInstance(); } nudged = false; if (x == -1) { points[offset] = 0.0f; nudged = true; } else if (x == width) { points[offset] = width - 1; nudged = true; } if (y == -1) { points[offset + 1] = 0.0f; nudged = true; } else if (y == height) { points[offset + 1] = height - 1; nudged = true; } } }
3.68
hadoop_StagingCommitter_getPendingJobAttemptsPath
/** * Get the location of pending job attempts. * @param out the base output directory. * @return the location of pending job attempts. */ private static Path getPendingJobAttemptsPath(Path out) { requireNonNull(out, "Null 'out' path"); return new Path(out, TEMPORARY); }
3.68
framework_VEmbedded_getSrc
/** * Helper to return translated src-attribute from embedded's UIDL * <p> * For internal use only. May be removed or replaced in the future. * * @param src * the src attribute * @param client * the communication engine for this UI * @return the translated src-attribute or an empty String if not found */ public String getSrc(String src, ApplicationConnection client) { String url = client.translateVaadinUri(src); if (url == null) { return ""; } return url; }
3.68
hbase_RequestConverter_buildNormalizeRequest
/** * Creates a protocol buffer NormalizeRequest * @return a NormalizeRequest */ public static NormalizeRequest buildNormalizeRequest(NormalizeTableFilterParams ntfp) { final NormalizeRequest.Builder builder = NormalizeRequest.newBuilder(); if (ntfp.getTableNames() != null) { builder.addAllTableNames(ProtobufUtil.toProtoTableNameList(ntfp.getTableNames())); } if (ntfp.getRegex() != null) { builder.setRegex(ntfp.getRegex()); } if (ntfp.getNamespace() != null) { builder.setNamespace(ntfp.getNamespace()); } return builder.build(); }
3.68
framework_ContainerHierarchicalWrapper_updateHierarchicalWrapper
/** * Updates the wrapper's internal hierarchy data to include all Items in the * underlying container. If the contents of the wrapped container change * without the wrapper's knowledge, this method needs to be called to update * the hierarchy information of the Items. */ public void updateHierarchicalWrapper() { if (!hierarchical) { // Recreate hierarchy and data structures if missing if (noChildrenAllowed == null || parent == null || children == null || roots == null) { noChildrenAllowed = new HashSet<Object>(); parent = new Hashtable<Object, Object>(); children = new Hashtable<Object, LinkedList<Object>>(); roots = new LinkedHashSet<Object>(container.getItemIds()); } else { // Check that the hierarchy is up-to-date // ensure order of root and child lists is same as in wrapped // container Collection<?> itemIds = container.getItemIds(); Comparator<Object> basedOnOrderFromWrappedContainer = new ListedItemsFirstComparator( itemIds); // Calculate the set of all items in the hierarchy final HashSet<Object> s = new HashSet<Object>(); s.addAll(parent.keySet()); s.addAll(children.keySet()); s.addAll(roots); // Remove unnecessary items for (final Object id : s) { if (!container.containsId(id)) { removeFromHierarchyWrapper(id); } } // Add all the missing items final Collection<?> ids = container.getItemIds(); for (final Object id : ids) { if (!s.contains(id)) { addToHierarchyWrapper(id); s.add(id); } } Object[] array = roots.toArray(); Arrays.sort(array, basedOnOrderFromWrappedContainer); roots = new LinkedHashSet<Object>(); for (Object root : array) { roots.add(root); } for (Object object : children.keySet()) { LinkedList<Object> object2 = children.get(object); Collections.sort(object2, basedOnOrderFromWrappedContainer); } } } }
3.68
hadoop_OBSFileSystem_getUsername
/** * Return the username of the filesystem. * * @return the short name of the user who instantiated the filesystem */ String getUsername() { return username; }
3.68
dubbo_AbstractProxyProtocol_destroyInternal
// used to destroy unused clients and other resources protected void destroyInternal(URL url) { // subclass override }
3.68
framework_Overlay_setTop
/** * Sets the pixel value for the top CSS property. * * @param top * value to set */ public void setTop(int top) { this.top = top; }
3.68
hbase_TableDescriptorBuilder_getMemStoreFlushSize
/** * Returns the size of the memstore after which a flush to filesystem is triggered. * @return memory cache flush size for each hregion, -1 if not set. * @see #setMemStoreFlushSize(long) */ @Override public long getMemStoreFlushSize() { return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1); }
3.68
dubbo_HeaderExchangeClient_calculateLeastDuration
/** * Each interval cannot be less than 1000ms. */ private long calculateLeastDuration(int time) { if (time / HEARTBEAT_CHECK_TICK <= 0) { return LEAST_HEARTBEAT_DURATION; } else { return time / HEARTBEAT_CHECK_TICK; } }
3.68
hadoop_FederationCache_buildCacheKey
/** * Build CacheKey. * * @param className Cache Class Name. * @param methodName Method Name. * @param argName ArgName. * @return append result. * Example: * className:FederationJCache, methodName:getApplicationHomeSubCluster, argName: app_1 * We Will Return FederationJCache.getApplicationHomeSubCluster.app_1 */ protected String buildCacheKey(String className, String methodName, String argName) { StringBuilder buffer = new StringBuilder(); buffer.append(className).append(POINT).append(methodName); if (argName != null) { buffer.append(POINT); buffer.append(argName); } return buffer.toString(); }
3.68
pulsar_Topics_deleteAsync
/** * @see Topics#deleteAsync(String, boolean, boolean) */ default CompletableFuture<Void> deleteAsync(String topic, boolean force) { return deleteAsync(topic, force, true); }
3.68
hudi_CompactionUtils_getAllPendingLogCompactionPlans
/** * Get all pending logcompaction plans along with their instants. * @param metaClient Hoodie Meta Client */ public static List<Pair<HoodieInstant, HoodieCompactionPlan>> getAllPendingLogCompactionPlans( HoodieTableMetaClient metaClient) { // This function returns pending logcompaction timeline. Function<HoodieTableMetaClient, HoodieTimeline> filteredTimelineSupplier = (hoodieTableMetaClient) -> hoodieTableMetaClient.getActiveTimeline().filterPendingLogCompactionTimeline(); // Hoodie requested instant supplier Function<String, HoodieInstant> requestedInstantSupplier = HoodieTimeline::getLogCompactionRequestedInstant; return getCompactionPlansByTimeline(metaClient, filteredTimelineSupplier, requestedInstantSupplier); }
3.68
hadoop_TypedBytesInput_get
/** * Get a thread-local typed bytes input for the supplied {@link DataInput}. * @param in data input object * @return typed bytes input corresponding to the supplied {@link DataInput}. */ public static TypedBytesInput get(DataInput in) { TypedBytesInput bin = TB_IN.get(); bin.setDataInput(in); return bin; }
3.68
hadoop_WordListAnonymizerUtility_hasSuffix
/** * Checks if the given data has a known suffix. */ public static boolean hasSuffix(String data, String[] suffixes) { // check if they end in known suffixes for (String ks : suffixes) { if (data.endsWith(ks)) { return true; } } return false; }
3.68
flink_HiveParserQBParseInfo_setDistributeByExprForClause
/** Set the Distribute By AST for the clause. */ public void setDistributeByExprForClause(String clause, HiveParserASTNode ast) { destToDistributeby.put(clause, ast); }
3.68
framework_VCalendarPanel_processClickEvent
/** * Handles a user click on the component. * * @param sender * The component that was clicked */ private void processClickEvent(Widget sender) { if (!isEnabled() || isReadonly()) { return; } if (sender == prevYear) { focusPreviousYear(1); } else if (sender == nextYear) { focusNextYear(1); } else if (sender == prevMonth) { focusPreviousMonth(); } else if (sender == nextMonth) { focusNextMonth(); } }
3.68
framework_PortletListenerNotifier_synchronizedHandleRequest
/** * Fires portlet request events to any {@link PortletListener}s registered * to the given session using * {@link VaadinPortletSession#addPortletListener(PortletListener)}. The * PortletListener method corresponding to the request type is invoked. */ @Override public boolean synchronizedHandleRequest(VaadinSession session, VaadinRequest request, VaadinResponse response) throws IOException { VaadinPortletSession sess = (VaadinPortletSession) session; PortletRequest portletRequest = ((VaadinPortletRequest) request) .getPortletRequest(); PortletResponse portletResponse = ((VaadinPortletResponse) response) .getPortletResponse(); // Finds the right UI UI uI = null; if (ServletPortletHelper.isUIDLRequest(request)) { uI = session.getService().findUI(request); } if (portletRequest instanceof RenderRequest) { sess.firePortletRenderRequest(uI, (RenderRequest) portletRequest, (RenderResponse) portletResponse); } else if (portletRequest instanceof ActionRequest) { sess.firePortletActionRequest(uI, (ActionRequest) portletRequest, (ActionResponse) portletResponse); } else if (portletRequest instanceof EventRequest) { sess.firePortletEventRequest(uI, (EventRequest) portletRequest, (EventResponse) portletResponse); } else if (portletRequest instanceof ResourceRequest) { sess.firePortletResourceRequest(uI, (ResourceRequest) portletRequest, (ResourceResponse) portletResponse); } return false; }
3.68
hbase_SimpleRpcServer_stop
/** Stops the service. No new calls will be handled after this is called. */ @Override public synchronized void stop() { LOG.info("Stopping server on " + port); running = false; if (authTokenSecretMgr != null) { authTokenSecretMgr.stop(); authTokenSecretMgr = null; } listener.interrupt(); listener.doStop(); responder.interrupt(); scheduler.stop(); notifyAll(); }
3.68
flink_AbstractBytesMultiMap_reset
/** reset the map's record and bucket area's memory segments for reusing. */ @Override public void reset() { super.reset(); // reset the record segments. recordArea.reset(); numKeys = 0; }
3.68
framework_InfoSection_getControls
/* * (non-Javadoc) * * @see com.vaadin.client.debug.internal.Section#getControls() */ @Override public Widget getControls() { return controls; }
3.68
hbase_ThrottledInputStream_getBytesPerSec
/** * Getter for the read-rate from this stream, since creation. Calculated as * bytesRead/elapsedTimeSinceStart. * @return Read rate, in bytes/sec. */ public long getBytesPerSec() { long elapsed = (EnvironmentEdgeManager.currentTime() - startTime) / 1000; if (elapsed == 0) { return bytesRead; } else { return bytesRead / elapsed; } }
3.68
flink_MailboxMetricsController_isLatencyMeasurementStarted
/** * Indicates if latency measurement has been started. * * @return True if latency measurement has been started. */ public boolean isLatencyMeasurementStarted() { return started; }
3.68
flink_SharedBufferAccessor_releaseEvent
/** * Decreases the reference counter for the given event so that it can be removed once the * reference counter reaches 0. * * @param eventId id of the event * @throws Exception Thrown if the system cannot access the state. */ public void releaseEvent(EventId eventId) throws Exception { Lockable<V> eventWrapper = sharedBuffer.getEvent(eventId); if (eventWrapper != null) { if (eventWrapper.release()) { sharedBuffer.removeEvent(eventId); } else { sharedBuffer.upsertEvent(eventId, eventWrapper); } } }
3.68
hadoop_MarshalledCredentialProvider_createCredentials
/** * Perform the binding, looking up the DT and parsing it. * @return true if there were some credentials * @throws CredentialInitializationException validation failure * @throws IOException on a failure */ @Override protected AwsCredentials createCredentials(final Configuration config) throws IOException { return toAWSCredentials(credentials, typeRequired, component); }
3.68
hadoop_OperationDuration_value
/** * Get the duration in milliseconds. * * <p> * This will be 0 until a call * to {@link #finished()} has been made. * </p> * @return the currently recorded duration. */ public long value() { return finished - started; }
3.68