Columns: name (string, length 12 to 178); code_snippet (string, length 8 to 36.5k); score (float64, range 3.26 to 3.68)
morf_HumanReadableStatementHelper_generateChangeIndexString
/** * Generates a human-readable "Change Index" string. * * @param tableName the name of the table to change the index on * @param fromIndex the original index definition * @param toIndex the replacement definition for the index * @return a string containing the human-readable version of the action */ public static String generateChangeIndexString(final String tableName, final Index fromIndex, final Index toIndex) { if (fromIndex.isUnique() == toIndex.isUnique()) { return String.format("Change %s index %s on %s", generateUniqueIndexString(fromIndex), fromIndex.getName(), tableName); } return String.format("Change %s index %s on %s to be %s", generateUniqueIndexString(fromIndex), fromIndex.getName(), tableName, generateUniqueIndexString(toIndex)); }
3.68
pulsar_ProducerConfiguration_getProducerName
/** * @return the configured custom producer name or null if no custom name was specified * @since 1.20.0 */ public String getProducerName() { return conf.getProducerName(); }
3.68
pulsar_BrokerVersionFilter_getLatestVersionNumber
/** * Get the most recent broker version number from the broker lookup data of all the running brokers. * The version number comes from the build artifact in the pom and is added to the package when it is built by Maven. * * @param brokerMap * The BrokerId -> BrokerLookupData Map. * @return The most recent broker version * @throws BrokerFilterBadVersionException * If the most recent version is undefined (e.g., a bad broker version was encountered or a broker * does not have a version string in its lookup data). */ public Version getLatestVersionNumber(Map<String, BrokerLookupData> brokerMap) throws BrokerFilterBadVersionException { if (brokerMap.isEmpty()) { throw new BrokerFilterBadVersionException( "Unable to determine latest version since broker version map was empty"); } Version latestVersion = null; for (Map.Entry<String, BrokerLookupData> entry : brokerMap.entrySet()) { String brokerId = entry.getKey(); String version = entry.getValue().brokerVersion(); if (null == version || version.isEmpty()) { log.warn("No version string in lookup data for broker [{}]; disabling PreferLaterVersions feature", brokerId); // Trigger the load manager to reset all the brokers to the original set throw new BrokerFilterBadVersionException("No version string in lookup data for broker \"" + brokerId + "\""); } Version brokerVersionVersion; try { brokerVersionVersion = Version.valueOf(version); } catch (Exception x) { log.warn("Invalid version string in lookup data for broker [{}]: [{}];" + " disabling PreferLaterVersions feature", brokerId, version); // Trigger the load manager to reset all the brokers to the original set throw new BrokerFilterBadVersionException("Invalid version string in lookup data for broker \"" + brokerId + "\": \"" + version + "\""); } if (latestVersion == null) { latestVersion = brokerVersionVersion; } else if (Version.BUILD_AWARE_ORDER.compare(latestVersion, brokerVersionVersion) < 0) { latestVersion = brokerVersionVersion; } } return latestVersion; }
3.68
hadoop_ResourceEstimatorService_parseFile
/** * Parse the log file. See also {@link LogParser#parseStream(InputStream)}. * * @param logFile file/directory of the log to be parsed. * @throws IOException if it fails to parse the log. * @throws SkylineStoreException if it fails to add history to the * {@link SkylineStore}. * @throws ResourceEstimatorException if the {@link LogParser} * is not initialized. */ @POST @Path("/translator/{logFile : .+}") public void parseFile( @PathParam("logFile") String logFile) throws IOException, SkylineStoreException, ResourceEstimatorException { logParserUtil.parseLog(logFile); LOGGER.debug("Parse logFile: {}.", logFile); }
3.68
hadoop_FileIoProvider_deleteWithExistsCheck
/** * Delete a file, first checking to see if it exists. * @param volume target volume. null if unavailable. * @param f File to delete * @return true if the file was successfully deleted or if it never * existed. */ public boolean deleteWithExistsCheck(@Nullable FsVolumeSpi volume, File f) { final long begin = profilingEventHook.beforeMetadataOp(volume, DELETE); try { faultInjectorEventHook.beforeMetadataOp(volume, DELETE); boolean deleted = !f.exists() || f.delete(); profilingEventHook.afterMetadataOp(volume, DELETE, begin); if (!deleted) { LOG.warn("Failed to delete file {}", f); } return deleted; } catch (Exception e) { onFailure(volume, begin); throw e; } }
3.68
flink_ConnectedStreams_flatMap
/** * Applies a CoFlatMap transformation on a {@link ConnectedStreams} and maps the output to a * common type. The transformation calls a {@link CoFlatMapFunction#flatMap1} for each element * of the first input and {@link CoFlatMapFunction#flatMap2} for each element of the second * input. Each CoFlatMapFunction call returns any number of elements including none. * * @param coFlatMapper The CoFlatMapFunction used to jointly transform the two input DataStreams * @param outputType {@link TypeInformation} for the result type of the function. * @return The transformed {@link DataStream} */ public <R> SingleOutputStreamOperator<R> flatMap( CoFlatMapFunction<IN1, IN2, R> coFlatMapper, TypeInformation<R> outputType) { return transform( "Co-Flat Map", outputType, new CoStreamFlatMap<>(inputStream1.clean(coFlatMapper))); }
3.68
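A minimal usage sketch of the two-argument overload above, assuming an existing StreamExecutionEnvironment named env; the stream contents and output type are illustrative:

DataStream<String> words = env.fromElements("a", "b");
DataStream<Integer> counts = env.fromElements(1, 2);
DataStream<String> merged = words.connect(counts).flatMap(
    new CoFlatMapFunction<String, Integer, String>() {
        @Override
        public void flatMap1(String value, Collector<String> out) {
            out.collect("word: " + value); // emit zero or more elements per input
        }

        @Override
        public void flatMap2(Integer value, Collector<String> out) {
            out.collect("count: " + value);
        }
    },
    Types.STRING); // explicit TypeInformation for the result type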
hbase_HBaseTestingUtility_createRandomTable
/** Creates a random table with the given parameters */ public Table createRandomTable(TableName tableName, final Collection<String> families, final int maxVersions, final int numColsPerRow, final int numFlushes, final int numRegions, final int numRowsPerFlush) throws IOException, InterruptedException { LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, " + numFlushes + " storefiles per region, " + numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions + "\n"); final int numCF = families.size(); final byte[][] cfBytes = new byte[numCF][]; { int cfIndex = 0; for (String cf : families) { cfBytes[cfIndex++] = Bytes.toBytes(cf); } } final int actualStartKey = 0; final int actualEndKey = Integer.MAX_VALUE; final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions; final int splitStartKey = actualStartKey + keysPerRegion; final int splitEndKey = actualEndKey - keysPerRegion; final String keyFormat = "%08x"; final Table table = createTable(tableName, cfBytes, maxVersions, Bytes.toBytes(String.format(keyFormat, splitStartKey)), Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions); if (hbaseCluster != null) { getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); } BufferedMutator mutator = getConnection().getBufferedMutator(tableName); final Random rand = ThreadLocalRandom.current(); for (int iFlush = 0; iFlush < numFlushes; ++iFlush) { for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) { final byte[] row = Bytes.toBytes( String.format(keyFormat, actualStartKey + rand.nextInt(actualEndKey - actualStartKey))); Put put = new Put(row); Delete del = new Delete(row); for (int iCol = 0; iCol < numColsPerRow; ++iCol) { final byte[] cf = cfBytes[rand.nextInt(numCF)]; final long ts = rand.nextInt(); final byte[] qual = Bytes.toBytes("col" + iCol); if (rand.nextBoolean()) { final byte[] value = Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" + ts + "_random_" + rand.nextLong()); put.addColumn(cf, qual, ts, value); } else if (rand.nextDouble() < 0.8) { del.addColumn(cf, qual, ts); } else { del.addColumns(cf, qual, ts); } } if (!put.isEmpty()) { mutator.mutate(put); } if (!del.isEmpty()) { mutator.mutate(del); } } LOG.info("Initiating flush #" + iFlush + " for table " + tableName); mutator.flush(); if (hbaseCluster != null) { getMiniHBaseCluster().flushcache(table.getName()); } } mutator.close(); return table; }
3.68
morf_WhenCondition_deepCopy
/** * Creates a deep copy of this {@link WhenCondition}. * * @return deep copy of this condition */ public WhenCondition deepCopy() { return new WhenCondition(this, DeepCopyTransformations.noTransformation()); }
3.68
morf_AnalyseTable_accept
/** * @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor) */ @Override public void accept(SchemaChangeVisitor visitor) { visitor.visit(this); }
3.68
hadoop_RenameFailedException_withExitCode
/** * Set the exit code. * @param code exit code to raise * @return the exception */ public RenameFailedException withExitCode(boolean code) { this.exitCode = code; return this; }
3.68
hadoop_Server_getResource
/** * Convenience method that returns a resource as inputstream from the * classpath. * <p> * It first attempts to use the Thread's context classloader and if not * set it uses the <code>Server</code> class classloader. * * @param name resource to retrieve. * * @return inputstream with the resource, <code>null</code> if the resource does not * exist. */ static InputStream getResource(String name) { Check.notEmpty(name, "name"); ClassLoader cl = Thread.currentThread().getContextClassLoader(); if (cl == null) { cl = Server.class.getClassLoader(); } return cl.getResourceAsStream(name); }
3.68
framework_VScrollTable_updateWidth
/** For internal use only. May be removed or replaced in the future. */ public void updateWidth() { if (!isVisible()) { /* * Do not update size when the table is hidden as all column widths * will be set to zero and they won't be recalculated when the table * is set visible again (until the size changes again) */ return; } if (!isDynamicWidth()) { int innerPixels = getOffsetWidth() - getBorderWidth(); if (innerPixels < 0) { innerPixels = 0; } setContentWidth(innerPixels); // readjust undefined width columns triggerLazyColumnAdjustment(false); } else { sizeNeedsInit = true; // readjust undefined width columns triggerLazyColumnAdjustment(false); } /* * setting width may affect whether the component has scrollbars -> needs * scrolling or not */ setProperTabIndex(); }
3.68
hadoop_MetricsAnnotations_makeSource
/** * Make a metrics source from an annotated object. * @param source the annotated object. * @return a metrics source */ public static MetricsSource makeSource(Object source) { return new MetricsSourceBuilder(source, DefaultMetricsFactory.getAnnotatedMetricsFactory()).build(); }
3.68
framework_Profiler_enterChild
/** * Creates a new child node or retrieves an existing child and updates * its total time and hit count. * * @param name * the name of the child * @param timestamp * the timestamp for when the node is entered * @return the child node object */ public Node enterChild(String name, double timestamp) { Node child = children.get(name); if (child == null) { child = new Node(name); children.put(name, child); } child.enterTime = timestamp; child.count++; return child; }
3.68
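The null-check-and-put above is the classic get-or-create idiom; a standalone sketch of the same pattern with Map.computeIfAbsent (the Node class here is a stand-in, not Vaadin's internal one):

import java.util.HashMap;
import java.util.Map;

class Node {
    final String name;
    double enterTime;
    int count;

    Node(String name) { this.name = name; }
}

class ProfilerSketch {
    private final Map<String, Node> children = new HashMap<>();

    Node enterChild(String name, double timestamp) {
        // computeIfAbsent collapses the lookup and insertion into one call
        Node child = children.computeIfAbsent(name, Node::new);
        child.enterTime = timestamp;
        child.count++;
        return child;
    }
}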
morf_AbstractSelectStatementBuilder_innerJoin
/** * @param subSelect the sub select statement to join on to * @return this, for method chaining. * * @deprecated Use {@link #crossJoin(SelectStatement)} to do a cross join; * or add join conditions for {@link #innerJoin(SelectStatement, Criterion)} * to make this an inner join. */ @Deprecated public T innerJoin(SelectStatement subSelect) { return crossJoin(subSelect); }
3.68
framework_FocusUtil_focusOnFirstFocusableElement
/** * Moves the focus to the first focusable child of given parent element. * * @param parent * the parent element * @since 8.1.7 */ public static void focusOnFirstFocusableElement(Element parent) { Element[] focusableChildren = getFocusableChildren(parent); if (focusableChildren.length == 0) { return; } // find the first element that doesn't have "disabled" in the class name for (int i = 0; i < focusableChildren.length; i++) { Element element = focusableChildren[i]; String classes = element.getAttribute("class"); if (classes == null || !classes.toLowerCase().contains("disabled")) { element.focus(); return; } } }
3.68
flink_Tuple6_of
/** * Creates a new tuple and assigns the given values to the tuple's fields. This is more * convenient than using the constructor, because the compiler can infer the generic type * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new * Tuple3<Integer, Double, String>(n, x, s)} */ public static <T0, T1, T2, T3, T4, T5> Tuple6<T0, T1, T2, T3, T4, T5> of( T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5) { return new Tuple6<>(f0, f1, f2, f3, f4, f5); }
3.68
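A usage sketch: the compiler infers all six type arguments from the argument values, which is the convenience the Javadoc describes; the values are illustrative:

Tuple6<Integer, String, Double, Long, Boolean, Character> record =
    Tuple6.of(1, "id-42", 3.14, 100L, true, 'x');
System.out.println(record.f1); // prints "id-42"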
hbase_TimeRange_getMax
/** Returns the biggest timestamp that should be considered */ public long getMax() { return maxStamp; }
3.68
framework_VDragEvent_createDragImage
/** * Automatically tries to create a proxy image from given element. * * @param element * the original element that is dragged * @param alignImageToEvent * if true, proxy image is aligned to start event, else next to * mouse cursor * @since 7.2 */ public void createDragImage(Element element, boolean alignImageToEvent) { createDragImage(DOM.asOld(element), alignImageToEvent); }
3.68
flink_ExecutionEnvironment_getExecutionEnvironment
/** * Creates an execution environment that represents the context in which the program is * currently executed. If the program is invoked standalone, this method returns a local * execution environment, as returned by {@link #createLocalEnvironment()}. If the program is * invoked from within the command line client to be submitted to a cluster, this method returns * the execution environment of this cluster. * * @return The execution environment of the context in which the program is executed. */ public static ExecutionEnvironment getExecutionEnvironment() { return Utils.resolveFactory(threadLocalContextEnvironmentFactory, contextEnvironmentFactory) .map(ExecutionEnvironmentFactory::createExecutionEnvironment) .orElseGet(ExecutionEnvironment::createLocalEnvironment); }
3.68
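A typical entry point built on this method, as a sketch; the same program gets a local environment when run in the IDE and the cluster's environment when submitted through the CLI:

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<String> lines = env.fromElements("hello", "world");
lines.print();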
hbase_LocalHBaseCluster_startup
/** * Start the cluster. */ public void startup() throws IOException { JVMClusterUtil.startup(this.masterThreads, this.regionThreads); }
3.68
framework_Design_getComponentMapper
/** * Gets the currently used component mapper. * * @see #setComponentMapper(ComponentMapper) * * @return the component mapper * * @since 7.5.0 */ public static ComponentMapper getComponentMapper() { return componentMapper; }
3.68
hbase_AsyncNonMetaRegionLocator_getNumberOfCachedRegionLocations
// only used for testing whether we have cached the location for a table. int getNumberOfCachedRegionLocations(TableName tableName) { TableCache tableCache = cache.get(tableName); if (tableCache == null) { return 0; } return tableCache.regionLocationCache.getAll().stream() .mapToInt(RegionLocations::numNonNullElements).sum(); }
3.68
hadoop_TypedBytesInput_readMapHeader
/** * Reads the header following a <code>Type.MAP</code> code. * @return the number of key-value pairs in the map * @throws IOException if an I/O error occurs while reading */ public int readMapHeader() throws IOException { return in.readInt(); }
3.68
morf_GraphBasedUpgradeBuilder_produceNodes
/** * Maps instances of {@link UpgradeStep} to instances of {@link GraphBasedUpgradeNode}. * @param upgradeSteps to be mapped * @param mapper to be used * @return list of {@link GraphBasedUpgradeNode} instances */ private List<GraphBasedUpgradeNode> produceNodes(List<UpgradeStep> upgradeSteps, UpgradeStepToUpgradeNode mapper) { return upgradeSteps.stream(). map(mapper). sorted(Comparator.comparing(GraphBasedUpgradeNode::getSequence)). collect(Collectors.toList()); }
3.68
flink_TypeSerializerSchemaCompatibility_isIncompatible
/** * Returns whether or not the type of the compatibility is {@link Type#INCOMPATIBLE}. * * @return whether or not the type of the compatibility is {@link Type#INCOMPATIBLE}. */ public boolean isIncompatible() { return resultType == Type.INCOMPATIBLE; }
3.68
flink_Predicates_arePublicFinalOfType
/** * Tests that the given field is {@code public final}, not {@code static} and has the given * fully qualified type name of {@code fqClassName}. */ public static DescribedPredicate<JavaField> arePublicFinalOfType(String fqClassName) { return is(ofType(fqClassName)).and(isPublic()).and(isFinal()).and(isNotStatic()); }
3.68
hbase_MasterObserver_preUpdateReplicationPeerConfig
/** * Called before updating the peer config for the specified peer. * @param ctx the environment to interact with the framework and master * @param peerId a short name that identifies the peer * @param peerConfig the new config for the peer */ default void preUpdateReplicationPeerConfig( final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId, ReplicationPeerConfig peerConfig) throws IOException { }
3.68
flink_MutableRecordAndPosition_setNext
/** Sets the next record of a sequence. This increments the {@code recordSkipCount} by one. */ public void setNext(E record) { this.record = record; this.recordSkipCount++; }
3.68
flink_RocksDBNativeMetricOptions_enableCurSizeAllMemTables
/** Returns approximate size of active and unflushed immutable memtables (bytes). */ public void enableCurSizeAllMemTables() { this.properties.add(RocksDBProperty.CurSizeAllMemTables.getRocksDBProperty()); }
3.68
framework_VTree_selectAllChildren
/** * Selects all the open children of a node. * * @param node * The parent node * @param includeRootNode * true to also select the parent node itself */ private void selectAllChildren(TreeNode node, boolean includeRootNode) { if (includeRootNode) { node.setSelected(true); selectedIds.add(node.key); } for (TreeNode child : node.getChildren()) { if (!child.isLeaf() && child.getState()) { selectAllChildren(child, true); } else { child.setSelected(true); selectedIds.add(child.key); } } selectionHasChanged = true; }
3.68
hbase_RegionLocations_getRegionLocationByRegionName
/** * Returns the region location from the list for matching regionName, which can be regionName or * encodedRegionName * @param regionName regionName or encodedRegionName * @return HRegionLocation found or null */ public HRegionLocation getRegionLocationByRegionName(byte[] regionName) { for (HRegionLocation loc : locations) { if (loc != null) { if ( Bytes.equals(loc.getRegion().getRegionName(), regionName) || Bytes.equals(loc.getRegion().getEncodedNameAsBytes(), regionName) ) { return loc; } } } return null; }
3.68
flink_ExecutionEnvironment_getNumberOfExecutionRetries
/** * Gets the number of times the system will try to re-execute failed tasks. A value of {@code * -1} indicates that the system default value (as defined in the configuration) should be used. * * @return The number of times the system will try to re-execute failed tasks. * @deprecated This method will be replaced by {@link #getRestartStrategy}. The {@link * RestartStrategies.FixedDelayRestartStrategyConfiguration} contains the number of * execution retries. */ @Deprecated @PublicEvolving public int getNumberOfExecutionRetries() { return config.getNumberOfExecutionRetries(); }
3.68
hadoop_Environment_toArray
// to be used with Runtime.exec(String[] cmdarray, String[] envp) String[] toArray() { String[] arr = new String[super.size()]; Enumeration<Object> it = super.keys(); int i = -1; while (it.hasMoreElements()) { String key = (String) it.nextElement(); String val = (String) get(key); i++; arr[i] = key + "=" + val; } return arr; }
3.68
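The manual index bookkeeping above can be written more directly with streams; a behavior-preserving sketch, assuming Environment extends Properties as in Hadoop Streaming, so entrySet() yields the same key/value pairs as the keys() enumeration:

String[] toArray() {
    // build "key=value" entries for Runtime.exec(String[] cmdarray, String[] envp)
    return entrySet().stream()
        .map(e -> e.getKey() + "=" + e.getValue())
        .toArray(String[]::new);
}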
hibernate-validator_JavaBeanGetter_run
/** * Runs the given privileged action, using a privileged block if required. * <p> * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary * privileged actions within HV's protection domain. */ @IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17") private static <T> T run(PrivilegedAction<T> action) { return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run(); }
3.68
flink_DynamicSourceUtils_createMetadataKeysToMetadataColumnsMap
/** * Returns a map recording the mapping between metadata keys and metadata columns in the input * schema. */ public static Map<String, MetadataColumn> createMetadataKeysToMetadataColumnsMap( ResolvedSchema schema) { final List<MetadataColumn> metadataColumns = extractMetadataColumns(schema); Map<String, MetadataColumn> metadataKeysToMetadataColumns = new HashMap<>(); for (MetadataColumn column : metadataColumns) { String metadataKey = column.getMetadataKey().orElse(column.getName()); // After resolving, every metadata column has the unique metadata key. metadataKeysToMetadataColumns.put(metadataKey, column); } return metadataKeysToMetadataColumns; }
3.68
flink_QueryableStateClient_shutdownAndHandle
/** * Shuts down the client and returns a {@link CompletableFuture} that will be completed when the * shutdown process is completed. * * <p>If an exception is thrown for any reason, then the returned future will be completed * exceptionally with that exception. * * @return A {@link CompletableFuture} for further handling of the shutdown result. */ public CompletableFuture<?> shutdownAndHandle() { return client.shutdown(); }
3.68
flink_BufferBuilder_createBufferConsumerFromBeginning
/** * This method always creates a {@link BufferConsumer} starting from position 0 of {@link * MemorySegment}. * * @return created matching instance of {@link BufferConsumer} to this {@link BufferBuilder}. */ public BufferConsumer createBufferConsumerFromBeginning() { return createBufferConsumer(0); }
3.68
framework_VaadinFinderLocatorStrategy_getElementByPath
/** * {@inheritDoc} */ @Override public Element getElementByPath(String path) { List<Element> elements = getElementsByPath(path); if (elements.isEmpty()) { return null; } return elements.get(0); }
3.68
morf_DatabaseTypeIdentifierTestUtils_mockDataSourceFor
/** * Create a mock data source reporting the correct {@link DatabaseMetaData}. * * @param product {@link DatabaseMetaData#getDatabaseProductName()} * @param versionString {@link DatabaseMetaData#getDatabaseProductVersion()} * @param versionMajor {@link DatabaseMetaData#getDatabaseMajorVersion()} * @param versionMinor {@link DatabaseMetaData#getDatabaseMinorVersion()} * @return A data source which will report the above (and nothing else). * @throws SQLException as part of mocked signature. */ public static DataSource mockDataSourceFor(String product, String versionString, int versionMajor, int versionMinor) throws SQLException { DataSource dataSource = mock(DataSource.class, RETURNS_DEEP_STUBS); when(dataSource.getConnection().getMetaData().getDatabaseProductName()).thenReturn(product); when(dataSource.getConnection().getMetaData().getDatabaseProductVersion()).thenReturn(versionString); when(dataSource.getConnection().getMetaData().getDatabaseMajorVersion()).thenReturn(versionMajor); when(dataSource.getConnection().getMetaData().getDatabaseMinorVersion()).thenReturn(versionMinor); return dataSource; }
3.68
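A usage sketch in a test; the product name and version values are illustrative, and the test method is assumed to declare throws SQLException:

DataSource dataSource = mockDataSourceFor("H2", "1.4.200 (2019-10-14)", 1, 4);
assertEquals("H2", dataSource.getConnection().getMetaData().getDatabaseProductName());
assertEquals(1, dataSource.getConnection().getMetaData().getDatabaseMajorVersion());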
hbase_ScannerContext_setSkippingRow
/** * @param skippingRow set true to cause disabling of collecting per-cell progress or enforcing any * limits. This is used when trying to skip over all cells in a row, in which * case those cells are thrown away so should not count towards progress. */ void setSkippingRow(boolean skippingRow) { this.skippingRow = skippingRow; }
3.68
hibernate-validator_Configuration_isVerbose
/** * Whether logging information shall be put out in a verbose way or not. * * @return {@code true} if logging information shall be put out in a verbose way, {@code false} otherwise */ public boolean isVerbose() { return verbose; }
3.68
flink_ChannelReaderInputView_close
/** * Closes this InputView, closing the underlying reader and returning all memory segments. * * @return A list containing all memory segments originally supplied to this view. * @throws IOException Thrown, if the underlying reader could not be properly closed. */ @Override public List<MemorySegment> close() throws IOException { if (this.closed) { throw new IllegalStateException("Already closed."); } this.closed = true; // re-collect all memory segments ArrayList<MemorySegment> list = this.freeMem; final MemorySegment current = getCurrentSegment(); if (current != null) { list.add(current); } clear(); // close the writer and gather all segments final LinkedBlockingQueue<MemorySegment> queue = this.reader.getReturnQueue(); this.reader.close(); while (list.size() < this.numSegments) { final MemorySegment m = queue.poll(); if (m == null) { // we get null if the queue is empty. that should not be the case if the reader was // properly closed. throw new RuntimeException("Bug in ChannelReaderInputView: MemorySegments lost."); } list.add(m); } return list; }
3.68
pulsar_ProxyService_startProxyExtensions
// This call is used for starting additional proxy extensions public void startProxyExtensions(Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> protocolHandlers, ServerBootstrap serverBootstrap) { protocolHandlers.forEach((extensionName, initializers) -> { initializers.forEach((address, initializer) -> { try { startProxyExtension(extensionName, address, initializer, serverBootstrap); } catch (IOException e) { LOG.error("{}", e.getMessage(), e.getCause()); throw new RuntimeException(e.getMessage(), e.getCause()); } }); }); }
3.68
hadoop_RejectedSchedulingRequest_newInstance
/** * Create new RejectedSchedulingRequest. * @param reason Rejection Reason. * @param request Rejected Scheduling Request. * @return RejectedSchedulingRequest. */ public static RejectedSchedulingRequest newInstance(RejectionReason reason, SchedulingRequest request) { RejectedSchedulingRequest instance = Records.newRecord(RejectedSchedulingRequest.class); instance.setReason(reason); instance.setRequest(request); return instance; }
3.68
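A usage sketch; request is assumed to be an existing SchedulingRequest, and the reason is one of YARN's RejectionReason constants:

RejectedSchedulingRequest rejected = RejectedSchedulingRequest.newInstance(
    RejectionReason.COULD_NOT_PLACE_ON_NODE, request);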
framework_BrowserWindowOpener_setParameter
/** * Sets a parameter that will be added to the query string of the opened * URI. If the window is opened to contain a Vaadin UI, the parameter will * be available using {@link VaadinRequest#getParameter(String)} e.g. using * the request instance passed to {@link UI#init(VaadinRequest)}. * <p> * Setting a parameter with the same name as a previously set parameter will * replace the previous value. * * @param name * the name of the parameter to add, not <code>null</code> * @param value * the value of the parameter to add, not <code>null</code> * * @see #removeParameter(String) * @see #getParameterNames() * @see #getParameter(String) */ public void setParameter(String name, String value) { if (name == null || value == null) { throw new IllegalArgumentException("Null not allowed"); } getState().parameters.put(name, value); }
3.68
flink_RequestedLocalProperties_reset
/** This method resets the local properties to a state where no properties are given. */ public void reset() { this.ordering = null; this.groupedFields = null; }
3.68
hadoop_SchedulingRequest_build
/** * Return generated {@link SchedulingRequest} object. * * @return {@link SchedulingRequest} */ @Public @Unstable public SchedulingRequest build() { return schedulingRequest; }
3.68
hbase_Mutation_getTimestamp
/** * Method for retrieving the timestamp. */ public long getTimestamp() { return this.ts; }
3.68
hbase_ObserverContextImpl_createAndPrepare
/** * Instantiates a new ObserverContext instance if the passed reference is <code>null</code> and * sets the environment in the new or existing instance. This allows deferring the instantiation * of a ObserverContext until it is actually needed. * @param <E> The environment type for the context * @param env The coprocessor environment to set * @return An instance of <code>ObserverContext</code> with the environment set */ @Deprecated // TODO: Remove this method, ObserverContext should not depend on RpcServer public static <E extends CoprocessorEnvironment> ObserverContext<E> createAndPrepare(E env) { ObserverContextImpl<E> ctx = new ObserverContextImpl<>(RpcServer.getRequestUser().orElse(null)); ctx.prepare(env); return ctx; }
3.68
pulsar_AbstractPushSource_notifyError
/** * Allows the source to notify errors asynchronously. * @param ex the error to report */ public void notifyError(Exception ex) { consume(new ErrorNotifierRecord(ex)); }
3.68
flink_Tuple9_copy
/** * Shallow tuple copy. * * @return A new Tuple with the same fields as this. */ @Override @SuppressWarnings("unchecked") public Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8> copy() { return new Tuple9<>( this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f6, this.f7, this.f8); }
3.68
flink_Pattern_greedy
/** * Specifies that this pattern is greedy. This means as many events as possible will be matched * to this pattern. * * @return The same pattern with {@link Quantifier#greedy} set to true. * @throws MalformedPatternException if the quantifier is not applicable to this pattern. */ public Pattern<T, F> greedy() { checkIfNoNotPattern(); checkIfNoGroupPattern(); this.quantifier.greedy(); return this; }
3.68
hbase_HBaseTestingUtility_waitUntilAllSystemRegionsAssigned
/** * Wait until all system tables' regions get assigned. */ public void waitUntilAllSystemRegionsAssigned() throws IOException { waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); }
3.68
hadoop_StagingCommitter_listPendingUploadsToCommit
/** * Get the list of pending uploads for this job attempt. * @param commitContext job context * @return a list of pending uploads. * @throws IOException Any IO failure */ @Override protected ActiveCommit listPendingUploadsToCommit( CommitContext commitContext) throws IOException { return listPendingUploads(commitContext, false); }
3.68
hadoop_ServiceLauncher_getUsageMessage
/** * Get the usage message, ideally dynamically. * @return the usage message */ protected String getUsageMessage() { String message = USAGE_MESSAGE; if (commandOptions != null) { message = USAGE_NAME + " " + commandOptions.toString() + " " + USAGE_SERVICE_ARGUMENTS; } return message; }
3.68
hadoop_DBNameNodeConnector_getBalancerNodeFromDataNode
/** * This function maps the required fields from DatanodeInfo to a DiskBalancerDataNode. * * @param nodeInfo the datanode to convert * @return DiskBalancerDataNode */ private DiskBalancerDataNode getBalancerNodeFromDataNode(DatanodeInfo nodeInfo) { Preconditions.checkNotNull(nodeInfo); DiskBalancerDataNode dbDataNode = new DiskBalancerDataNode(nodeInfo .getDatanodeUuid()); dbDataNode.setDataNodeIP(nodeInfo.getIpAddr()); dbDataNode.setDataNodeName(nodeInfo.getHostName()); dbDataNode.setDataNodePort(nodeInfo.getIpcPort()); return dbDataNode; }
3.68
hadoop_StartupProgressMetrics_addGauge
/** * Adds a gauge with a name built by using the specified phase's name as prefix * and then appending the specified suffix. * * @param builder MetricsRecordBuilder to receive counter * @param phase Phase to add * @param nameSuffix String suffix of metric name * @param descSuffix String suffix of metric description * @param value float gauge value */ private static void addGauge(MetricsRecordBuilder builder, Phase phase, String nameSuffix, String descSuffix, float value) { MetricsInfo metricsInfo = info(phase.getName() + nameSuffix, phase.getDescription() + descSuffix); builder.addGauge(metricsInfo, value); }
3.68
flink_SingleOutputStreamOperator_forceNonParallel
/** * Sets the parallelism and maximum parallelism of this operator to one, and marks the operator * so that its parallelism can no longer be changed to a value other than one. * * @return The operator with its parallelism fixed to one. */ @PublicEvolving public SingleOutputStreamOperator<T> forceNonParallel() { transformation.setParallelism(1); transformation.setMaxParallelism(1); nonParallel = true; return this; }
3.68
querydsl_GeometryExpressions_dwithin
/** * Returns true if the geometries are within the specified distance of one another. * For geometry, units are those of the spatial reference; for geography, units are in meters. * * @param expr1 geometry * @param expr2 other geometry * @param distance distance * @return true, if within distance of each other */ public static BooleanExpression dwithin(Expression<? extends Geometry> expr1, Expression<? extends Geometry> expr2, double distance) { return Expressions.booleanOperation(SpatialOps.DWITHIN, expr1, expr2, ConstantImpl.create(distance)); }
3.68
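A usage sketch with hypothetical geometry paths; per the Javadoc, the distance is interpreted in the units of the spatial reference system for geometries:

GeometryPath<Geometry> shop = new GeometryPath<>("shop");
GeometryPath<Geometry> home = new GeometryPath<>("home");
BooleanExpression nearby = GeometryExpressions.dwithin(shop, home, 500.0);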
framework_ButtonUpdateAltText_setup
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server. * VaadinRequest) */ @Override protected void setup(VaadinRequest request) { final Button btn = new Button(); btn.setId("button"); btn.setIcon(ICON, "initial alt text"); addComponent(btn); final CheckBox enable = new CheckBox("Enable alt text", true); enable.addValueChangeListener(event -> { if (event.getValue()) { btn.setIconAlternateText("alt text"); } else { btn.setIconAlternateText(""); } }); addComponent(enable); }
3.68
flink_TwoPhaseCommitSinkFunction_ignoreFailuresAfterTransactionTimeout
/** * If called, the sink will only log but not propagate exceptions thrown in {@link * #recoverAndCommit(Object)} if the transaction is older than a specified transaction timeout. * The start time of a transaction is determined by {@link System#currentTimeMillis()}. By * default, failures are propagated. */ protected TwoPhaseCommitSinkFunction<IN, TXN, CONTEXT> ignoreFailuresAfterTransactionTimeout() { this.ignoreFailuresAfterTransactionTimeout = true; return this; }
3.68
framework_AbstractTextFieldConnector_sendValueChange
/** * Sends the updated value and cursor position to the server, if either one * has changed. */ @Override public void sendValueChange() { if (!hasStateChanged()) { return; } lastSentCursorPosition = getAbstractTextField().getCursorPos(); getRpcProxy(AbstractTextFieldServerRpc.class).setText( getAbstractTextField().getValue(), lastSentCursorPosition); getState().text = getAbstractTextField().getValue(); }
3.68
hbase_BulkLoadHFilesTool_loadHFileQueue
/** * Used by the replication sink to load the hfiles from the source cluster. It does the following, * <ol> * <li>{@link #groupOrSplitPhase(AsyncClusterConnection, TableName, ExecutorService, Deque, List)} * </li> * <li>{@link #bulkLoadPhase(AsyncClusterConnection, TableName, Deque, Multimap, boolean, Map)} * </li> * </ol> * @param conn Connection to use * @param tableName Table to which these hfiles should be loaded to * @param queue queue of {@code LoadQueueItem}s with hfiles yet to be loaded * @param copyFiles whether to copy the hfiles rather than move them */ public void loadHFileQueue(AsyncClusterConnection conn, TableName tableName, Deque<LoadQueueItem> queue, boolean copyFiles) throws IOException { ExecutorService pool = createExecutorService(); try { Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(conn, tableName, pool, queue, FutureUtils.get(conn.getRegionLocator(tableName).getStartEndKeys())).getFirst(); bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, null); } finally { pool.shutdown(); } }
3.68
hbase_MutableRegionInfo_getReplicaId
/** * Returns the region replica id * @return returns region replica id */ @Override public int getReplicaId() { return replicaId; }
3.68
flink_AggregatorWithName_getAggregator
/** * Gets the aggregator. * * @return The aggregator. */ public Aggregator<T> getAggregator() { return aggregator; }
3.68
AreaShop_FileManager_loadDefaultFile
/** * Load the default.yml file. * @return true if it has been loaded successfully, otherwise false */ public boolean loadDefaultFile() { boolean result = true; File defaultFile = new File(defaultPath); // Save the file from the jar to disk if it does not exist if(!defaultFile.exists()) { try( InputStream input = plugin.getResource(AreaShop.defaultFile); OutputStream output = new FileOutputStream(defaultFile) ) { int read; byte[] bytes = new byte[1024]; while((read = input.read(bytes)) != -1) { output.write(bytes, 0, read); } AreaShop.info("File with default region settings has been saved, should only happen on first startup"); } catch(IOException e) { AreaShop.warn("Something went wrong saving the default region settings: " + defaultFile.getAbsolutePath()); } } // Load default.yml from the plugin folder, and as backup the default one try( InputStreamReader custom = new InputStreamReader(new FileInputStream(defaultFile), Charsets.UTF_8); InputStreamReader normal = new InputStreamReader(plugin.getResource(AreaShop.defaultFile), Charsets.UTF_8) ) { defaultConfig = YamlConfiguration.loadConfiguration(custom); if(defaultConfig.getKeys(false).isEmpty()) { AreaShop.warn("File 'default.yml' is empty, check for errors in the log."); result = false; } defaultConfigFallback = YamlConfiguration.loadConfiguration(normal); } catch(IOException e) { result = false; } return result; }
3.68
hbase_PrivateCellUtil_getValueAsLong
/** * Converts the value bytes of the given cell into a long value * @return value as long */ public static long getValueAsLong(Cell cell) { if (cell instanceof ByteBufferExtendedCell) { return ByteBufferUtils.toLong(((ByteBufferExtendedCell) cell).getValueByteBuffer(), ((ByteBufferExtendedCell) cell).getValuePosition()); } return Bytes.toLong(cell.getValueArray(), cell.getValueOffset()); }
3.68
hadoop_BlockBlobAppendStream_execute
/** * Execute command. */ public void execute() throws InterruptedException, IOException { if (committedBlobLength.get() >= getCommandBlobOffset()) { LOG.debug("commit already applied for {}", key); return; } if (lastBlock == null) { LOG.debug("nothing to commit for {}", key); return; } LOG.debug("active commands: {} for {}", activeBlockCommands.size(), key); for (UploadCommand activeCommand : activeBlockCommands) { if (activeCommand.getCommandBlobOffset() < getCommandBlobOffset()) { activeCommand.dump(); activeCommand.awaitAsDependent(); } else { break; } } // stop all uploads until the block list is committed uploadingSemaphore.acquire(MAX_NUMBER_THREADS_IN_THREAD_POOL); BlockEntry uncommittedBlock; do { uncommittedBlock = uncommittedBlockEntries.poll(); blockEntries.add(uncommittedBlock); } while (uncommittedBlock != lastBlock); if (blockEntries.size() > activateCompactionBlockCount) { LOG.debug("Block compaction: activated with {} blocks for {}", blockEntries.size(), key); // Block compaction long startCompaction = System.nanoTime(); blockCompaction(); LOG.debug("Block compaction finished for {} ms with {} blocks for {}", TimeUnit.NANOSECONDS.toMillis( System.nanoTime() - startCompaction), blockEntries.size(), key); } writeBlockListRequestInternal(); uploadingSemaphore.release(MAX_NUMBER_THREADS_IN_THREAD_POOL); // remove the commands for previously committed blocks for (Iterator<UploadCommand> it = activeBlockCommands.iterator(); it.hasNext();) { UploadCommand activeCommand = it.next(); if (activeCommand.getCommandBlobOffset() <= getCommandBlobOffset()) { it.remove(); } else { break; } } committedBlobLength.set(getCommandBlobOffset()); }
3.68
morf_Join_getSubSelect
/** * @return the subSelect */ public SelectStatement getSubSelect() { return subSelect; }
3.68
hbase_NettyFutureUtils_consume
/** * Log the error if the future indicates any failure. */ @SuppressWarnings("FutureReturnValueIgnored") public static void consume(Future<?> future) { future.addListener(NettyFutureUtils::loggingWhenError); }
3.68
morf_SqlDialect_getFromDummyTable
/** * An additional clause to use in SELECT statements where there is no select * source, which allows us to include "FROM &lt;dummy table&gt;" on RDBMSes such as * Oracle where selecting from no table is not allowed but the RDBMS provides * a dummy table (such as "dual"). * * @return the additional clause. */ protected String getFromDummyTable() { return StringUtils.EMPTY; }
3.68
graphhopper_EdgeBasedNodeContractor_findAndHandlePrepareShortcuts
/** * This method performs witness searches between all nodes adjacent to the given node and calls the * given handler for all required shortcuts. */ private void findAndHandlePrepareShortcuts(int node, PrepareShortcutHandler shortcutHandler, int maxPolls, EdgeBasedWitnessPathSearcher.Stats wpsStats) { stats().nodes++; addedShortcuts.clear(); sourceNodes.clear(); // traverse incoming edges/shortcuts to find all the source nodes PrepareGraphEdgeIterator incomingEdges = inEdgeExplorer.setBaseNode(node); while (incomingEdges.next()) { final int sourceNode = incomingEdges.getAdjNode(); if (sourceNode == node) continue; // make sure we process each source node only once if (!sourceNodes.add(sourceNode)) continue; // for each source node we need to look at every incoming original edge and check which target edges are reachable PrepareGraphOrigEdgeIterator origInIter = sourceNodeOrigInEdgeExplorer.setBaseNode(sourceNode); while (origInIter.next()) { int origInKey = reverseEdgeKey(origInIter.getOrigEdgeKeyLast()); // we search 'bridge paths' leading to the target edges IntObjectMap<BridgePathFinder.BridePathEntry> bridgePaths = bridgePathFinder.find(origInKey, sourceNode, node); if (bridgePaths.isEmpty()) continue; witnessPathSearcher.initSearch(origInKey, sourceNode, node, wpsStats); for (IntObjectCursor<BridgePathFinder.BridePathEntry> bridgePath : bridgePaths) { if (!Double.isFinite(bridgePath.value.weight)) throw new IllegalStateException("Bridge entry weights should always be finite"); int targetEdgeKey = bridgePath.key; dijkstraSW.start(); double weight = witnessPathSearcher.runSearch(bridgePath.value.chEntry.adjNode, targetEdgeKey, bridgePath.value.weight, maxPolls); dijkstraSW.stop(); if (weight <= bridgePath.value.weight) // we found a witness, nothing to do continue; PrepareCHEntry root = bridgePath.value.chEntry; while (EdgeIterator.Edge.isValid(root.parent.prepareEdge)) root = root.getParent(); // we make sure to add each shortcut only once. when we are actually adding shortcuts we check for existing // shortcuts anyway, but at least this is important when we *count* shortcuts. long addedShortcutKey = BitUtil.LITTLE.toLong(root.firstEdgeKey, bridgePath.value.chEntry.incEdgeKey); if (!addedShortcuts.add(addedShortcutKey)) continue; double initialTurnCost = prepareGraph.getTurnWeight(origInKey, sourceNode, root.firstEdgeKey); bridgePath.value.chEntry.weight -= initialTurnCost; LOGGER.trace("Adding shortcuts for target entry {}", bridgePath.value.chEntry); // todo: re-implement loop-avoidance heuristic as it existed in GH 1.0? it did not work the // way it was implemented so it was removed at some point shortcutHandler.handleShortcut(root, bridgePath.value.chEntry, bridgePath.value.chEntry.origEdges); } witnessPathSearcher.finishSearch(); } } }
3.68
hbase_MasterObserver_preGetRSGroupInfoOfTable
/** * Called before getting region server group info of the passed tableName. * @param ctx the environment to interact with the framework and master * @param tableName name of the table to get RSGroupInfo for */ default void preGetRSGroupInfoOfTable(final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName) throws IOException { }
3.68
framework_TestingPushConnection_createConfig
/* * Force transport */ @Override protected AtmosphereConfiguration createConfig() { AtmosphereConfiguration conf = super.createConfig(); if (transport != null) { conf.setTransport(transport); conf.setFallbackTransport(transport); } return conf; }
3.68
hadoop_NamenodeRegistration_getRole
/** * Get name-node role. */ public NamenodeRole getRole() { return role; }
3.68
framework_VComboBox_cancelPendingPostFiltering
/** * Cancel a pending request to perform post-filtering actions. */ private void cancelPendingPostFiltering() { pendingUserInput = null; waitingForFilteringResponse = false; }
3.68
AreaShop_AreaShop_setupLanguageManager
/** * Setup a new LanguageManager. */ private void setupLanguageManager() { languageManager = new LanguageManager( this, languageFolder, getConfig().getString("language"), "EN", chatprefix ); }
3.68
zxing_PDF417ResultMetadata_isLastSegment
/** * @return true if it is the last segment */ public boolean isLastSegment() { return lastSegment; }
3.68
druid_MySqlStatementParser_parseCursorDeclare
/** * parse cursor declare statement */ public MySqlCursorDeclareStatement parseCursorDeclare() { MySqlCursorDeclareStatement stmt = new MySqlCursorDeclareStatement(); accept(Token.DECLARE); stmt.setCursorName(exprParser.name()); accept(Token.CURSOR); accept(Token.FOR); //SQLSelectStatement selelctStmt = (SQLSelectStatement) parseSelect(); SQLSelect select = this.createSQLSelectParser().select(); stmt.setSelect(select); accept(Token.SEMI); return stmt; }
3.68
hadoop_FederationStateStoreUtils_returnToPool
/** * Returns the SQL <code>FederationStateStore</code> connections to the pool. * * @param log the logger interface * @param cstmt the interface used to execute SQL stored procedures * @throws YarnException on failure */ public static void returnToPool(Logger log, CallableStatement cstmt) throws YarnException { returnToPool(log, cstmt, null); }
3.68
framework_TreeDataProvider_getTreeData
/** * Return the underlying hierarchical data of this provider. * * @return the underlying data of this provider */ public TreeData<T> getTreeData() { return treeData; }
3.68
flink_HiveDDLUtils_validateConstraint
// returns a constraint trait that requires VALIDATE public static byte validateConstraint(byte trait) { return (byte) (trait | HIVE_CONSTRAINT_VALIDATE); }
3.68
morf_DatabaseMetaDataProvider_isSystemView
/** * Identify whether or not the view is one owned by the system, or owned by * our application. The default implementation assumes that all views we can * access in the schema are under our control. * * @param viewName The view which we are accessing. * @return <var>true</var> if the view is owned by the system */ protected boolean isSystemView(@SuppressWarnings("unused") RealName viewName) { return false; }
3.68
flink_Transformation_setResources
/** * Sets the minimum and preferred resources for this stream transformation. * * @param minResources The minimum resource of this transformation. * @param preferredResources The preferred resource of this transformation. */ public void setResources(ResourceSpec minResources, ResourceSpec preferredResources) { OperatorValidationUtils.validateMinAndPreferredResources(minResources, preferredResources); this.minResources = checkNotNull(minResources); this.preferredResources = checkNotNull(preferredResources); }
3.68
flink_Time_seconds
/** Creates a new {@link Time} that represents the given number of seconds. */ public static Time seconds(long seconds) { return of(seconds, TimeUnit.SECONDS); }
3.68
morf_Function_trim
/** * Helper method to create an instance of the "trim" SQL function, * which will result in the argument having leading and trailing spaces removed. * * @param expression the field to evaluate. * @return an instance of the trim function. */ public static Function trim(AliasedField expression) { return new Function(FunctionType.TRIM, expression); }
3.68
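A usage sketch assuming morf's SqlUtils helpers (select, field, tableRef) are statically imported; the table and column names are illustrative:

SelectStatement statement =
    select(trim(field("forename"))).from(tableRef("Person"));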
framework_ContainerHierarchicalWrapper_rootItemIds
/* * Gets the IDs of the root elements in the container. Don't add a JavaDoc * comment here, we use the default documentation from implemented * interface. */ @Override public Collection<?> rootItemIds() { // If the wrapped container implements the method directly, use it if (hierarchical) { return ((Container.Hierarchical) container).rootItemIds(); } return Collections.unmodifiableCollection(roots); }
3.68
framework_Embedded_getStandby
/** * This attribute specifies a message that a user agent may render while * loading the object's implementation and data. * * @return The text displayed when loading */ public String getStandby() { return getState(false).standby; }
3.68
hadoop_UnitsConversionUtil_compareUnits
/** * Compare a unit to another unit. * <br> * Examples:<br> * 1. 'm' (milli) is smaller than 'k' (kilo), so compareUnits("m", "k") * will return -1.<br> * 2. 'M' (MEGA) is greater than 'k' (kilo), so compareUnits("M", "k") will * return 1. * * @param unitA first unit * @param unitB second unit * @return +1, 0 or -1 depending on whether unitA is greater than, equal to, * or smaller than unitB. */ public static int compareUnits(String unitA, String unitB) { checkUnitArgument(unitA); checkUnitArgument(unitB); int unitAPos = SORTED_UNITS.indexOf(unitA); int unitBPos = SORTED_UNITS.indexOf(unitB); return Integer.compare(unitAPos, unitBPos); }
3.68
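A sketch mirroring the Javadoc's own examples, assuming the unit symbols follow the SORTED_UNITS convention:

assert UnitsConversionUtil.compareUnits("m", "k") < 0; // milli < kilo
assert UnitsConversionUtil.compareUnits("M", "k") > 0; // mega > kilo
assert UnitsConversionUtil.compareUnits("k", "k") == 0;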
flink_HighAvailabilityServicesFactory_createClientHAServices
/** * Create a {@link ClientHighAvailabilityServices} instance. * * @param configuration Flink configuration * @return instance of {@link ClientHighAvailabilityServices} * @throws Exception when ClientHAServices cannot be created */ default ClientHighAvailabilityServices createClientHAServices(Configuration configuration) throws Exception { return createHAServices(configuration, UnsupportedOperationExecutor.INSTANCE); }
3.68
open-banking-gateway_DatasafeMetadataStorage_read
/** * Reads user profile data * @param id Entity id * @return User profile data */ @Override @Transactional public Optional<byte[]> read(String id) { return repository.findById(Long.valueOf(id)).map(getData); }
3.68
graphhopper_SubnetworkStorage_getSubnetwork
/** * Returns the subnetwork ID for the specified nodeId or 0 if none is associated, e.g. because the * subnetwork is too small. */ public int getSubnetwork(int nodeId) { return da.getByte(nodeId); }
3.68
zxing_MinimalECIInput_isECI
/** * Determines if a value is an ECI * * @param index the index of the value * * @return true if the value at position {@code index} is an ECI * * @throws IndexOutOfBoundsException * if the {@code index} argument is negative or not less than * {@code length()} */ public boolean isECI(int index) { if (index < 0 || index >= length()) { throw new IndexOutOfBoundsException("" + index); } return bytes[index] > 255 && bytes[index] <= 999; }
3.68
flink_NetUtils_createSocketFromPorts
/** * Tries to allocate a socket from the given sets of ports. * * @param portsIterator An iterator over the candidate ports. * @param factory A factory for creating the SocketServer * @return the allocated socket, or null if no port was available. */ public static ServerSocket createSocketFromPorts( Iterator<Integer> portsIterator, SocketFactory factory) { while (portsIterator.hasNext()) { int port = portsIterator.next(); LOG.debug("Trying to open socket on port {}", port); try { return factory.createSocket(port); } catch (IOException | IllegalArgumentException e) { if (LOG.isDebugEnabled()) { LOG.debug("Unable to allocate socket on port", e); } else { LOG.info( "Unable to allocate on port {}, due to error: {}", port, e.getMessage()); } } } return null; }
3.68
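A usage sketch: try a short list of candidate ports, binding with a plain ServerSocket; the constructor reference satisfies the factory's createSocket(int) signature:

ServerSocket socket = NetUtils.createSocketFromPorts(
    Arrays.asList(50100, 50101, 50102).iterator(), ServerSocket::new);
if (socket == null) {
    throw new IOException("none of the candidate ports could be bound");
}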
framework_VaadinFinderLocatorStrategy_findNotificationsByPath
/** * Special case for finding notifications as they have no connectors and are * directly attached to {@link RootPanel}. * * @param path * The path of the notification, should be * {@code "//VNotification"} optionally followed by an index in * brackets. * @return the notification element or null if not found. */ private List<VNotification> findNotificationsByPath(String path) { List<VNotification> notifications = new ArrayList<>(); for (Widget w : RootPanel.get()) { if (w instanceof VNotification) { notifications.add((VNotification) w); } } List<SelectorPredicate> predicates = SelectorPredicate .extractPredicates(path); for (SelectorPredicate p : predicates) { if (p.getIndex() > -1) { VNotification n = notifications.get(p.getIndex()); notifications.clear(); if (n != null) { notifications.add(n); } } } return eliminateDuplicates(notifications); }
3.68
flink_RocksDBFullRestoreOperation_restoreKVStateData
/** * Restore the KV-state / ColumnFamily data for all key-groups referenced by the current state * handle. */ private void restoreKVStateData( ThrowingIterator<KeyGroup> keyGroups, Map<Integer, ColumnFamilyHandle> columnFamilies) throws IOException, RocksDBException, StateMigrationException { // for all key-groups in the current state handle... try (RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.rocksHandle.getDb(), writeBatchSize)) { ColumnFamilyHandle handle = null; while (keyGroups.hasNext()) { KeyGroup keyGroup = keyGroups.next(); try (ThrowingIterator<KeyGroupEntry> groupEntries = keyGroup.getKeyGroupEntries()) { int oldKvStateId = -1; while (groupEntries.hasNext()) { KeyGroupEntry groupEntry = groupEntries.next(); int kvStateId = groupEntry.getKvStateId(); if (kvStateId != oldKvStateId) { oldKvStateId = kvStateId; handle = columnFamilies.get(kvStateId); } writeBatchWrapper.put(handle, groupEntry.getKey(), groupEntry.getValue()); } } } } }
3.68
hbase_ReplicationSourceManager_interruptOrAbortWhenFail
/** * Refresh replication source will terminate the old source first, then the source thread will be * interrupted. Need to handle it instead of abort the region server. */ private void interruptOrAbortWhenFail(ReplicationQueueOperation op) { try { op.exec(); } catch (ReplicationException e) { if ( e.getCause() != null && e.getCause() instanceof KeeperException.SystemErrorException && e.getCause().getCause() != null && e.getCause().getCause() instanceof InterruptedException ) { // ReplicationRuntimeException(a RuntimeException) is thrown out here. The reason is // that thread is interrupted deep down in the stack, it should pass the following // processing logic and propagate to the most top layer which can handle this exception // properly. In this specific case, the top layer is ReplicationSourceShipper#run(). throw new ReplicationRuntimeException( "Thread is interrupted, the replication source may be terminated", e.getCause().getCause()); } server.abort("Failed to operate on replication queue", e); } }
3.68
hadoop_BooleanWritable_write
/** * Writes the boolean value to the output. */ @Override public void write(DataOutput out) throws IOException { out.writeBoolean(value); }
3.68
AreaShop_BuyRegion_getFormattedPrice
/** * Get the formatted string of the price (includes prefix and suffix). * @return The formatted string of the price */ public String getFormattedPrice() { return Utils.formatCurrency(getPrice()); }
3.68