name (string, lengths 12–178)
code_snippet (string, lengths 8–36.5k)
score (float64, 3.26–3.68)
framework_Window_setAssistiveDescription
/** * Allows specifying which components contain the description for the * window. Text contained in these components will be read by assistive * devices when the window is opened. * * @param components * the components to use as the description */ public void setAssistiveDescription(Component... components) { if (components == null) { throw new IllegalArgumentException( "Parameter components must be non-null"); } else { getState().contentDescription = components; } }
3.68
flink_PipelinedSubpartition_increaseBuffersInBacklog
/** * Increases the number of non-event buffers by one after adding a non-event buffer into this * subpartition. */ @GuardedBy("buffers") private void increaseBuffersInBacklog(BufferConsumer buffer) { assert Thread.holdsLock(buffers); if (buffer != null && buffer.isBuffer()) { buffersInBacklog++; } }
3.68
flink_BoundedFIFOQueue_iterator
/** * Returns the {@code BoundedFIFOQueue}'s {@link Iterator}. * * @return The queue's {@code Iterator}. */ @Override public Iterator<T> iterator() { return elements.iterator(); }
3.68
hadoop_TrashPolicy_getInstance
/** * Get an instance of the configured TrashPolicy based on the value * of the configuration parameter fs.trash.classname. * * @param conf the configuration to be used * @param fs the file system to be used * @return an instance of TrashPolicy */ public static TrashPolicy getInstance(Configuration conf, FileSystem fs) { Class<? extends TrashPolicy> trashClass = conf.getClass( "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class); TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf); trash.initialize(conf, fs); // initialize TrashPolicy return trash; }
3.68
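A minimal usage sketch of the reflection-based lookup above; the fs.trash.classname key and getInstance call come from the snippet, while the demo class name and the surrounding harness are illustrative assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.TrashPolicy;

public class TrashPolicyDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Without an explicit fs.trash.classname, getInstance falls back to
        // TrashPolicyDefault; conf.setClass(...) would point it at a custom subclass.
        FileSystem fs = FileSystem.get(conf);
        TrashPolicy trash = TrashPolicy.getInstance(conf, fs); // reflectively created, then initialized
        System.out.println("Active trash policy: " + trash.getClass().getName());
    }
}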
hbase_WALEntryBatch_getNbHFiles
/** Returns the number of HFiles in this batch */ public int getNbHFiles() { return nbHFiles; }
3.68
hbase_ReportMakingVisitor_metaTableConsistencyCheck
/** * Check row. * @param metaTableRow Row from hbase:meta table. * @return Returns default regioninfo found in row parse as a convenience to save on having to do * a double-parse of Result. */ private RegionInfo metaTableConsistencyCheck(Result metaTableRow) { RegionInfo ri; // Locations comes back null if the RegionInfo field is empty. // If locations is null, ensure the regioninfo is for sure empty before progressing. // If really empty, report as missing regioninfo! Otherwise, can run server check // and get RegionInfo from locations. RegionLocations locations = CatalogFamilyFormat.getRegionLocations(metaTableRow); if (locations == null) { ri = CatalogFamilyFormat.getRegionInfo(metaTableRow, HConstants.REGIONINFO_QUALIFIER); } else { ri = locations.getDefaultRegionLocation().getRegion(); checkServer(locations); } if (ri == null) { this.report.emptyRegionInfo.add(metaTableRow.getRow()); return ri; } if (!Bytes.equals(metaTableRow.getRow(), ri.getRegionName())) { LOG.warn( "INCONSISTENCY: Row name is not equal to serialized info:regioninfo content; " + "row={} {}; See if RegionInfo is referenced in another hbase:meta row? Delete?", Bytes.toStringBinary(metaTableRow.getRow()), ri.getRegionNameAsString()); return null; } // Skip split parent region if (ri.isSplitParent()) { return ri; } // If table is disabled, skip integrity check. if (!isTableDisabled(ri)) { if (isTableTransition(ri)) { // HBCK1 used to have a special category for missing start or end keys. // We'll just lump them in as 'holes'. // This is a table transition. If this region is not first region, report a hole. if (!ri.isFirst()) { addHole(RegionInfoBuilder.UNDEFINED, ri); } // This is a table transition. If last region was not last region of previous table, // report a hole if (this.previous != null && !this.previous.isLast()) { addHole(this.previous, RegionInfoBuilder.UNDEFINED); } } else { if (!this.previous.isNext(ri)) { if (this.previous.isOverlap(ri)) { addOverlap(this.previous, ri); } else if (ri.isOverlap(this.highestEndKeyRegionInfo)) { // We may have seen a region a few rows back that overlaps this one. addOverlap(this.highestEndKeyRegionInfo, ri); } else if (!this.highestEndKeyRegionInfo.isNext(ri)) { // Need to check the case if this.highestEndKeyRegionInfo.isNext(ri). If no, // report a hole, otherwise, it is ok. For an example, // previous: [aa, bb), ri: [cc, dd), highestEndKeyRegionInfo: [a, cc) // In this case, it should not report a hole, as highestEndKeyRegionInfo covers // the hole between previous and ri. addHole(this.previous, ri); } } else if (ri.isOverlap(this.highestEndKeyRegionInfo)) { // We may have seen a region a few rows back that overlaps this one // even though it properly 'follows' the region just before. addOverlap(this.highestEndKeyRegionInfo, ri); } } this.previous = ri; this.highestEndKeyRegionInfo = MetaFixer.getRegionInfoWithLargestEndKey(this.highestEndKeyRegionInfo, ri); } return ri; }
3.68
hbase_ScannerModel_setStartRow
/** * @param startRow start row */ public void setStartRow(byte[] startRow) { this.startRow = startRow; }
3.68
dubbo_ServiceInstancesChangedListener_hasEmptyMetadata
/** * Calculate the number of revisions that failed to find metadata info. * * @param revisionToInstances instance list classified by revisions * @return the number of revisions that failed at fetching MetadataInfo */ protected int hasEmptyMetadata(Map<String, List<ServiceInstance>> revisionToInstances) { if (revisionToInstances == null) { return 0; } StringBuilder builder = new StringBuilder(); int emptyMetadataNum = 0; for (Map.Entry<String, List<ServiceInstance>> entry : revisionToInstances.entrySet()) { DefaultServiceInstance serviceInstance = (DefaultServiceInstance) entry.getValue().get(0); if (serviceInstance == null || serviceInstance.getServiceMetadata() == MetadataInfo.EMPTY) { emptyMetadataNum++; } builder.append(entry.getKey()); builder.append(' '); } if (emptyMetadataNum > 0) { builder.insert( 0, emptyMetadataNum + "/" + revisionToInstances.size() + " revisions failed to get metadata from remote: "); logger.error(INTERNAL_ERROR, "unknown error in registry module", "", builder.toString()); } else { builder.insert(0, revisionToInstances.size() + " unique working revisions: "); logger.info(builder.toString()); } return emptyMetadataNum; }
3.68
hadoop_ResourceSkyline_getJobSubmissionTime
/** * Get the job's submission time. * * @return job's submission time. */ public final long getJobSubmissionTime() { return jobSubmissionTime; }
3.68
hadoop_Chunk_isLastChunk
/** * Have we reached the last chunk? * * @return true if we have reached the last chunk. * @throws java.io.IOException if an I/O error occurs while checking for end of file */ public boolean isLastChunk() throws IOException { checkEOF(); return lastChunk; }
3.68
hadoop_JobDefinition_getParams
//Currently unused public Map<String, String> getParams() { return params; }
3.68
flink_RocksIncrementalSnapshotStrategy_uploadSnapshotFiles
/** upload files and return total uploaded size. */ private long uploadSnapshotFiles( @Nonnull List<HandleAndLocalPath> sstFiles, @Nonnull List<HandleAndLocalPath> miscFiles, @Nonnull CloseableRegistry snapshotCloseableRegistry, @Nonnull CloseableRegistry tmpResourcesRegistry) throws Exception { // write state data Preconditions.checkState(localBackupDirectory.exists()); Path[] files = localBackupDirectory.listDirectory(); long uploadedSize = 0; if (files != null) { List<Path> sstFilePaths = new ArrayList<>(files.length); List<Path> miscFilePaths = new ArrayList<>(files.length); createUploadFilePaths(files, sstFiles, sstFilePaths, miscFilePaths); final CheckpointedStateScope stateScope = sharingFilesStrategy == SnapshotType.SharingFilesStrategy.NO_SHARING ? CheckpointedStateScope.EXCLUSIVE : CheckpointedStateScope.SHARED; List<HandleAndLocalPath> sstFilesUploadResult = stateUploader.uploadFilesToCheckpointFs( sstFilePaths, checkpointStreamFactory, stateScope, snapshotCloseableRegistry, tmpResourcesRegistry); uploadedSize += sstFilesUploadResult.stream().mapToLong(e -> e.getStateSize()).sum(); sstFiles.addAll(sstFilesUploadResult); List<HandleAndLocalPath> miscFilesUploadResult = stateUploader.uploadFilesToCheckpointFs( miscFilePaths, checkpointStreamFactory, stateScope, snapshotCloseableRegistry, tmpResourcesRegistry); uploadedSize += miscFilesUploadResult.stream().mapToLong(e -> e.getStateSize()).sum(); miscFiles.addAll(miscFilesUploadResult); synchronized (uploadedSstFiles) { switch (sharingFilesStrategy) { case FORWARD_BACKWARD: case FORWARD: uploadedSstFiles.put( checkpointId, Collections.unmodifiableList(sstFiles)); break; case NO_SHARING: break; default: // This is just a safety precaution. It is checked before creating the // RocksDBIncrementalSnapshotOperation throw new IllegalArgumentException( "Unsupported sharing files strategy: " + sharingFilesStrategy); } } } return uploadedSize; }
3.68
hbase_MetricsTableRequests_updateScan
/** * Update the scan metrics. * @param time response time of scan * @param responseCellSize size of the scan response * @param blockBytesScanned size of block bytes scanned to retrieve the response */ public void updateScan(long time, long responseCellSize, long blockBytesScanned) { if (isEnableTableLatenciesMetrics()) { scanTimeHistogram.update(time); scanSizeHistogram.update(responseCellSize); if (blockBytesScanned > 0) { blockBytesScannedCount.increment(blockBytesScanned); scanBlockBytesScanned.update(blockBytesScanned); } } }
3.68
rocketmq-connect_Deserializer_configure
/** * Configure this class. * @param configs configs in key/value pairs */ default void configure(Map<String, ?> configs) { // intentionally left blank }
3.68
framework_WebBrowser_isTooOldToFunctionProperly
/** * Checks if the browser is so old that it simply won't work with a Vaadin * application. Can be used to redirect to an alternative page, show * alternative content or similar. * * When this method returns true, chances are very high that the browser * won't work and it does not make sense to direct the user to the Vaadin * application. * * @return true if the browser won't work, false if the browser is * supported or might work */ public boolean isTooOldToFunctionProperly() { if (browserDetails == null) { // Don't know, so assume it will work return false; } return browserDetails.isTooOldToFunctionProperly(); }
3.68
hadoop_NMTokenSecretManagerInRM_removeNodeKey
/** * This is to be called when NodeManager reconnects or goes down. This will * remove NMTokens, if present for any running application, from the cache. * @param nodeId Node Id. */ public void removeNodeKey(NodeId nodeId) { this.writeLock.lock(); try { Iterator<HashSet<NodeId>> appNodeKeySetIterator = this.appAttemptToNodeKeyMap.values().iterator(); while (appNodeKeySetIterator.hasNext()) { appNodeKeySetIterator.next().remove(nodeId); } } finally { this.writeLock.unlock(); } }
3.68
hudi_Table_addAll
/** * Add all rows. * * @param rows Rows to be added * @return this Table instance, to allow call chaining */ public Table addAll(List<List<Comparable>> rows) { rows.forEach(this::add); return this; }
3.68
hadoop_NMClientAsync_createNMClientAsync
/** * @deprecated Use {@link #createNMClientAsync(AbstractCallbackHandler)} * instead. */ @Deprecated public static NMClientAsync createNMClientAsync( CallbackHandler callbackHandler) { return new NMClientAsyncImpl(callbackHandler); }
3.68
AreaShop_TeleportFeature_getTeleportLocation
/** * Get the teleport location set for this region. * @return The teleport location, or null if not set */ public Location getTeleportLocation() { return Utils.configToLocation(getRegion().getConfigurationSectionSetting("general.teleportLocation")); }
3.68
framework_VTabsheet_needsToScrollIntoViewIfBecomesVisible
/** * If the tab bar was previously scrolled as far left as it can go, i.e. * every scrolled out tab was also hidden on server, and the tab that is * getting its visibility updated is among them, it should become the first * visible tab instead. If the tab was not among those tabs, the scroller * index doesn't need adjusting. If any visible-on-server tabs were already * scrolled out of view, scroll position likewise doesn't need adjusting * regardless of which side of the line this tab falls. * <p> * This check must be performed before the tab's hiddenOnServer state is * updated, and only if the server visibility is changed from hidden to * visible. * * @param index * the index of the tab that is getting updated * @return {@code true} if the given index should become the new scroller * index, {@code false} otherwise */ private boolean needsToScrollIntoViewIfBecomesVisible(int index) { // note that these methods use different definition for word 'scrolled', // the first one accepts hidden-on-server tabs as scrolled while the // second one only cares about tabs that end-user considers scrolled return scrolledOutOfView(index) && !hasScrolledTabs(); }
3.68
morf_SqlDialect_hasColumnNamed
/** * Checks the schema to see if the {@code tableName} has a named column as * provided. * * @param tableName the table name. * @param metadata the schema. * @param columnName the column name to check for. * @return true if a column with the given name is found. */ private boolean hasColumnNamed(String tableName, Schema metadata, String columnName) { for (Column currentColumn : metadata.getTable(tableName).columns()) { if (currentColumn.getName().equalsIgnoreCase(columnName)) { return true; } } return false; }
3.68
hbase_NamespacesModel_toString
/* * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); for (String namespace : namespaces) { sb.append(namespace); sb.append("\n"); } return sb.toString(); }
3.68
hadoop_BalanceProcedureScheduler_writeJournal
/** * Save current status to journal. */ boolean writeJournal(BalanceJob job) { try { journal.saveJob(job); return true; } catch (Exception e) { LOG.warn("Save procedure failed, add to recoverQueue. job=" + job, e); recoverQueue.add(job); return false; } }
3.68
hudi_ExternalSpillableMap_getSizeOfFileOnDiskInBytes
/** * Number of bytes spilled to disk. */ public long getSizeOfFileOnDiskInBytes() { return getDiskBasedMap().sizeOfFileOnDiskInBytes(); }
3.68
hadoop_NodePlan_setVolumeSetPlans
/** * Sets the list of volume set plans. * * @param volumeSetPlans - List of plans. */ public void setVolumeSetPlans(List<Step> volumeSetPlans) { this.volumeSetPlans = volumeSetPlans; }
3.68
flink_TieredStorageResourceRegistry_registerResource
/** * Register a new resource for the given owner. * * @param owner identifier of the data that the resource corresponds to. * @param tieredStorageResource the tiered storage resources to be registered. */ public void registerResource( TieredStorageDataIdentifier owner, TieredStorageResource tieredStorageResource) { registeredResources .computeIfAbsent(owner, (ignore) -> new ArrayList<>()) .add(tieredStorageResource); }
3.68
hudi_AvroSchemaCompatibility_getLocation
/** * Returns a * <a href="https://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-08">JSON * Pointer</a> describing the node location within the schema's JSON document * tree where the incompatibility was encountered. * * @return JSON Pointer encoded as a string. */ public String getLocation() { StringBuilder s = new StringBuilder("/"); boolean first = true; // ignore root element for (String coordinate : mLocation.subList(1, mLocation.size())) { if (first) { first = false; } else { s.append('/'); } // Apply JSON pointer escaping. s.append(coordinate.replace("~", "~0").replace("/", "~1")); } return s.toString(); }
3.68
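The escaping in getLocation above follows the JSON Pointer rule: '~' becomes '~0' and '/' becomes '~1', and the '~' replacement must run first or it would mangle freshly inserted '~1' sequences. A self-contained sketch of just that rule (the demo class is hypothetical, only the two replace calls mirror the snippet):
public class JsonPointerEscapeDemo {
    // Same two replacements, in the same order, as getLocation() above.
    static String escape(String coordinate) {
        return coordinate.replace("~", "~0").replace("/", "~1");
    }

    public static void main(String[] args) {
        System.out.println(escape("a/b"));  // a~1b
        System.out.println(escape("m~n"));  // m~0n
        System.out.println(escape("x~/y")); // x~0~1y
    }
}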
pulsar_ReaderHandler_handleEndOfTopic
// Check and notify reader if reached end of topic. private void handleEndOfTopic() { try { String msg = objectWriter().writeValueAsString( new EndOfTopicResponse(reader.hasReachedEndOfTopic())); getSession().getRemote() .sendString(msg, new WriteCallback() { @Override public void writeFailed(Throwable th) { log.warn("[{}/{}] Failed to send end of topic msg to {} due to {}", reader.getTopic(), subscription, getRemote().getInetSocketAddress().toString(), th.getMessage()); } @Override public void writeSuccess() { if (log.isDebugEnabled()) { log.debug("[{}/{}] End of topic message is delivered successfully to {} ", reader.getTopic(), subscription, getRemote().getInetSocketAddress().toString()); } } }); } catch (JsonProcessingException e) { log.warn("[{}] Failed to generate end of topic response: {}", reader.getTopic(), e.getMessage()); } catch (Exception e) { log.warn("[{}] Failed to send end of topic response: {}", reader.getTopic(), e.getMessage()); } }
3.68
hibernate-validator_AbstractElementVisitor_reportIssues
/** * Reports provided issues using {@link javax.annotation.processing.Messager} API based on their * kind ({@link ConstraintCheckIssue.IssueKind}). * * @param foundIssues a collection of issues to be reported */ protected void reportIssues(Collection<ConstraintCheckIssue> foundIssues) { Set<ConstraintCheckIssue> warnings = CollectionHelper.newHashSet(); Set<ConstraintCheckIssue> errors = CollectionHelper.newHashSet(); for ( ConstraintCheckIssue issue : foundIssues ) { if ( issue.isError() ) { errors.add( issue ); } else if ( issue.isWarning() ) { warnings.add( issue ); } } messager.reportErrors( errors ); messager.reportWarnings( warnings ); }
3.68
flink_MasterHooks_close
/** * Closes the master hooks. * * @param hooks The hooks to close */ public static void close( final Collection<MasterTriggerRestoreHook<?>> hooks, final Logger log) { for (MasterTriggerRestoreHook<?> hook : hooks) { try { hook.close(); } catch (Throwable t) { log.warn( "Failed to cleanly close a checkpoint master hook (" + hook.getIdentifier() + ")", t); } } }
3.68
flink_FlinkPreparingTableBase_explainSourceAsString
/** Returns the digest of the {@link TableSource} instance. */ protected List<String> explainSourceAsString(TableSource<?> ts) { String tsDigest = ts.explainSource(); if (!Strings.isNullOrEmpty(tsDigest)) { return ImmutableList.<String>builder() .addAll(Util.skipLast(names)) .add(String.format("%s, source: [%s]", Util.last(names), tsDigest)) .build(); } else { return names; } }
3.68
framework_WeekGrid_hasToday
/** * @return true if this weekgrid contains a date that is today */ public boolean hasToday() { return dateCellOfToday != null; }
3.68
hbase_HFileReaderImpl_getScanner
/** * Create a Scanner on this file. No seeks or reads are done on creation. Call * {@link HFileScanner#seekTo(Cell)} to position and start the read. There is nothing to clean up * in a Scanner. Letting go of your references to the scanner is sufficient. * @param conf Store configuration. * @param cacheBlocks True if we should cache blocks read in by this scanner. * @param pread Use positional read rather than seek+read if true (pread is better for * random reads, seek+read is better for scanning). * @param isCompaction is scanner being used for a compaction? * @return Scanner on this file. */ @Override public HFileScanner getScanner(Configuration conf, boolean cacheBlocks, final boolean pread, final boolean isCompaction) { if (dataBlockEncoder.useEncodedScanner()) { return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext, conf); } return new HFileScannerImpl(this, cacheBlocks, pread, isCompaction); }
3.68
hbase_ServerManager_expireServer
/** * Expire the passed server. Add it to list of dead servers and queue a shutdown processing. * @return pid if we queued a ServerCrashProcedure else {@link Procedure#NO_PROC_ID} if we did not * (could happen for many reasons including the fact that its this server that is going * down or we already have queued an SCP for this server or SCP processing is currently * disabled because we are in startup phase). */ // Redo test so we can make this protected. public synchronized long expireServer(final ServerName serverName) { return expireServer(serverName, false); }
3.68
flink_CsvOutputFormat_setCharsetName
/** * Sets the charset with which the CSV strings are written to the file. If not specified, the * output format uses the system's default character encoding. * * @param charsetName The name of the charset to use for encoding the output. */ public void setCharsetName(String charsetName) { this.charsetName = charsetName; }
3.68
hbase_HelloHBase_putRowToTable
/** * Invokes Table#put to store a row (with two new columns created 'on the fly') into the table. * @param table Standard Table object (used for CRUD operations). * @throws IOException If IO problem encountered */ static void putRowToTable(final Table table) throws IOException { table.put(new Put(MY_ROW_ID) .addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello")) .addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!"))); System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table [" + table.getName().getNameAsString() + "] in HBase;\n" + " the row's two columns (created 'on the fly') are: [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); }
3.68
morf_DatabaseMetaDataProvider_tableExists
/** * @see org.alfasoftware.morf.metadata.Schema#tableExists(java.lang.String) */ @Override public boolean tableExists(String tableName) { return tableNames.get().containsKey(named(tableName)); }
3.68
graphhopper_MatrixResponse_getTime
/** * Returns the time for the specific entry (from -&gt; to) in milliseconds or {@link Long#MAX_VALUE} in case * no connection was found (and {@link GHMRequest#setFailFast(boolean)} was set to true). */ public long getTime(int from, int to) { if (hasErrors()) { throw new IllegalStateException("Cannot return time (" + from + "," + to + ") if errors occurred " + getErrors()); } if (from >= times.length) { throw new IllegalStateException("Cannot get 'from' " + from + " from times with size " + times.length); } else if (to >= times[from].length) { throw new IllegalStateException("Cannot get 'to' " + to + " from times with size " + times[from].length); } return times[from][to]; }
3.68
hudi_TimelineUtils_getExtraMetadataFromLatest
/** * Get extra metadata for specified key from latest commit/deltacommit/replacecommit(eg. insert_overwrite) instant. */ public static Option<String> getExtraMetadataFromLatest(HoodieTableMetaClient metaClient, String extraMetadataKey) { return metaClient.getCommitsTimeline().filterCompletedInstants().getReverseOrderedInstants() // exclude clustering commits for returning user stored extra metadata .filter(instant -> !isClusteringCommit(metaClient, instant)) .findFirst().map(instant -> getMetadataValue(metaClient, extraMetadataKey, instant)).orElse(Option.empty()); }
3.68
hbase_RegionState_isUnassignable
/** * Check if a region state is one of the offline states that can't transition to * pending_close/closing (unassign/offline). */ public static boolean isUnassignable(State state) { return state == State.MERGED || state == State.SPLIT || state == State.OFFLINE || state == State.SPLITTING_NEW || state == State.MERGING_NEW; }
3.68
flink_NettyProtocol_getServerChannelHandlers
/** * Returns the server channel handlers. * * <pre> * +-------------------------------------------------------------------+ * | SERVER CHANNEL PIPELINE | * | | * | +----------+----------+ (3) write +----------------------+ | * | | Queue of queues +----------->| Message encoder | | * | +----------+----------+ +-----------+----------+ | * | /|\ \|/ | * | | (2) enqueue | | * | +----------+----------+ | | * | | Request handler | | | * | +----------+----------+ | | * | /|\ | | * | | | | * | +-----------+-----------+ | | * | | Message+Frame decoder | | | * | +-----------+-----------+ | | * | /|\ | | * +---------------+-----------------------------------+---------------+ * | | (1) client request \|/ * +---------------+-----------------------------------+---------------+ * | | | | * | [ Socket.read() ] [ Socket.write() ] | * | | * | Netty Internal I/O Threads (Transport Implementation) | * +-------------------------------------------------------------------+ * </pre> * * @return channel handlers */ public ChannelHandler[] getServerChannelHandlers() { PartitionRequestQueue queueOfPartitionQueues = new PartitionRequestQueue(); PartitionRequestServerHandler serverHandler = new PartitionRequestServerHandler( partitionProvider, taskEventPublisher, queueOfPartitionQueues); return new ChannelHandler[] { messageEncoder, new NettyMessage.NettyMessageDecoder(), serverHandler, queueOfPartitionQueues }; }
3.68
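A hedged sketch of how a handler array like the one returned above is typically installed: the ChannelInitializer wiring is standard Netty, but wrapping NettyProtocol this way is an assumption, not code from Flink (which also shades Netty under its own package).
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;

public class ServerPipelineSketch extends ChannelInitializer<SocketChannel> {
    private final NettyProtocol protocol; // assumed: the class owning getServerChannelHandlers()

    public ServerPipelineSketch(NettyProtocol protocol) {
        this.protocol = protocol;
    }

    @Override
    protected void initChannel(SocketChannel ch) {
        // Handlers are added in array order: encoder, frame/message decoder,
        // request handler, queue of queues -- matching the pipeline diagram.
        ch.pipeline().addLast(protocol.getServerChannelHandlers());
    }
}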
framework_EffectAllowed_getValue
/** * Get the string value that is accepted by the client side drag event. * * @return String value accepted by the client side drag event. */ public String getValue() { return value; }
3.68
rocketmq-connect_DeadLetterQueueReporter_populateContextHeaders
/** * Populate context headers on the produced message. * * @param producerRecord the message to decorate with error-context properties * @param context the processing context supplying the error details */ void populateContextHeaders(Message producerRecord, ProcessingContext context) { Map<String, String> headers = producerRecord.getProperties(); if (context.consumerRecord() != null) { producerRecord.putUserProperty(ERROR_HEADER_ORIG_TOPIC, context.consumerRecord().getTopic()); producerRecord.putUserProperty(ERROR_HEADER_ORIG_PARTITION, String.valueOf(context.consumerRecord().getQueueId())); producerRecord.putUserProperty(ERROR_HEADER_ORIG_OFFSET, String.valueOf(context.consumerRecord().getQueueOffset())); } if (workerId != null) { producerRecord.putUserProperty(ERROR_HEADER_CLUSTER_ID, workerId); } producerRecord.putUserProperty(ERROR_HEADER_STAGE, context.stage().name()); producerRecord.putUserProperty(ERROR_HEADER_EXECUTING_CLASS, context.executingClass().getName()); producerRecord.putUserProperty(ERROR_HEADER_CONNECTOR_NAME, connectorTaskId.connector()); producerRecord.putUserProperty(ERROR_HEADER_TASK_ID, connectorTaskId.task() + ""); if (context.error() != null) { Throwable error = context.error(); headers.put(ERROR_HEADER_EXCEPTION, error.getClass().getName()); headers.put(ERROR_HEADER_EXCEPTION_MESSAGE, error.getMessage()); byte[] trace; if ((trace = stacktrace(context.error())) != null) { headers.put(ERROR_HEADER_EXCEPTION_STACK_TRACE, new String(trace)); } } }
3.68
Activiti_TreeMethodExpression_invoke
/** * Evaluates the expression and invokes the method. * @param context used to resolve properties (<code>base.property</code> and <code>base[property]</code>) * @param paramValues * @return method result or <code>null</code> if this is a literal text expression * @throws ELException if evaluation fails (e.g. suitable method not found) */ @Override public Object invoke(ELContext context, Object[] paramValues) throws ELException { return node.invoke(bindings, context, type, types, paramValues); }
3.68
framework_CheckBoxGroup_isHtmlContentAllowed
/** * Checks whether captions are interpreted as html or plain text. * * @return true if the captions are used as html, false if used as plain * text * @see #setHtmlContentAllowed(boolean) */ public boolean isHtmlContentAllowed() { return getState(false).htmlContentAllowed; }
3.68
framework_CustomFieldConnector_getContent
/** * Returns the content (only/first child) of the container. * * @return child connector or null if none (e.g. invisible or not set on * server) */ protected ComponentConnector getContent() { List<ComponentConnector> children = getChildComponents(); if (children.isEmpty()) { return null; } else { return children.get(0); } }
3.68
graphhopper_NavigateResponseConverter_getModifier
/** * No modifier values for arrive and depart * <p> * Find modifier values here: https://www.mapbox.com/api-documentation/#stepmaneuver-object */ private static String getModifier(Instruction instruction) { switch (instruction.getSign()) { case Instruction.CONTINUE_ON_STREET: return "straight"; case Instruction.U_TURN_LEFT: case Instruction.U_TURN_RIGHT: case Instruction.U_TURN_UNKNOWN: return "uturn"; case Instruction.KEEP_LEFT: case Instruction.TURN_SLIGHT_LEFT: return "slight left"; case Instruction.TURN_LEFT: return "left"; case Instruction.TURN_SHARP_LEFT: return "sharp left"; case Instruction.KEEP_RIGHT: case Instruction.TURN_SLIGHT_RIGHT: return "slight right"; case Instruction.TURN_RIGHT: return "right"; case Instruction.TURN_SHARP_RIGHT: return "sharp right"; case Instruction.USE_ROUNDABOUT: // TODO: This might be an issue in left-handed traffic, because there it should be left return "right"; default: return null; } }
3.68
hmily_ExtensionLoader_getValue
/** * Gets value. * * @return the value */ public T getValue() { return value; }
3.68
pulsar_WindowManager_track
/** * feed the event to the eviction and trigger policies * for bookkeeping and optionally firing the trigger. */ private void track(Event<T> windowEvent) { evictionPolicy.track(windowEvent); triggerPolicy.track(windowEvent); }
3.68
hbase_ServerNonceManager_createCleanupScheduledChore
/** * Creates a scheduled chore that is used to clean up old nonces. * @param stoppable Stoppable for the chore. * @return ScheduledChore; the scheduled chore is not started. */ public ScheduledChore createCleanupScheduledChore(Stoppable stoppable) { // By default, it will run every 6 minutes (30 / 5). return new ScheduledChore("nonceCleaner", stoppable, deleteNonceGracePeriod / 5) { @Override protected void chore() { cleanUpOldNonces(); } }; }
3.68
hadoop_ItemInfo_getStartPath
/** * Returns the start path id of the current file. This indicates that SPS * was invoked on this path. */ public long getStartPath() { return startPathId; }
3.68
framework_FlyweightCell_getRow
/** * Returns the row index of the cell. * * @return the row index */ public int getRow() { assertSetup(); return row.getRow(); }
3.68
framework_DataCommunicator_pushData
/** * Sends given collection of data objects to the client-side. * * @param firstIndex * first index of pushed data * @param data * data objects to send as an iterable */ protected void pushData(int firstIndex, List<T> data) { JsonArray dataArray = Json.createArray(); int i = 0; for (T item : data) { dataArray.set(i++, getDataObject(item)); } rpc.setData(firstIndex, dataArray); handler.addActiveData(data.stream()); handler.cleanUp(data.stream()); }
3.68
hudi_TableSchemaResolver_getTableAvroSchemaWithoutMetadataFields
/** * Gets the user data schema for a hoodie table in Avro format. * * @return Avro user data schema * @throws Exception if the schema cannot be found * * @deprecated use {@link #getTableAvroSchema(boolean)} instead */ @Deprecated public Schema getTableAvroSchemaWithoutMetadataFields() throws Exception { return getTableAvroSchemaInternal(false, Option.empty()).orElseThrow(schemaNotFoundError()); }
3.68
graphhopper_PbfBlobResult_storeFailureResult
/** * Stores a failure result for a blob decoding operation. */ public void storeFailureResult(Exception ex) { complete = true; success = false; this.ex = ex; }
3.68
framework_LayoutManager_getMarginLeft
/** * Gets the left margin of the given element, provided that it has been * measured. These elements are guaranteed to be measured: * <ul> * <li>ManagedLayouts and their child Connectors * <li>Elements for which there is at least one ElementResizeListener * <li>Elements for which at least one ManagedLayout has registered a * dependency * </ul> * * A negative number is returned if the element has not been measured. If 0 * is returned, it might indicate that the element is not attached to the * DOM. * * @param element * the element to get the measured size for * @return the measured left margin of the element in pixels. */ public int getMarginLeft(Element element) { assert needsMeasure( element) : "Getting measurement for element that is not measured"; return getMeasuredSize(element, nullSize).getMarginLeft(); }
3.68
hbase_NamespaceTableAndRegionInfo_getRegionCount
/** * Gets the total number of regions in namespace. * @return the region count */ synchronized int getRegionCount() { int regionCount = 0; for (Entry<TableName, AtomicInteger> entry : this.tableAndRegionInfo.entrySet()) { regionCount = regionCount + entry.getValue().get(); } return regionCount; }
3.68
dubbo_FutureContext_setFuture
/** * Set the future. * * @param future the future to store in this context */ public void setFuture(CompletableFuture<?> future) { this.future = future; }
3.68
morf_ChangeIndex_getTableName
/** * Gets the name of the table to change. * * @return the name of the table to change */ public String getTableName() { return tableName; }
3.68
hbase_ReplicationSourceManager_join
/** * Terminate the replication on this region server */ public void join() { this.executor.shutdown(); for (ReplicationSourceInterface source : this.sources.values()) { source.terminate("Region server is closing"); } synchronized (oldsources) { for (ReplicationSourceInterface source : this.oldsources) { source.terminate("Region server is closing"); } } }
3.68
hadoop_StateStoreSerializer_getSerializer
/** * Get a serializer based on the provided configuration. * @param conf Configuration. Default if null. * @return Singleton serializer. */ public static StateStoreSerializer getSerializer(Configuration conf) { if (conf == null) { synchronized (StateStoreSerializer.class) { if (defaultSerializer == null) { conf = new Configuration(); defaultSerializer = newSerializer(conf); } } return defaultSerializer; } else { return newSerializer(conf); } }
3.68
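The null-conf branch above is a lazy singleton; a common hardening of the same idea is classic double-checked locking with a volatile field and a re-check outside the lock. A sketch under the assumption that newSerializer is the factory from the surrounding class (StateStoreSerializer's import is omitted; it comes from the same module):
import org.apache.hadoop.conf.Configuration;

public class SerializerProviderSketch {
    private static volatile StateStoreSerializer defaultSerializer;

    static StateStoreSerializer getSerializer(Configuration conf) {
        if (conf != null) {
            return newSerializer(conf); // explicit config: always a fresh instance
        }
        StateStoreSerializer local = defaultSerializer; // single volatile read
        if (local == null) {
            synchronized (SerializerProviderSketch.class) {
                local = defaultSerializer;
                if (local == null) {
                    local = newSerializer(new Configuration());
                    defaultSerializer = local;
                }
            }
        }
        return local;
    }

    private static StateStoreSerializer newSerializer(Configuration conf) {
        throw new UnsupportedOperationException("stand-in for the real factory");
    }
}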
hbase_TableDescriptor_hasGlobalReplicationScope
/** * Check if any of the table's cfs' replication scope are set to * {@link HConstants#REPLICATION_SCOPE_GLOBAL}. * @return {@code true} if we have, otherwise {@code false}. */ default boolean hasGlobalReplicationScope() { return Stream.of(getColumnFamilies()) .anyMatch(cf -> cf.getScope() == HConstants.REPLICATION_SCOPE_GLOBAL); }
3.68
hbase_SpaceQuotaRefresherChore_isInViolation
/** * Checks if the given <code>snapshot</code> is in violation, allowing the snapshot to be null. If * the snapshot is null, this is interpreted as no snapshot which implies not in violation. * @param snapshot The snapshot to operate on. * @return true if the snapshot is in violation, false otherwise. */ boolean isInViolation(SpaceQuotaSnapshot snapshot) { if (snapshot == null) { return false; } return snapshot.getQuotaStatus().isInViolation(); }
3.68
framework_BinderValidationStatus_getBinder
/** * Gets the source binder of the status. * * @return the source binder */ public Binder<BEAN> getBinder() { return binder; }
3.68
dubbo_AbstractDirectory_refreshInvoker
/** * Refresh invokers from the total invokers: * 1. all invokers in the need-to-reconnect list should be removed from the valid invokers list * 2. all invokers in the disabled invokers list should be removed from the valid invokers list * 3. all invokers that disappeared from the total invokers should be removed from the need-to-reconnect list * 4. all invokers that disappeared from the total invokers should be removed from the disabled invokers list */ public void refreshInvoker() { if (invokersInitialized) { refreshInvokerInternal(); } MetricsEventBus.publish( RegistryEvent.refreshDirectoryEvent(applicationModel, getSummary(), getDirectoryMeta())); }
3.68
hbase_ColumnFamilyDescriptorBuilder_setEncryptionType
/** * Set the encryption algorithm for use with this family * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setEncryptionType(String algorithm) { return setValue(ENCRYPTION_BYTES, algorithm); }
3.68
open-banking-gateway_Xs2aConsentInfo_isPasswordPresent
/** * Is the PSU password present in the context. */ public boolean isPasswordPresent(Xs2aContext ctx) { return null != ctx.getPsuPassword() && !isOauthEmbeddedPreStepDone(ctx); }
3.68
framework_Link_getTargetName
/** * Returns the target window name. An empty name or null implies that the * target is opened in the window containing the link. * * @return the target window name. */ public String getTargetName() { return getState(false).target; }
3.68
flink_RoundRobinOperatorStateRepartitioner_initMergeMapList
/** * Init the list of StreamStateHandle -> OperatorStateHandle map with given * parallelSubtaskStates when parallelism not changed. */ private List<Map<StreamStateHandle, OperatorStateHandle>> initMergeMapList( List<List<OperatorStateHandle>> parallelSubtaskStates) { int parallelism = parallelSubtaskStates.size(); final List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList = new ArrayList<>(parallelism); for (List<OperatorStateHandle> previousParallelSubtaskState : parallelSubtaskStates) { mergeMapList.add( previousParallelSubtaskState.stream() .collect( Collectors.toMap( OperatorStateHandle::getDelegateStateHandle, Function.identity()))); } return mergeMapList; }
3.68
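The stream-collect in initMergeMapList is the standard toMap-with-identity idiom; a tiny standalone version with illustrative names, plus the usual caveat that Collectors.toMap throws IllegalStateException on duplicate keys:
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class ToMapDemo {
    record Handle(String delegate) { }

    public static void main(String[] args) {
        List<Handle> handles = List.of(new Handle("s1"), new Handle("s2"));
        // Key = delegate id, value = the handle itself, as in initMergeMapList.
        Map<String, Handle> byDelegate = handles.stream()
            .collect(Collectors.toMap(Handle::delegate, Function.identity()));
        System.out.println(byDelegate.keySet()); // [s1, s2] in some order
    }
}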
shardingsphere-elasticjob_DefaultYamlTupleProcessor_process
/** * Process node tuple. * * @param nodeTuple node tuple * @return processed node tuple */ public NodeTuple process(final NodeTuple nodeTuple) { return isUnsetNodeTuple(nodeTuple.getValueNode()) ? null : nodeTuple; }
3.68
hadoop_SinglePendingCommit_getVersion
/** @return version marker. */ public int getVersion() { return version; }
3.68
hbase_MiniHBaseCluster_getServerWith
/** * Get the location of the specified region * @param regionName Name of the region in bytes * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()} of HRS carrying * the specified region. Returns -1 if none found. */ public int getServerWith(byte[] regionName) { int index = 0; for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); if (!hrs.isStopped()) { Region region = hrs.getOnlineRegion(regionName); if (region != null) { return index; } } index++; } return -1; }
3.68
hbase_HBaseTestingUtility_getNewDataTestDirOnTestFS
/** * Sets up a new path in test filesystem to be used by tests. */ private Path getNewDataTestDirOnTestFS() throws IOException { // The file system can be either local, mini dfs, or if the configuration // is supplied externally, it can be an external cluster FS. If it is a local // file system, the tests should use getBaseTestDir, otherwise, we can use // the working directory, and create a unique sub dir there FileSystem fs = getTestFileSystem(); Path newDataTestDir; String randomStr = getRandomUUID().toString(); if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) { newDataTestDir = new Path(getDataTestDir(), randomStr); File dataTestDir = new File(newDataTestDir.toString()); if (deleteOnExit()) dataTestDir.deleteOnExit(); } else { Path base = getBaseTestDirOnTestFS(); newDataTestDir = new Path(base, randomStr); if (deleteOnExit()) fs.deleteOnExit(newDataTestDir); } return newDataTestDir; }
3.68
hbase_HRegionServer_getCopyOfOnlineRegionsSortedByOffHeapSize
/** * @return A new Map of online regions sorted by region off-heap size with the first entry being * the biggest. */ SortedMap<Long, Collection<HRegion>> getCopyOfOnlineRegionsSortedByOffHeapSize() { // we'll sort the regions in reverse SortedMap<Long, Collection<HRegion>> sortedRegions = new TreeMap<>(Comparator.reverseOrder()); // Copy over all regions. Regions are sorted by size with biggest first. for (HRegion region : this.onlineRegions.values()) { addRegion(sortedRegions, region, region.getMemStoreOffHeapSize()); } return sortedRegions; }
3.68
hadoop_Sender_op
/** Initialize an operation. */ private static void op(final DataOutput out, final Op op) throws IOException { out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); op.write(out); }
3.68
hbase_MunkresAssignment_stepTwo
/** * Corresponds to step 2 of the original algorithm. */ private void stepTwo() { // Construct a path of alternating starred zeroes and primed zeroes, where // each starred zero is in the same column as the previous primed zero, and // each primed zero is in the same row as the previous starred zero. The // path will always end in a primed zero. while (true) { Pair<Integer, Integer> star = starInCol(path.getLast().getSecond()); if (star != null) { path.offerLast(star); } else { break; } Pair<Integer, Integer> prime = primeInRow(path.getLast().getFirst()); path.offerLast(prime); } // Augment path - unmask all starred zeroes and star all primed zeroes. All // nodes in the path will be either starred or primed zeroes. The set of // starred zeroes is independent and now one larger than before. for (Pair<Integer, Integer> p : path) { if (mask[p.getFirst()][p.getSecond()] == STAR) { mask[p.getFirst()][p.getSecond()] = NONE; } else { mask[p.getFirst()][p.getSecond()] = STAR; } } // Clear all covers from rows and columns. Arrays.fill(rowsCovered, false); Arrays.fill(colsCovered, false); // Remove the prime mask from all primed zeroes. for (int r = 0; r < rows; r++) { for (int c = 0; c < cols; c++) { if (mask[r][c] == PRIME) { mask[r][c] = NONE; } } } }
3.68
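A toy illustration of the augmenting loop in stepTwo above, with assumed mask constants: toggling STAR/NONE along an alternating path of primed and starred zeroes grows the independent set of stars by one.
public class AugmentPathDemo {
    static final byte NONE = 0, STAR = 1, PRIME = 2; // assumed to mirror MunkresAssignment

    public static void main(String[] args) {
        // Path (0,0) primed -> (1,0) starred -> (1,1) primed, as built in stepTwo().
        byte[][] mask = { { PRIME, NONE }, { STAR, PRIME } };
        int[][] path = { { 0, 0 }, { 1, 0 }, { 1, 1 } };
        for (int[] p : path) {
            mask[p[0]][p[1]] = (mask[p[0]][p[1]] == STAR) ? NONE : STAR;
        }
        // One star became two: (0,0) and (1,1) now form a larger independent set.
        System.out.println(mask[0][0] == STAR && mask[1][1] == STAR && mask[1][0] == NONE); // true
    }
}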
framework_LayoutManager_setNeedsMeasureRecursively
/** * Informs this LayoutManager that some sizes in a component hierarchy might * have changed. This method should be used whenever the size of any child * component might have changed from outside of Vaadin's normal update * phase, e.g. when a CSS class name related to sizing has been changed. * <p> * To set a single component to be measured, use * {@link #setNeedsMeasure(ComponentConnector)} instead. * <p> * If there is no upcoming layout phase, a new layout phase is scheduled. * * @since 7.2 * @param component * the component at the root of the component hierarchy to * measure */ public void setNeedsMeasureRecursively(ComponentConnector component) { setNeedsMeasure(component); if (component instanceof HasComponentsConnector) { HasComponentsConnector hasComponents = (HasComponentsConnector) component; for (ComponentConnector child : hasComponents .getChildComponents()) { setNeedsMeasureRecursively(child); } } }
3.68
hbase_AbstractFSWAL_updateStore
/** * Updates the sequence number of a specific store. Depending on the flag, it replaces the current * seq number only if the given seq id is bigger, or unconditionally even if it is lower than the * existing one. */ @Override public void updateStore(byte[] encodedRegionName, byte[] familyName, Long sequenceid, boolean onlyIfGreater) { sequenceIdAccounting.updateStore(encodedRegionName, familyName, sequenceid, onlyIfGreater); }
3.68
hadoop_ClientDatanodeProtocolServerSideTranslatorPB_getDiskBalancerSetting
/** * Returns a run-time setting from diskbalancer like Bandwidth. */ @Override public DiskBalancerSettingResponseProto getDiskBalancerSetting( RpcController controller, DiskBalancerSettingRequestProto request) throws ServiceException { try { String val = impl.getDiskBalancerSetting(request.getKey()); return DiskBalancerSettingResponseProto.newBuilder() .setValue(val) .build(); } catch (Exception e) { throw new ServiceException(e); } }
3.68
dubbo_AbstractReferenceBuilder_injvm
/** * @param injvm * @see AbstractInterfaceBuilder#scope(String) * @deprecated instead, use the parameter <b>scope</b> to judge if it's in jvm, scope=local */ @Deprecated public B injvm(Boolean injvm) { this.injvm = injvm; return getThis(); }
3.68
hadoop_RegistryOperationsFactory_createInstance
/** * Create and initialize a registry operations instance. * Access rights will be determined from the configuration * @param name name of the instance * @param conf configuration * @return a registry operations instance * @throws ServiceStateException on any failure to initialize */ public static RegistryOperations createInstance(String name, Configuration conf) { Preconditions.checkArgument(conf != null, "Null configuration"); RegistryOperationsClient operations = new RegistryOperationsClient(name); operations.init(conf); return operations; }
3.68
hadoop_DynamicIOStatisticsBuilder_activeInstance
/** * Get the statistics instance. * @return the instance to build/return * @throws IllegalStateException if the builder has already been built. */ private DynamicIOStatistics activeInstance() { checkState(instance != null, "Already built"); return instance; }
3.68
hbase_JVM_getMaxFileDescriptorCount
/** * Get the maximum number of file descriptors the system can use. If Oracle java, it will use * the com.sun.management interfaces. Otherwise, this method implements it (Linux only). * @return max number of file descriptors the operating system can use. */ public long getMaxFileDescriptorCount() { Long mfdc; if (!ibmvendor) { mfdc = runUnixMXBeanMethod("getMaxFileDescriptorCount"); return (mfdc != null ? mfdc : -1); } InputStream in = null; BufferedReader output = null; try { // using linux bash commands to retrieve info Process p = Runtime.getRuntime().exec(new String[] { "bash", "-c", "ulimit -n" }); in = p.getInputStream(); output = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); String maxFileDesCount; if ((maxFileDesCount = output.readLine()) != null) { return Long.parseLong(maxFileDesCount); } } catch (IOException ie) { LOG.warn("Not able to get the max number of file descriptors", ie); } finally { if (output != null) { try { output.close(); } catch (IOException e) { LOG.warn("Not able to close the reader", e); } } if (in != null) { try { in.close(); } catch (IOException e) { LOG.warn("Not able to close the InputStream", e); } } } return -1; }
3.68
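A sketch of the Linux fallback above rewritten with try-with-resources so the streams close themselves; the 'ulimit -n' invocation comes from the snippet, the rest is an assumed modernization (note bash may also print 'unlimited', which the catch treats as unknown).
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public class MaxFileDescriptorSketch {
    static long maxFileDescriptors() {
        try {
            Process p = new ProcessBuilder("bash", "-c", "ulimit -n").start();
            try (BufferedReader out = new BufferedReader(
                new InputStreamReader(p.getInputStream(), StandardCharsets.UTF_8))) {
                String line = out.readLine();
                if (line != null) {
                    return Long.parseLong(line.trim());
                }
            }
        } catch (IOException | NumberFormatException e) {
            // fall through to the -1 sentinel, as in the original
        }
        return -1;
    }

    public static void main(String[] args) {
        System.out.println(maxFileDescriptors());
    }
}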
pulsar_ProducerConfiguration_setMaxPendingMessages
/** * Set the max size of the queue holding the messages pending to receive an acknowledgment from the broker. * <p> * When the queue is full, by default, all calls to {@link Producer#send} and {@link Producer#sendAsync} will fail * unless blockIfQueueFull is set to true. Use {@link #setBlockIfQueueFull} to change the blocking behavior. * * @param maxPendingMessages the maximum size of the pending-message queue * @return the producer configuration, for chaining */ public ProducerConfiguration setMaxPendingMessages(int maxPendingMessages) { conf.setMaxPendingMessages(maxPendingMessages); return this; }
3.68
hbase_ByteBuffAllocator_create
/** * Initialize a {@link ByteBuffAllocator} which will try to allocate ByteBuffers from off-heap if * reservoir is enabled and the reservoir has enough buffers, otherwise the allocator will just * allocate the insufficient buffers from on-heap to meet the requirement. * @param conf the configuration which supplies the arguments to initialize the allocator. * @param reservoirEnabled indicate whether the reservoir is enabled or disabled. NOTICE: if * reservoir is enabled, then we will use the pool allocator to allocate * off-heap ByteBuffers and use the HEAP allocator to allocate heap * ByteBuffers. Otherwise if reservoir is disabled then all allocations * will happen in HEAP instance. * @return ByteBuffAllocator to manage the byte buffers. */ public static ByteBuffAllocator create(Configuration conf, boolean reservoirEnabled) { int poolBufSize = conf.getInt(BUFFER_SIZE_KEY, DEFAULT_BUFFER_SIZE); if (reservoirEnabled) { // The max number of buffers to be pooled in the ByteBufferPool. The default value has been // selected based on the #handlers configured. When it is a read request, 2 MB is the max size // at which we will send back one RPC request. Means max we need 2 MB for creating the // response cell block. (Well it might be much less than this because in the 2 MB size calc, we // include the heap size overhead of each cell also.) Considering 2 MB, we will need // (2 * 1024 * 1024) / poolBufSize buffers to make the response cell block. Pool buffer size // is by default 64 KB. // In case of a read request, at the end of the handler process, we will make the response // cellblock and add the Call to the connection's response Q, and a single Responder thread takes // connections and responses from that one by one and does the socket write. So there are chances // that by the time a handler originated response is actually done writing to the socket and so // has released the BBs it used, the handler might have processed one more read req. On average we // assume 2x, and consider that also for the max buffers to pool int bufsForTwoMB = (2 * 1024 * 1024) / poolBufSize; int maxBuffCount = conf.getInt(MAX_BUFFER_COUNT_KEY, conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * bufsForTwoMB * 2); int minSizeForReservoirUse = conf.getInt(MIN_ALLOCATE_SIZE_KEY, poolBufSize / 6); Class<?> clazz = conf.getClass(BYTEBUFF_ALLOCATOR_CLASS, ByteBuffAllocator.class); return (ByteBuffAllocator) ReflectionUtils.newInstance(clazz, true, maxBuffCount, poolBufSize, minSizeForReservoirUse); } else { return HEAP; } }
3.68
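Working the sizing comment above through with the default 64 KB pool buffer and an assumed handler count of 30 (anything not read from the snippet's constants is an assumption):
public class BufferCountMath {
    public static void main(String[] args) {
        int poolBufSize = 64 * 1024;                        // default pool buffer size per the comment
        int handlers = 30;                                  // assumed handler count
        int bufsForTwoMB = (2 * 1024 * 1024) / poolBufSize; // 32 buffers per 2 MB cell block
        int maxBuffCount = handlers * bufsForTwoMB * 2;     // 2x headroom -> 1920 pooled buffers
        int minSizeForReservoirUse = poolBufSize / 6;       // 10922 bytes (~10.7 KB) threshold
        System.out.println(bufsForTwoMB + ", " + maxBuffCount + ", " + minSizeForReservoirUse);
    }
}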
flink_ProjectOperator_projectTuple9
/** * Projects a {@link Tuple} {@link DataSet} to the previously selected fields. * * @return The projected DataSet. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7, T8> ProjectOperator<T, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> projectTuple9() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType()); TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> tType = new TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(fTypes); return new ProjectOperator<T, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>( this.ds, this.fieldIndexes, tType); }
3.68
flink_FlinkPreparingTableBase_getNames
/** * Returns the table path in the {@link RelOptSchema}. Different from {@link * #getQualifiedName()}; the latter is mainly used for the table digest. */ public List<String> getNames() { return names; }
3.68
flink_CheckpointConfig_getAlignedCheckpointTimeout
/** * @return value of alignment timeout, as configured via {@link * #setAlignedCheckpointTimeout(Duration)} or {@link * ExecutionCheckpointingOptions#ALIGNED_CHECKPOINT_TIMEOUT}. */ @PublicEvolving public Duration getAlignedCheckpointTimeout() { return configuration.get(ExecutionCheckpointingOptions.ALIGNED_CHECKPOINT_TIMEOUT); }
3.68
dubbo_URLParam_addParameterIfAbsent
/** * Add absent parameters to a new URLParam. * * @param key key * @param value value * @return A new URLParam */ public URLParam addParameterIfAbsent(String key, String value) { if (StringUtils.isEmpty(key) || StringUtils.isEmpty(value)) { return this; } if (hasParameter(key)) { return this; } return addParametersIfAbsent(Collections.singletonMap(key, value)); }
3.68
hadoop_BlockGrouper_getRequiredNumDataBlocks
/** * Get required data blocks count in a BlockGroup. * @return count of required data blocks */ public int getRequiredNumDataBlocks() { return schema.getNumDataUnits(); }
3.68
flink_TaskExecutionState_getAccumulators
/** Gets flink and user-defined accumulators in serialized form. */ public AccumulatorSnapshot getAccumulators() { return accumulators; }
3.68
framework_Form_setBuffered
/* * Sets the editor's buffered mode to the specified status. Don't add a * JavaDoc comment here, we use the default one from the interface. */ @Override public void setBuffered(boolean buffered) { if (buffered != this.buffered) { this.buffered = buffered; for (final Object id : propertyIds) { fields.get(id).setBuffered(buffered); } } }
3.68
mutate-test-kata_EmployeeFixed_setName
/** * Set the employee name after removing leading and trailing spaces, which could be left by upstream system * @param newName the new name for the employee, possibly with leading and trailing white space to be removed */ public void setName(String newName) { this.name = newName.trim(); }
3.68
hudi_Key_incrementWeight
/** * Increments the weight of <i>this</i> key by one. */ public void incrementWeight() { this.weight++; }
3.68
flink_NumericColumnSummary_getMissingCount
/** * The number of "missing" values where "missing" is defined as null, NaN, or Infinity. * * <p>These values are ignored in some calculations like mean, variance, and standardDeviation. */ public long getMissingCount() { return nullCount + nanCount + infinityCount; }
3.68
hbase_VisibilityUtils_writeLabelOrdinalsToStream
/** * This will sort the passed labels in ascending order and then write them one after the other to * the passed stream. * @param labelOrdinals unsorted label ordinals * @param dos stream where to write the labels * @throws IOException when an IOE occurs during writes to the stream */ private static void writeLabelOrdinalsToStream(List<Integer> labelOrdinals, DataOutputStream dos) throws IOException { Collections.sort(labelOrdinals); for (Integer labelOrdinal : labelOrdinals) { StreamUtils.writeRawVInt32(dos, labelOrdinal); } }
3.68
hmily_NacosClient_addListener
/** * Add listener. * * @param context the context * @param passiveHandler the passive handler * @param config the config * @throws NacosException the nacos exception */ void addListener(final Supplier<ConfigLoader.Context> context, final ConfigLoader.PassiveHandler<NacosPassiveConfig> passiveHandler, final NacosConfig config) throws NacosException { if (!config.isPassive()) { return; } if (configService == null) { LOGGER.warn("nacos configService is null..."); return; } configService.addListener(config.getDataId(), config.getGroup(), new Listener() { @Override public Executor getExecutor() { return null; } @Override public void receiveConfigInfo(final String s) { NacosPassiveConfig nacosPassiveConfig = new NacosPassiveConfig(); nacosPassiveConfig.setValue(s); nacosPassiveConfig.setFileExtension(config.getFileExtension()); nacosPassiveConfig.setDataId(config.getDataId()); passiveHandler.passive(context, nacosPassiveConfig); } }); LOGGER.info("passive nacos remote started...."); }
3.68
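For context, a hedged sketch of registering such a listener directly against the Nacos client API; the server address, data id, and group are placeholders, while Listener's two methods match the anonymous class in the snippet.
import java.util.Properties;
import java.util.concurrent.Executor;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.config.listener.Listener;

public class NacosListenerSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("serverAddr", "127.0.0.1:8848"); // placeholder server address
        ConfigService configService = NacosFactory.createConfigService(props);
        configService.addListener("hmily-demo", "DEFAULT_GROUP", new Listener() {
            @Override
            public Executor getExecutor() {
                return null; // null = run the callback on the client's notifier thread
            }

            @Override
            public void receiveConfigInfo(String configInfo) {
                System.out.println("config changed: " + configInfo);
            }
        });
        Thread.sleep(Long.MAX_VALUE); // keep the demo alive to receive pushes
    }
}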
graphhopper_OSMReaderUtility_parseDuration
/** * Parser according to http://wiki.openstreetmap.org/wiki/Key:duration The value consists of a * string like 'hh:mm', in one of the formats 'mm', 'hh:mm' or 'hh:mm:ss', or * alternatively an ISO_8601 duration * <p> * * @return duration value in seconds */ public static long parseDuration(String str) throws IllegalArgumentException { if (str == null) return 0; // Check for ISO_8601 format if (str.startsWith("P")) { // A common mistake is when the minutes format is intended but the month format is specified // e.g. one month "P1M" is set, but one minute "PT1M" is meant. try { Duration dur = DatatypeFactory.newInstance().newDuration(str); return dur.getTimeInMillis(STATIC_DATE) / 1000; } catch (Exception ex) { throw new IllegalArgumentException("Cannot parse duration tag value: " + str, ex); } } try { int index = str.indexOf(":"); if (index > 0) { String hourStr = str.substring(0, index); String minStr = str.substring(index + 1); String secondsStr = "0"; index = minStr.indexOf(":"); if (index > 0) { secondsStr = minStr.substring(index + 1, index + 3); minStr = minStr.substring(0, index); } long seconds = Integer.parseInt(hourStr) * 60L * 60; seconds += Integer.parseInt(minStr) * 60L; seconds += Integer.parseInt(secondsStr); return seconds; } else { // value contains minutes return Integer.parseInt(str) * 60L; } } catch (Exception ex) { throw new IllegalArgumentException("Cannot parse duration tag value: " + str, ex); } }
3.68
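Expected results, traced by hand through the branches above (the test harness is hypothetical and assumes OSMReaderUtility is on the classpath; only parseDuration itself is from the snippet):
public class ParseDurationDemo {
    public static void main(String[] args) {
        System.out.println(OSMReaderUtility.parseDuration("30"));       // 1800: bare value is minutes
        System.out.println(OSMReaderUtility.parseDuration("02:30"));    // 9000: hh:mm
        System.out.println(OSMReaderUtility.parseDuration("02:30:30")); // 9030: hh:mm:ss
        System.out.println(OSMReaderUtility.parseDuration("PT2H30M"));  // 9000: ISO 8601 duration
    }
}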
flink_DefaultExecutionGraph_getAccumulatorsSerialized
/** * Gets a serialized accumulator map. * * @return The accumulator map with serialized accumulator values. */ @Override public Map<String, SerializedValue<OptionalFailure<Object>>> getAccumulatorsSerialized() { return aggregateUserAccumulators().entrySet().stream() .collect( Collectors.toMap( Map.Entry::getKey, entry -> serializeAccumulator(entry.getKey(), entry.getValue()))); }
3.68
hadoop_ContainerLogContext_getExitCode
/** * Get the exit code of the container. * * @return the exit code */ public int getExitCode() { return exitCode; }
3.68