Columns: name (string, lengths 12-178), code_snippet (string, lengths 8-36.5k), score (float64, range 3.26-3.68)
zilla_HpackContext_staticIndex14
// Index in static table for the given name of length 14 private static int staticIndex14(DirectBuffer name) { switch (name.getByte(13)) { case 'h': if (STATIC_TABLE[28].name.equals(name)) // content-length { return 28; } break; case 't': if (STATIC_TABLE[15].name.equals(name)) // accept-charset { return 15; } break; } return -1; }
3.68
hudi_SpillableMapUtils_generateEmptyPayload
/** * Utility method to generate a HoodieRecord with an empty payload for the given record key, partition path and ordering value, using the given payload class. */ public static <R> R generateEmptyPayload(String recKey, String partitionPath, Comparable orderingVal, String payloadClazz) { HoodieRecord<? extends HoodieRecordPayload> hoodieRecord = new HoodieAvroRecord<>(new HoodieKey(recKey, partitionPath), HoodieRecordUtils.loadPayload(payloadClazz, new Object[] {null, orderingVal}, GenericRecord.class, Comparable.class)); return (R) hoodieRecord; }
3.68
hbase_SplitTableRegionProcedure_openParentRegion
/** * Rollback of the "close parent region" step: reopen the parent region. */ private void openParentRegion(MasterProcedureEnv env) throws IOException { AssignmentManagerUtil.reopenRegionsForRollback(env, Collections.singletonList((getParentRegion())), getRegionReplication(env), getParentRegionServerName(env)); }
3.68
hbase_CachedClusterId_getFromCacheOrFetch
/** * Fetches the ClusterId from FS if it is not cached locally. Atomically updates the cached copy * and is thread-safe. Optimized to do a single fetch when multiple threads are trying to get it * from a clean cache. * @return ClusterId by reading from the FileSystem, or null in any error case, if the cluster ID does not * exist on the file system, or if the server initiated a tear down. */ public String getFromCacheOrFetch() { if (server.isStopping() || server.isStopped()) { return null; } String id = getClusterId(); if (id != null) { return id; } if (!attemptFetch()) { // A fetch is in progress. try { waitForFetchToFinish(); } catch (InterruptedException e) { // pass and return whatever is in the cache. } } return getClusterId(); }
3.68
hbase_FileArchiverNotifierImpl_bucketFilesToSnapshot
/** * For the given snapshot, find all files which this {@code snapshotName} references. After a file * is found to be referenced by the snapshot, it is removed from {@code filesToUpdate} and * {@code snapshotSizeChanges} is updated in concert. * @param snapshotName The snapshot to check * @param filesToUpdate A mapping of archived files to their size * @param snapshotSizeChanges A mapping of snapshots and their change in size */ void bucketFilesToSnapshot(String snapshotName, Map<String, Long> filesToUpdate, Map<String, Long> snapshotSizeChanges) throws IOException { // A quick check to avoid doing work if the caller unnecessarily invoked this method. if (filesToUpdate.isEmpty()) { return; } Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, CommonFSUtils.getRootDir(conf)); SnapshotDescription sd = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd); // For each region referenced by the snapshot for (SnapshotRegionManifest rm : manifest.getRegionManifests()) { // For each column family in this region for (FamilyFiles ff : rm.getFamilyFilesList()) { // And each store file in that family for (StoreFile sf : ff.getStoreFilesList()) { Long valueOrNull = filesToUpdate.remove(sf.getName()); if (valueOrNull != null) { // This storefile was recently archived, we should update this snapshot with its size snapshotSizeChanges.merge(snapshotName, valueOrNull, Long::sum); } // Short-circuit, if we have no more files that were archived, we don't need to iterate // over the rest of the snapshot. if (filesToUpdate.isEmpty()) { return; } } } } }
3.68
flink_RichOrCondition_getRight
/** @return One of the {@link IterativeCondition conditions} combined in this condition. */ public IterativeCondition<T> getRight() { return getNestedConditions()[1]; }
3.68
flink_AvroFactory_getSpecificDataForClass
/** * Creates a {@link SpecificData} object for a given class. Possibly uses the specific data from * the generated class with logical conversions applied (avro >= 1.9.x). * * <p>Copied over from {@code SpecificData#getForClass(Class<T> c)}; we do not use the method * directly because we want to stay API backwards compatible with older Avro versions which did * not have this method. */ public static <T extends SpecificData> SpecificData getSpecificDataForClass( Class<T> type, ClassLoader cl) { try { Field specificDataField = type.getDeclaredField("MODEL$"); specificDataField.setAccessible(true); return (SpecificData) specificDataField.get((Object) null); } catch (IllegalAccessException e) { throw new FlinkRuntimeException("Could not access the MODEL$ field of avro record", e); } catch (NoSuchFieldException e) { return new SpecificData(cl); } }
3.68
hbase_RestoreSnapshotProcedure_deleteRegionsFromInMemoryStates
/** * Delete regions from in-memory states * @param regionInfos regions to delete * @param env MasterProcedureEnv * @param regionReplication the number of region replications */ private void deleteRegionsFromInMemoryStates(List<RegionInfo> regionInfos, MasterProcedureEnv env, int regionReplication) { FavoredNodesManager fnm = env.getMasterServices().getFavoredNodesManager(); env.getAssignmentManager().getRegionStates().deleteRegions(regionInfos); env.getMasterServices().getServerManager().removeRegions(regionInfos); if (fnm != null) { fnm.deleteFavoredNodesForRegions(regionInfos); } // For region replicas if (regionReplication > 1) { for (RegionInfo regionInfo : regionInfos) { for (int i = 1; i < regionReplication; i++) { RegionInfo regionInfoForReplica = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, i); env.getAssignmentManager().getRegionStates().deleteRegion(regionInfoForReplica); env.getMasterServices().getServerManager().removeRegion(regionInfoForReplica); if (fnm != null) { fnm.deleteFavoredNodesForRegion(regionInfoForReplica); } } } } }
3.68
framework_Alignment_isLeft
/** * Checks if component is aligned to the left of the available space. * * @return true if aligned left */ public boolean isLeft() { return (bitMask & Bits.ALIGNMENT_LEFT) == Bits.ALIGNMENT_LEFT; }
3.68
hadoop_TimelineWriteResponse_setEntityId
/** * Set the entity Id. * * @param id the entity Id. */ public void setEntityId(String id) { this.entityId = id; }
3.68
flink_SliceAssigners_cumulative
/** * Creates a cumulative window {@link SliceAssigner} that assigns elements to slices of * cumulative windows. * * @param rowtimeIndex the index of the rowtime field in the input row, {@code -1} if based on * processing time. * @param shiftTimeZone The shift timezone of the window; if the proctime or rowtime type is * TIMESTAMP_LTZ, the shift timezone is the timezone the user configured in TableConfig, in other * cases the timezone is UTC, which means never shift when assigning windows. * @param maxSize the max size of the generated windows. * @param step the step interval of the generated windows. */ public static CumulativeSliceAssigner cumulative( int rowtimeIndex, ZoneId shiftTimeZone, Duration maxSize, Duration step) { return new CumulativeSliceAssigner( rowtimeIndex, shiftTimeZone, maxSize.toMillis(), step.toMillis(), 0); }
3.68
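For the cumulative assigner above, a hypothetical call (assuming the enclosing SliceAssigners class implied by the snippet name, with java.time.ZoneId and java.time.Duration; the values are illustrative, not from the source) would read:
    CumulativeSliceAssigner assigner = SliceAssigners.cumulative(
        1, ZoneId.of("UTC"), Duration.ofHours(1), Duration.ofMinutes(15));
    // rowtime at field index 1, no shift (UTC), windows grow up to 1 hour in 15-minute steps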
hadoop_CSQueueStore_getShortNameQueues
/** * This getter method will return an immutable map with all queues * which can be unambiguously referenced by short name, using short name * as the key. * @return Map containing queues and having short name as key */ @VisibleForTesting Map<String, CSQueue> getShortNameQueues() { //this is not the most efficient way to create a short-name map //but this method is only used in tests try { modificationLock.readLock().lock(); return ImmutableMap.copyOf( fullNameQueues //getting all queues from path->queue map .entrySet() .stream() //filtering the list to contain only unambiguous short names .filter( //keeping queues where get(queueShortname) == queue //these are the unambiguous references entry -> getMap.get(entry.getValue().getQueueShortName()) == entry.getValue()) //making a map from the stream .collect( Collectors.toMap( //using the queue's short name as key entry->entry.getValue().getQueueShortName(), //using the queue as value entry->entry.getValue())) ); } finally { modificationLock.readLock().unlock(); } }
3.68
hadoop_EntityRowKey_decode
/* * (non-Javadoc) * * Decodes an entity row key of the form * userName!clusterId!flowName!flowRunId!appId!entityType!entityPrefixId!entityId * represented in byte format and converts it into an EntityRowKey object. * flowRunId is inverted while decoding as it was inverted while encoding. * * @see * org.apache.hadoop.yarn.server.timelineservice.storage.common * .KeyConverter#decode(byte[]) */ @Override public EntityRowKey decode(byte[] rowKey) { byte[][] rowKeyComponents = Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES); if (rowKeyComponents.length != 8) { throw new IllegalArgumentException("the row key is not valid for " + "an entity"); } String userId = Separator.decode(Bytes.toString(rowKeyComponents[0]), Separator.QUALIFIERS, Separator.TAB, Separator.SPACE); String clusterId = Separator.decode(Bytes.toString(rowKeyComponents[1]), Separator.QUALIFIERS, Separator.TAB, Separator.SPACE); String flowName = Separator.decode(Bytes.toString(rowKeyComponents[2]), Separator.QUALIFIERS, Separator.TAB, Separator.SPACE); Long flowRunId = LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3])); String appId = appIDKeyConverter.decode(rowKeyComponents[4]); String entityType = Separator.decode(Bytes.toString(rowKeyComponents[5]), Separator.QUALIFIERS, Separator.TAB, Separator.SPACE); Long entityPrefixId = Bytes.toLong(rowKeyComponents[6]); String entityId = Separator.decode(Bytes.toString(rowKeyComponents[7]), Separator.QUALIFIERS, Separator.TAB, Separator.SPACE); return new EntityRowKey(clusterId, userId, flowName, flowRunId, appId, entityType, entityPrefixId, entityId); }
3.68
hadoop_RemoteMethod_getProtocol
/** * Get the interface/protocol for this method. For example, ClientProtocol or * NamenodeProtocol. * * @return Protocol for this method. */ public Class<?> getProtocol() { return this.protocol; }
3.68
rocketmq-connect_AbstractConnectController_connectorInfo
/** * Get the definition and status of a connector. * * @param connector name of the connector */ public ConnectorInfo connectorInfo(String connector) { final ClusterConfigState configState = configManagementService.snapshot(); if (!configState.contains(connector)) { throw new ConnectException("Connector[" + connector + "] does not exist"); } Map<String, String> config = configState.rawConnectorConfig(connector); return new ConnectorInfo( connector, config, configState.tasks(connector), connectorTypeForClass(config.get(ConnectorConfig.CONNECTOR_CLASS)) ); }
3.68
hbase_RecoverableZooKeeper_getChildren
/** * getChildren is an idempotent operation. Retry before throwing exception * @return List of children znodes */ public List<String> getChildren(String path, boolean watch) throws KeeperException, InterruptedException { return getChildren(path, null, watch); }
3.68
morf_SchemaUtils_copy
/** * Create a copy of a view. * * @param view The {@link View} to copy. * @return {@link View} implementation copied from the provided view. */ public static View copy(View view) { return new ViewBean(view); }
3.68
hbase_ReplicationProtobufUtil_buildReplicateWALEntryRequest
/** * Create a new ReplicateWALEntryRequest from a list of WAL entries * @param entries the WAL entries to be replicated * @param encodedRegionName alternative region name to use if not null * @param replicationClusterId Id which will uniquely identify source cluster FS client * configurations in the replication configuration directory * @param sourceBaseNamespaceDir Path to source cluster base namespace directory * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found. */ public static Pair<ReplicateWALEntryRequest, CellScanner> buildReplicateWALEntryRequest( final Entry[] entries, byte[] encodedRegionName, String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir) { // Accumulate all the Cells seen in here. List<List<? extends Cell>> allCells = new ArrayList<>(entries.length); int size = 0; WALEntry.Builder entryBuilder = WALEntry.newBuilder(); ReplicateWALEntryRequest.Builder builder = ReplicateWALEntryRequest.newBuilder(); for (Entry entry : entries) { entryBuilder.clear(); WALProtos.WALKey.Builder keyBuilder; try { keyBuilder = entry.getKey().getBuilder(WALCellCodec.getNoneCompressor()); } catch (IOException e) { throw new AssertionError( "This should not throw an exception since NoneCompressor does not throw any exceptions", e); } if (encodedRegionName != null) { keyBuilder.setEncodedRegionName(UnsafeByteOperations.unsafeWrap(encodedRegionName)); } entryBuilder.setKey(keyBuilder.build()); WALEdit edit = entry.getEdit(); List<Cell> cells = edit.getCells(); // Add up the size. It is used later when serializing out the kvs. for (Cell cell : cells) { size += PrivateCellUtil.estimatedSerializedSizeOf(cell); } // Collect up the cells allCells.add(cells); // Write out how many cells are associated with this entry. entryBuilder.setAssociatedCellCount(cells.size()); builder.addEntry(entryBuilder.build()); } if (replicationClusterId != null) { builder.setReplicationClusterId(replicationClusterId); } if (sourceBaseNamespaceDir != null) { builder.setSourceBaseNamespaceDirPath(sourceBaseNamespaceDir.toString()); } if (sourceHFileArchiveDir != null) { builder.setSourceHFileArchiveDirPath(sourceHFileArchiveDir.toString()); } return new Pair<>(builder.build(), getCellScanner(allCells, size)); }
3.68
framework_AbstractSelect_setMultiSelect
/** * Sets the multiselect mode. Setting multiselect mode false may lose * selection information: if selected items set contains one or more * selected items, only one of the selected items is kept as selected. * * Subclasses of AbstractSelect can choose not to support changing the * multiselect mode, and may throw {@link UnsupportedOperationException}. * * @param multiSelect * the New value of property multiSelect. */ public void setMultiSelect(boolean multiSelect) { if (multiSelect && getNullSelectionItemId() != null) { throw new IllegalStateException( "Multiselect and NullSelectionItemId can not be set at the same time."); } if (multiSelect != this.multiSelect) { // Selection before mode change final Object oldValue = getValue(); this.multiSelect = multiSelect; // Convert the value type if (multiSelect) { final Set<Object> s = new HashSet<Object>(); if (oldValue != null) { s.add(oldValue); } setValue(s); } else { final Set<?> s = (Set<?>) oldValue; if (s == null || s.isEmpty()) { setValue(null); } else { // Set the single select to contain only the first // selected value in the multiselect setValue(s.iterator().next()); } } markAsDirty(); } }
3.68
hadoop_WriteManager_getFileAttr
/** * If the file is in cache, update the size based on the cached data size */ Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle fileHandle, IdMappingServiceProvider iug) throws IOException { String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle); Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug); if (attr != null) { OpenFileCtx openFileCtx = fileContextCache.get(fileHandle); if (openFileCtx != null) { attr.setSize(openFileCtx.getNextOffset()); attr.setUsed(openFileCtx.getNextOffset()); } } return attr; }
3.68
flink_SingleInputGate_getConsumedPartitionType
/** * Returns the type of this input channel's consumed result partition. * * @return consumed result partition type */ public ResultPartitionType getConsumedPartitionType() { return consumedPartitionType; }
3.68
framework_ServerRpcHandler_getRpcInvocationsData
/** * Gets the data to recreate the RPC as requested by the client side. * * @return the data describing which RPC should be made, and all their * data */ public JsonArray getRpcInvocationsData() { return invocations; }
3.68
hadoop_LocalityMulticastAMRMProxyPolicy_getPolicyConfigWeighting
/** * Compute the "weighting" to give to a sublcuster based on the configured * policy weights (for the active subclusters). */ private float getPolicyConfigWeighting(SubClusterId targetId, AllocationBookkeeper allocationBookkeeper) { float totWeight = allocationBookkeeper.totPolicyWeight; Float localWeight = allocationBookkeeper.policyWeights.get(targetId); return (localWeight != null && totWeight > 0) ? localWeight / totWeight : 0; }
3.68
rocketmq-connect_JdbcSourceConnector_validate
/** * Should be invoked before starting the connector. * * @param config the connector configuration to validate */ @Override public void validate(KeyValue config) { jdbcSourceConfig = new JdbcSourceConfig(config); // validate config }
3.68
pulsar_AuthenticationDataProvider_authenticate
/** * For mutual authentication, this method uses the passed-in `data` to evaluate the challenge, * then returns null if authentication has completed; * it returns authentication data back to the server side if authentication has not completed. * * <p>Mainly used for mutual authentication like SASL. */ default AuthData authenticate(AuthData data) throws AuthenticationException { byte[] bytes = (hasDataFromCommand() ? this.getCommandData() : "").getBytes(UTF_8); return AuthData.of(bytes); }
3.68
hudi_AvroSchemaUtils_canProject
/** * Check that each field in the prevSchema can be populated in the newSchema, except for the specified columns. * @param prevSchema prev schema. * @param newSchema new schema * @param exceptCols column names excluded from the check * @return true if prev schema is a projection of new schema. */ public static boolean canProject(Schema prevSchema, Schema newSchema, Set<String> exceptCols) { return prevSchema.getFields().stream() .filter(f -> !exceptCols.contains(f.name())) .map(oldSchemaField -> SchemaCompatibility.lookupWriterField(newSchema, oldSchemaField)) .noneMatch(Objects::isNull); }
3.68
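A minimal sketch of how canProject above could be exercised, assuming the enclosing AvroSchemaUtils class implied by the snippet name, standard Avro imports, and java.util.Collections (the schema strings are invented for illustration):
    Schema prev = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"r\",\"fields\":[{\"name\":\"id\",\"type\":\"long\"},{\"name\":\"extra\",\"type\":\"string\"}]}");
    Schema next = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"r\",\"fields\":[{\"name\":\"id\",\"type\":\"long\"}]}");
    // "extra" is excluded and the remaining field "id" resolves against the new schema, so this returns true.
    boolean ok = AvroSchemaUtils.canProject(prev, next, Collections.singleton("extra"));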
graphhopper_VectorTile_clearType
/** * <pre> * The type of geometry stored in this feature. * </pre> * * <code>optional .vector_tile.Tile.GeomType type = 3 [default = UNKNOWN];</code> */ public Builder clearType() { bitField0_ = (bitField0_ & ~0x00000004); type_ = 0; onChanged(); return this; }
3.68
flink_DataSet_joinWithTiny
/** * Initiates a Join transformation. * * <p>A Join transformation joins the elements of two {@link DataSet DataSets} on key equality * and provides multiple ways to combine joining elements into one DataSet. * * <p>This method also gives the hint to the optimizer that the second DataSet to join is much * smaller than the first one. * * <p>This method returns a {@link JoinOperatorSets} on which {@link * JoinOperatorSets#where(String...)} needs to be called to define the join key of the first * joining (i.e., this) DataSet. * * @param other The other DataSet with which this DataSet is joined. * @return A JoinOperatorSets to continue the definition of the Join transformation. * @see JoinOperatorSets * @see DataSet */ public <R> JoinOperatorSets<T, R> joinWithTiny(DataSet<R> other) { return new JoinOperatorSets<>(this, other, JoinHint.BROADCAST_HASH_SECOND); }
3.68
flink_StateTable_containsKey
/** * Returns whether this table contains a mapping for the composite of active key and given * namespace. * * @param namespace the namespace in the composite key to search for. Not null. * @return {@code true} if this map contains the specified key/namespace composite key, {@code * false} otherwise. */ public boolean containsKey(N namespace) { return containsKey( keyContext.getCurrentKey(), keyContext.getCurrentKeyGroupIndex(), namespace); }
3.68
framework_VTabsheet_removeTab
/** * Removes a tab from this tab bar and updates the scroll position if * needed. If there is no tab that corresponds with the given index, * nothing is done. * <p> * Tab removal should always get triggered via the connector, even when * a tab's close button is clicked. That ensures that the states stay in * sync, and that logic such as selection change forced by tab removal * only needs to be implemented once. * * @param i * the index of the tab to remove */ public void removeTab(int i) { Tab tab = getTab(i); if (tab == null) { return; } remove(tab); tabWidths.remove(tab); /* * If this widget was still selected we need to unselect it. This * should only be necessary if there are no other tabs left that the * selection could move to. Otherwise the server-side updates the * selection when a component is removed from the tab sheet, and the * connector handles that selection change before triggering tab * removal. */ if (tab == selected) { selected = null; } int scrollerIndexCandidate = getTabIndex( getTabsheet().scrollerPositionTabId); if (scrollerIndexCandidate < 0) { // The tab with id scrollerPositionTabId has been removed scrollerIndexCandidate = getTabsheet().scrollerIndex; } scrollerIndexCandidate = getNearestShownTabIndex( scrollerIndexCandidate); if (scrollerIndexCandidate >= 0 && scrollerIndexCandidate < getTabCount()) { getTabsheet().scrollIntoView(getTab(scrollerIndexCandidate)); } }
3.68
hudi_BaseHoodieWriteClient_postCommit
/** * Post Commit Hook. Derived classes use this method to perform post-commit processing * * @param table table to commit on * @param metadata Commit Metadata corresponding to committed instant * @param instantTime Instant Time * @param extraMetadata Additional Metadata passed by user */ protected void postCommit(HoodieTable table, HoodieCommitMetadata metadata, String instantTime, Option<Map<String, String>> extraMetadata) { try { context.setJobStatus(this.getClass().getSimpleName(),"Cleaning up marker directories for commit " + instantTime + " in table " + config.getTableName()); // Delete the marker directory for the instant. WriteMarkersFactory.get(config.getMarkersType(), table, instantTime) .quietDeleteMarkerDir(context, config.getMarkersDeleteParallelism()); } finally { this.heartbeatClient.stop(instantTime); } }
3.68
flink_CallExpression_anonymous
/** * Creates a {@link CallExpression} to an anonymous function that has been declared inline * without a {@link FunctionIdentifier}. */ public static CallExpression anonymous( FunctionDefinition functionDefinition, List<ResolvedExpression> args, DataType dataType) { return new CallExpression(true, null, functionDefinition, args, dataType); }
3.68
framework_TabSheetClose_getTicketNumber
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber() */ @Override protected Integer getTicketNumber() { return 14348; }
3.68
hadoop_JobMonitor_onFailure
/** * Temporary hook for recording job failure. */ protected void onFailure(Job job) { LOG.info(job.getJobName() + " (" + job.getJobID() + ")" + " failure"); }
3.68
framework_AbsoluteLayout_getRightUnits
/** * Gets the unit for the 'right' attribute. * * @return See {@link Sizeable} UNIT_SYMBOLS for a description of the * available units. */ public Unit getRightUnits() { return rightUnits; }
3.68
morf_SqlUtils_tableRef
/** * Construct a new table with a given name with provided DB-link. * * @param tableName the name of the table * @param dblink database link if table exists in another database * @return {@link TableReference} */ public static TableReference tableRef(String tableName, String dblink) { Validate.notEmpty(tableName, "Table name was not provided."); Validate.notEmpty(dblink, "DB-link name was not provided."); return new TableReference(null, tableName, dblink); }
3.68
hbase_MetricSampleQuantiles_insert
/** * Add a new value from the stream. * @param v the value to insert */ synchronized public void insert(long v) { buffer[bufferCount] = v; bufferCount++; count++; if (bufferCount == buffer.length) { insertBatch(); compress(); } }
3.68
flink_ExecutionEnvironment_getRestartStrategy
/** * Returns the specified restart strategy configuration. * * @return The restart strategy configuration to be used */ @PublicEvolving public RestartStrategies.RestartStrategyConfiguration getRestartStrategy() { return config.getRestartStrategy(); }
3.68
hbase_PrettyPrinter_humanReadableIntervalToSec
/** * Convert a human readable time interval to seconds. Examples of the human readable time * intervals are: 50 DAYS 1 HOUR 30 MINUTES, 25000 SECONDS etc. The units of time specified can * be in uppercase as well as lowercase. Also, if a single number is specified without any time * unit, it is assumed to be in seconds. * @return value in seconds */ private static long humanReadableIntervalToSec(final String humanReadableInterval) throws HBaseException { if (humanReadableInterval == null || humanReadableInterval.equalsIgnoreCase("FOREVER")) { return HConstants.FOREVER; } try { return Long.parseLong(humanReadableInterval); } catch (NumberFormatException ex) { LOG.debug("Given interval value is not a number, parsing for human readable format"); } String days = null; String hours = null; String minutes = null; String seconds = null; String expectedTtl = null; long ttl; Matcher matcher = PrettyPrinter.INTERVAL_PATTERN.matcher(humanReadableInterval); if (matcher.matches()) { expectedTtl = matcher.group(2); days = matcher.group(4); hours = matcher.group(6); minutes = matcher.group(8); seconds = matcher.group(10); } else { LOG.warn("Given interval value '{}' is not a number and does not match human readable format," + " value will be set to 0.", humanReadableInterval); } ttl = 0; ttl += days != null ? Long.parseLong(days) * HConstants.DAY_IN_SECONDS : 0; ttl += hours != null ? Long.parseLong(hours) * HConstants.HOUR_IN_SECONDS : 0; ttl += minutes != null ? Long.parseLong(minutes) * HConstants.MINUTE_IN_SECONDS : 0; ttl += seconds != null ? Long.parseLong(seconds) : 0; if (expectedTtl != null && Long.parseLong(expectedTtl) != ttl) { throw new HBaseException( "Malformed TTL string: TTL values in seconds and human readable " + "format do not match"); } return ttl; }
3.68
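As a worked example of the parsing above, the interval "50 DAYS 1 HOUR 30 MINUTES" resolves to 50 * 86400 + 1 * 3600 + 30 * 60 = 4,325,400 seconds, while a bare "25000" is returned directly as 25000 seconds by the early Long.parseLong path.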
framework_Calendar_getInternalCalendar
/** * Get the internally used Calendar instance. This is the currently used * instance of {@link java.util.Calendar} but is bound to change during the * lifetime of the component. * * @return the currently used java calendar */ public java.util.Calendar getInternalCalendar() { return currentCalendar; }
3.68
flink_ResultInfo_getData
/** Get the data. */ public List<RowData> getData() { return data; }
3.68
flink_GenericInMemoryCatalog_isPartitionedTable
/** * Check if the given table is a partitioned table. Note that "false" is returned if the table * doesn't exist. */ private boolean isPartitionedTable(ObjectPath tablePath) { CatalogBaseTable table = null; try { table = getTable(tablePath); } catch (TableNotExistException e) { return false; } return (table instanceof CatalogTable) && ((CatalogTable) table).isPartitioned(); }
3.68
hbase_ByteBuffInputStream_skip
/** * Skips <code>n</code> bytes of input from this input stream. Fewer bytes might be skipped if the * end of the input stream is reached. The actual number <code>k</code> of bytes to be skipped is * equal to the smaller of <code>n</code> and remaining bytes in the stream. * @param n the number of bytes to be skipped. * @return the actual number of bytes skipped. */ @Override public long skip(long n) { long k = Math.min(n, available()); if (k <= 0) { return 0; } this.buf.skip((int) k); return k; }
3.68
flink_HiveGenericUDTF_setCollector
// Will only take effect after calling open() @VisibleForTesting protected final void setCollector(Collector collector) { function.setCollector(collector); }
3.68
flink_HiveParserJoinCondTypeCheckProcFactory_getColumnExprProcessor
/** Factory method to get ColumnExprProcessor. */ @Override public HiveParserTypeCheckProcFactory.ColumnExprProcessor getColumnExprProcessor() { return new HiveParserJoinCondTypeCheckProcFactory.JoinCondColumnExprProcessor(); }
3.68
hbase_Client_execute
/** * Execute a transaction method. Will call either <tt>executePathOnly</tt> or <tt>executeURI</tt> * depending on whether a path only is supplied in 'path', or if a complete URI is passed instead, * respectively. * @param cluster the cluster definition * @param method the HTTP method * @param headers HTTP header values to send * @param path the properly urlencoded path or URI * @return the HTTP response */ public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, String path) throws IOException { if (path.startsWith("/")) { return executePathOnly(cluster, method, headers, path); } return executeURI(method, headers, path); }
3.68
hadoop_FSDataOutputStreamBuilder_append
/** * Append to an existing file (optional operation). * * @return Generics Type B. */ public B append() { flags.add(CreateFlag.APPEND); return getThisBuilder(); }
3.68
hbase_Scan_readVersions
/** * Get up to the specified number of versions of each column. * @param versions specified number of versions for each column */ public Scan readVersions(int versions) { this.maxVersions = versions; return this; }
3.68
pulsar_AuthenticationProvider_newAuthState
/** * Create an authentication data State use passed in AuthenticationDataSource. */ default AuthenticationState newAuthState(AuthData authData, SocketAddress remoteAddress, SSLSession sslSession) throws AuthenticationException { return new OneStageAuthenticationState(authData, remoteAddress, sslSession, this); }
3.68
framework_Embedded_setAlternateText
/** * Sets this component's "alt-text", that is, an alternate text that can be * presented instead of this component's normal content, for accessibility * purposes. Does not work when {@link #setType(int)} has been called with * {@link #TYPE_BROWSER}. * * @param altText * A short, human-readable description of this component's * content. * @since 6.8 */ public void setAlternateText(String altText) { String oldAltText = getAlternateText(); if (altText != oldAltText || (altText != null && !altText.equals(oldAltText))) { getState().altText = altText; } }
3.68
hadoop_AWSRequestAnalyzer_isRequestMultipartIO
/** * Predicate which returns true if the request is part of the * multipart upload API - and which therefore must be rejected * if multipart upload is disabled. * @param request request * @return true if the request is part of the multipart upload API. */ public static boolean isRequestMultipartIO(final Object request) { return request instanceof UploadPartCopyRequest || request instanceof CompleteMultipartUploadRequest || request instanceof CreateMultipartUploadRequest || request instanceof UploadPartRequest; }
3.68
hbase_Result_mayHaveMoreCellsInRow
/** * For scanning large rows, the RS may choose to return the cells chunk by chunk to prevent OOM or * timeout. This flag is used to tell you if the current Result is the last one of the current * row. False means this Result is the last one. True means there MAY be more cells belonging to * the current row. If you don't use {@link Scan#setAllowPartialResults(boolean)} or * {@link Scan#setBatch(int)}, this method will always return false because the Result must * contain all cells in one Row. */ public boolean mayHaveMoreCellsInRow() { return mayHaveMoreCellsInRow; }
3.68
flink_StreamProjection_projectTuple8
/** * Projects a {@link Tuple} {@link DataStream} to the previously selected fields. * * @return The projected DataStream. * @see Tuple * @see DataStream */ public <T0, T1, T2, T3, T4, T5, T6, T7> SingleOutputStreamOperator<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> projectTuple8() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, dataStream.getType()); TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>> tType = new TupleTypeInfo<Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>(fTypes); return dataStream.transform( "Projection", tType, new StreamProject<IN, Tuple8<T0, T1, T2, T3, T4, T5, T6, T7>>( fieldIndexes, tType.createSerializer(dataStream.getExecutionConfig()))); }
3.68
hadoop_PathOutputCommitterFactory_createFileOutputCommitter
/** * Create an instance of the default committer, a {@link FileOutputCommitter} * for a task. * @param outputPath the task's output path, or null if no output path * has been defined. * @param context the task attempt context * @return the committer to use * @throws IOException problems instantiating the committer */ protected final PathOutputCommitter createFileOutputCommitter( Path outputPath, TaskAttemptContext context) throws IOException { LOG.debug("Creating FileOutputCommitter for path {} and context {}", outputPath, context); return new FileOutputCommitter(outputPath, context); }
3.68
streampipes_AbstractConfigurablePipelineElementBuilder_requiredColorParameter
/** * Assigns a new color picker parameter which is required by the pipeline * element. * * @param label The {@link org.apache.streampipes.sdk.helpers.Label} * that describes why this parameter is needed in a user-friendly manner. * @param defaultColor The default color, encoded as an HTML color code * @return this builder instance */ public K requiredColorParameter(Label label, String defaultColor) { ColorPickerStaticProperty csp = new ColorPickerStaticProperty(label.getInternalId(), label.getLabel(), label.getDescription()); csp.setSelectedColor(defaultColor); this.staticProperties.add(csp); return me(); }
3.68
hadoop_S3ARemoteInputStream_available
/** * Returns the number of bytes that can be read from this stream without blocking. */ @Override public int available() throws IOException { throwIfClosed(); // Update the current position in the current buffer, if possible. if (!fpos.setAbsolute(nextReadPos)) { return 0; } return fpos.buffer().remaining(); }
3.68
rocketmq-connect_Worker_startTasks
/** * Start a collection of tasks with the given configs. If a task is already started with the same configs, it will * not start again. If a task is already started but not contained in the new configs, it will stop. * * @param taskConfigs the task configs to apply */ public void startTasks(Map<String, List<ConnectKeyValue>> taskConfigs) { synchronized (latestTaskConfigs) { this.latestTaskConfigs = taskConfigs; } }
3.68
hadoop_AllocateRequest_progress
/** * Set the <code>progress</code> of the request. * @see AllocateRequest#setProgress(float) * @param progress <code>progress</code> of the request * @return {@link AllocateRequestBuilder} */ @Public @Stable public AllocateRequestBuilder progress(float progress) { allocateRequest.setProgress(progress); return this; }
3.68
querydsl_MetaDataExporter_setBeanSerializer
/** * Set the Bean serializer to create bean types as well * * @param beanSerializer serializer for JavaBeans (default: null) */ public void setBeanSerializer(@Nullable Serializer beanSerializer) { module.bind(SQLCodegenModule.BEAN_SERIALIZER, beanSerializer); }
3.68
hadoop_AbfsClient_appendSuccessCheckOp
// For AppendBlob it's possible that the append succeeded in the backend but the request failed. // However, a retry would fail with an InvalidQueryParameterValue // (as the current offset would be unacceptable). // Hence, we pass/succeed the appendblob append call // in case we are doing a retry after checking the length of the file public boolean appendSuccessCheckOp(AbfsRestOperation op, final String path, final long length, TracingContext tracingContext) throws AzureBlobFileSystemException { if ((op.isARetriedRequest()) && (op.getResult().getStatusCode() == HttpURLConnection.HTTP_BAD_REQUEST)) { final AbfsRestOperation destStatusOp = getPathStatus(path, false, tracingContext); if (destStatusOp.getResult().getStatusCode() == HttpURLConnection.HTTP_OK) { String fileLength = destStatusOp.getResult().getResponseHeader( HttpHeaderConfigurations.CONTENT_LENGTH); if (length <= Long.parseLong(fileLength)) { LOG.debug("Returning success response from append blob idempotency code"); return true; } } } return false; }
3.68
zxing_PDF417ErrorCorrection_getRecommendedMinimumErrorCorrectionLevel
/** * Returns the recommended minimum error correction level as described in annex E of * ISO/IEC 15438:2001(E). * * @param n the number of data codewords * @return the recommended minimum error correction level */ static int getRecommendedMinimumErrorCorrectionLevel(int n) throws WriterException { if (n <= 0) { throw new IllegalArgumentException("n must be > 0"); } if (n <= 40) { return 2; } if (n <= 160) { return 3; } if (n <= 320) { return 4; } if (n <= 863) { return 5; } throw new WriterException("No recommendation possible"); }
3.68
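As a worked example of the thresholds above, n = 100 data codewords falls into the 41-160 band and therefore gets level 3, n = 500 falls into the 321-863 band and gets level 5, and any n above 863 raises a WriterException.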
pulsar_LedgerMetadataUtils_buildAdditionalMetadataForCursor
/** * Build additional metadata for a Cursor. * * @param name the name of the cursor * @return an immutable map which describes the cursor * @see #buildBaseManagedLedgerMetadata(java.lang.String) */ static Map<String, byte[]> buildAdditionalMetadataForCursor(String name) { return Map.of(METADATA_PROPERTY_CURSOR_NAME, name.getBytes(StandardCharsets.UTF_8)); }
3.68
hbase_MonitoredRPCHandlerImpl_isOperationRunning
/** * Indicates to the client whether this task is monitoring a currently active RPC call to a * database command. (as defined by o.a.h.h.client.Operation) * @return true if the monitored handler is currently servicing an RPC call to a database command. */ @Override public synchronized boolean isOperationRunning() { if (!isRPCRunning()) { return false; } for (Object param : params) { if (param instanceof Operation) { return true; } } return false; }
3.68
flink_ExecutionEnvironment_getLastJobExecutionResult
/** * Returns the {@link org.apache.flink.api.common.JobExecutionResult} of the last executed job. * * @return The execution result from the latest job execution. */ public JobExecutionResult getLastJobExecutionResult() { return this.lastJobExecutionResult; }
3.68
dubbo_MemorySafeLinkedBlockingQueue_getMaxFreeMemory
/** * get the max free memory. * * @return the max free memory limit */ public long getMaxFreeMemory() { return maxFreeMemory; }
3.68
AreaShop_FileManager_updateRegions
/** * Update a list of regions. * @param regions The list of regions to update. */ public void updateRegions(List<GeneralRegion> regions) { updateRegions(regions, null); }
3.68
hbase_MutableRegionInfo_getShortNameToLog
/** * Returns a short, printable name for this region (usually the encoded name) for use in logging. */ @Override public String getShortNameToLog() { return RegionInfo.prettyPrint(this.getEncodedName()); }
3.68
flink_PlanNode_getNodeName
/** * Gets the name of the plan node. * * @return The name of the plan node. */ public String getNodeName() { return this.nodeName; }
3.68
hadoop_RegexMountPointResolvedDstPathReplaceInterceptor_interceptResolvedDestPathStr
/** * Intercept resolved path, e.g. * Mount point /^(\\w+)/, ${1}.hadoop.net * If incoming path is /user1/home/tmp/job1, * then the resolved path str will be user1. * * @return intercepted string */ @Override public String interceptResolvedDestPathStr( String parsedDestPathStr) { Matcher matcher = srcRegexPattern.matcher(parsedDestPathStr); return matcher.replaceAll(replaceString); }
3.68
framework_ColorPickerHistory_addColorChangeListener
/** * Adds a color change listener. * * @param listener * The listener */ @Override public void addColorChangeListener(ColorChangeListener listener) { addListener(ColorChangeEvent.class, listener, COLOR_CHANGE_METHOD); }
3.68
framework_DataProvider_fromFilteringCallbacks
/** * Creates a new data provider that uses filtering callbacks for fetching * and counting items from any backing store. * <p> * The query that is passed to each callback may contain a filter value that * is provided by the component querying for data. * * @param fetchCallback * function that returns a stream of items from the back end for * a query * @param countCallback * function that returns the number of items in the back end for * a query * @return a new callback data provider */ public static <T, F> CallbackDataProvider<T, F> fromFilteringCallbacks( FetchCallback<T, F> fetchCallback, CountCallback<T, F> countCallback) { return new CallbackDataProvider<>(fetchCallback, countCallback); }
3.68
hbase_CostFunction_postAction
/** * Called once per cluster Action to give the cost function an opportunity to update its state. * postAction() is always called at least once before cost() is called with the cluster that this * action is performed on. */ void postAction(BalanceAction action) { switch (action.getType()) { case NULL: break; case ASSIGN_REGION: AssignRegionAction ar = (AssignRegionAction) action; regionMoved(ar.getRegion(), -1, ar.getServer()); break; case MOVE_REGION: MoveRegionAction mra = (MoveRegionAction) action; regionMoved(mra.getRegion(), mra.getFromServer(), mra.getToServer()); break; case SWAP_REGIONS: SwapRegionsAction a = (SwapRegionsAction) action; regionMoved(a.getFromRegion(), a.getFromServer(), a.getToServer()); regionMoved(a.getToRegion(), a.getToServer(), a.getFromServer()); break; default: throw new RuntimeException("Unknown action:" + action.getType()); } }
3.68
hadoop_RMWebAppUtil_createContainerLaunchContext
/** * Create the ContainerLaunchContext required for the * ApplicationSubmissionContext. This function takes the user information and * generates the ByteBuffer structures required by the ContainerLaunchContext * * @param newApp the information provided by the user * @return created context * @throws BadRequestException * @throws IOException */ private static ContainerLaunchContext createContainerLaunchContext( ApplicationSubmissionContextInfo newApp) throws BadRequestException, IOException { // create container launch context HashMap<String, ByteBuffer> hmap = new HashMap<String, ByteBuffer>(); for (Map.Entry<String, String> entry : newApp .getContainerLaunchContextInfo().getAuxillaryServiceData().entrySet()) { if (!entry.getValue().isEmpty()) { Base64 decoder = new Base64(0, null, true); byte[] data = decoder.decode(entry.getValue()); hmap.put(entry.getKey(), ByteBuffer.wrap(data)); } } HashMap<String, LocalResource> hlr = new HashMap<String, LocalResource>(); for (Map.Entry<String, LocalResourceInfo> entry : newApp .getContainerLaunchContextInfo().getResources().entrySet()) { LocalResourceInfo l = entry.getValue(); LocalResource lr = LocalResource.newInstance(URL.fromURI(l.getUrl()), l.getType(), l.getVisibility(), l.getSize(), l.getTimestamp()); hlr.put(entry.getKey(), lr); } DataOutputBuffer out = new DataOutputBuffer(); Credentials cs = createCredentials( newApp.getContainerLaunchContextInfo().getCredentials()); cs.writeTokenStorageToStream(out); ByteBuffer tokens = ByteBuffer.wrap(out.getData()); ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(hlr, newApp.getContainerLaunchContextInfo().getEnvironment(), newApp.getContainerLaunchContextInfo().getCommands(), hmap, tokens, newApp.getContainerLaunchContextInfo().getAcls()); return ctx; }
3.68
hadoop_PseudoAuthenticator_getUserName
/** * Returns the current user name. * <p> * This implementation returns the value of the Java system property 'user.name' * * @return the current user name. */ protected String getUserName() { return System.getProperty("user.name"); }
3.68
morf_Function_coalesce
/** * Helper method to create an instance of the "coalesce" SQL function, * which will result in the first non-null argument. * * @param fields the fields to evaluate. * @return an instance of the coalesce function. */ public static Function coalesce(Iterable<? extends AliasedField> fields) { return new Function(FunctionType.COALESCE, fields); }
3.68
pulsar_NonPersistentTopicStatsImpl_add
// if the stats are added for the 1st time, we will need to make a copy of these stats and add it to the current // stats. This stat addition is not thread-safe. public NonPersistentTopicStatsImpl add(NonPersistentTopicStats ts) { NonPersistentTopicStatsImpl stats = (NonPersistentTopicStatsImpl) ts; Objects.requireNonNull(stats); super.add(stats); this.msgDropRate += stats.msgDropRate; List<NonPersistentPublisherStats> publisherStats = stats.getNonPersistentPublishers(); for (int index = 0; index < publisherStats.size(); index++) { NonPersistentPublisherStats s = publisherStats.get(index); if (s.isSupportsPartialProducer() && s.getProducerName() != null) { ((NonPersistentPublisherStatsImpl) this.nonPersistentPublishersMap .computeIfAbsent(s.getProducerName(), key -> { final NonPersistentPublisherStatsImpl newStats = new NonPersistentPublisherStatsImpl(); newStats.setSupportsPartialProducer(true); newStats.setProducerName(s.getProducerName()); return newStats; })).add((NonPersistentPublisherStatsImpl) s); } else { // Add a non-persistent publisher stat entry to this.nonPersistentPublishers // if this.nonPersistentPublishers.size() is smaller than // the input stats.nonPersistentPublishers.size(). // Here, index == this.nonPersistentPublishers.size() means // this.nonPersistentPublishers.size() is smaller than the input stats.nonPersistentPublishers.size() if (index == this.nonPersistentPublishers.size()) { NonPersistentPublisherStatsImpl newStats = new NonPersistentPublisherStatsImpl(); newStats.setSupportsPartialProducer(false); this.nonPersistentPublishers.add(newStats); } ((NonPersistentPublisherStatsImpl) this.nonPersistentPublishers.get(index)) .add((NonPersistentPublisherStatsImpl) s); } } for (Map.Entry<String, NonPersistentSubscriptionStats> entry : stats.getNonPersistentSubscriptions() .entrySet()) { NonPersistentSubscriptionStatsImpl subscriptionStats = (NonPersistentSubscriptionStatsImpl) this.getNonPersistentSubscriptions() .computeIfAbsent(entry.getKey(), k -> new NonPersistentSubscriptionStatsImpl()); subscriptionStats.add( (NonPersistentSubscriptionStatsImpl) entry.getValue()); } for (Map.Entry<String, NonPersistentReplicatorStats> entry : stats.getNonPersistentReplicators().entrySet()) { NonPersistentReplicatorStatsImpl replStats = (NonPersistentReplicatorStatsImpl) this.getNonPersistentReplicators().computeIfAbsent(entry.getKey(), k -> { NonPersistentReplicatorStatsImpl r = new NonPersistentReplicatorStatsImpl(); return r; }); replStats.add((NonPersistentReplicatorStatsImpl) entry.getValue()); } return this; }
3.68
flink_FileRegionWriteReadUtils_writeHsInternalRegionToFile
/** * Write {@link InternalRegion} to {@link FileChannel}. * * <p>Note that this type of region's length may be variable because it contains an array to * indicate each buffer's release state. * * @param channel the file's channel to write. * @param headerBuffer the buffer to write {@link InternalRegion}'s header. * @param region the region to be written to channel. */ public static void writeHsInternalRegionToFile( FileChannel channel, ByteBuffer headerBuffer, InternalRegion region) throws IOException { // write header buffer. headerBuffer.clear(); headerBuffer.putInt(region.getFirstBufferIndex()); headerBuffer.putInt(region.getNumBuffers()); headerBuffer.putLong(region.getRegionStartOffset()); headerBuffer.flip(); // write payload buffer. ByteBuffer payloadBuffer = allocateAndConfigureBuffer(region.getNumBuffers()); boolean[] released = region.getReleased(); for (boolean b : released) { payloadBuffer.put(b ? (byte) 1 : (byte) 0); } payloadBuffer.flip(); BufferReaderWriterUtil.writeBuffers( channel, headerBuffer.capacity() + payloadBuffer.capacity(), headerBuffer, payloadBuffer); }
3.68
hadoop_AzureFileSystemInstrumentation_getCurrentMaximumDownloadBandwidth
/** * Get the current maximum download bandwidth. * @return maximum download bandwidth in bytes per second. */ public long getCurrentMaximumDownloadBandwidth() { return currentMaximumDownloadBytesPerSecond; }
3.68
hbase_AbstractRpcClient_getPoolSize
/** * Return the pool size specified in the configuration, which is applicable only if the pool type * is {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin}. * @param config configuration * @return the maximum pool size */ private static int getPoolSize(Configuration config) { int poolSize = config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1); if (poolSize <= 0) { LOG.warn("{} must be positive. Using default value: 1", HConstants.HBASE_CLIENT_IPC_POOL_SIZE); return 1; } else { return poolSize; } }
3.68
hadoop_TaskId_getId
/** * Getter method for TaskID. * @return TaskId: Task identifier */ public final long getId() { return taskId; }
3.68
hbase_KeyValueHeap_seek
/** * Seeks all scanners at or below the specified seek key. If we earlied-out of a row, we may end * up skipping values that were never reached yet. Rather than iterating down, we want to give the * opportunity to re-seek. * <p> * As individual scanners may run past their ends, those scanners are automatically closed and * removed from the heap. * <p> * This function (and {@link #reseek(Cell)}) does not do multi-column Bloom filter and lazy-seek * optimizations. To enable those, call {@link #requestSeek(Cell, boolean, boolean)}. * @param seekKey KeyValue to seek at or after * @return true if KeyValues exist at or after specified key, false if not */ @Override public boolean seek(Cell seekKey) throws IOException { return generalizedSeek(false, // This is not a lazy seek seekKey, false, // forward (false: this is not a reseek) false); // Not using Bloom filters }
3.68
graphhopper_ArrayUtil_withoutConsecutiveDuplicates
/** * Creates a copy of the given list where all consecutive duplicates are removed */ public static IntIndexedContainer withoutConsecutiveDuplicates(IntIndexedContainer arr) { IntArrayList result = new IntArrayList(); if (arr.isEmpty()) return result; int prev = arr.get(0); result.add(prev); for (int i = 1; i < arr.size(); i++) { int val = arr.get(i); if (val != prev) result.add(val); prev = val; } return result; }
3.68
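A brief usage sketch for the helper above, using the HPPC container types from its signature (the input values are illustrative, and the enclosing ArrayUtil class is assumed from the snippet name):
    IntArrayList input = IntArrayList.from(1, 1, 2, 2, 2, 3, 1);
    IntIndexedContainer result = ArrayUtil.withoutConsecutiveDuplicates(input);
    // result holds [1, 2, 3, 1]: only consecutive duplicates are dropped, so the trailing 1 survives.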
querydsl_MetaDataExporter_setTargetFolder
/** * Set the target folder * * @param targetFolder target source folder to create the sources into * (e.g. target/generated-sources/java) */ public void setTargetFolder(File targetFolder) { this.targetFolder = targetFolder; }
3.68
hadoop_LocatedFileStatusFetcher_registerError
/** * Register fatal errors - for example, an IOException while accessing a file, or a * full execution queue. */ private void registerError(Throwable t) { LOG.debug("Error", t); lock.lock(); try { if (unknownError == null) { unknownError = t; condition.signal(); } } finally { lock.unlock(); } }
3.68
hbase_NamespacesInstanceModel_toString
/* * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{NAME => \'"); sb.append(namespaceName); sb.append("\'"); if (properties != null) { for (Map.Entry<String, String> entry : properties.entrySet()) { sb.append(", "); sb.append(entry.getKey()); sb.append(" => '"); sb.append(entry.getValue()); sb.append("\'"); } } sb.append("}"); return sb.toString(); }
3.68
dubbo_ReactorClientCalls_manyToOne
/** * Implements a stream -> unary call as Flux -> Mono * * @param invoker invoker * @param requestFlux the flux with request * @param methodDescriptor the method descriptor * @return the mono with response */ public static <TRequest, TResponse, TInvoker> Mono<TResponse> manyToOne( Invoker<TInvoker> invoker, Flux<TRequest> requestFlux, StubMethodDescriptor methodDescriptor) { try { ClientTripleReactorSubscriber<TRequest> clientSubscriber = requestFlux.subscribeWith(new ClientTripleReactorSubscriber<>()); ClientTripleReactorPublisher<TResponse> clientPublisher = new ClientTripleReactorPublisher<>( s -> clientSubscriber.subscribe((CallStreamObserver<TRequest>) s), clientSubscriber::cancel); return Mono.from(clientPublisher) .doOnSubscribe(dummy -> StubInvocationUtil.biOrClientStreamCall(invoker, methodDescriptor, clientPublisher)); } catch (Throwable throwable) { return Mono.error(throwable); } }
3.68
hibernate-validator_AnnotationParametersAbstractCheck_canCheckThisAnnotation
/** * Verify that this check class can process such annotation. * * @param annotation annotation you want to process by this class * @return {@code true} if such annotation can be processed, {@code false} otherwise. */ protected boolean canCheckThisAnnotation(AnnotationMirror annotation) { return annotationClasses.contains( annotation.getAnnotationType().asElement().toString() ); }
3.68
hadoop_ReadStatistics_getTotalBytesRead
/** * @return The total bytes read. This will always be at least as * high as the other numbers, since it includes all of them. */ public synchronized long getTotalBytesRead() { return totalBytesRead; }
3.68
hbase_WALActionsListener_postSync
/** * For notification post writer sync. Used by metrics system at least. * @param timeInNanos How long the filesystem sync took in nanoseconds. * @param handlerSyncs How many sync handler calls were released by this call to filesystem sync. */ default void postSync(final long timeInNanos, final int handlerSyncs) { }
3.68
hadoop_ConverterUtils_getYarnUrlFromPath
/* * This method is deprecated, use {@link URL#fromPath(Path)} instead. */ @Public @Deprecated public static URL getYarnUrlFromPath(Path path) { return URL.fromPath(path); }
3.68
hibernate-validator_ReflectionHelper_isList
/** * @param type the type to check. * * @return Returns {@code true} if {@code type} is implementing {@code List}, {@code false} otherwise. */ public static boolean isList(Type type) { if ( type instanceof Class && List.class.isAssignableFrom( (Class<?>) type ) ) { return true; } if ( type instanceof ParameterizedType ) { return isList( ( (ParameterizedType) type ).getRawType() ); } if ( type instanceof WildcardType ) { Type[] upperBounds = ( (WildcardType) type ).getUpperBounds(); return upperBounds.length != 0 && isList( upperBounds[0] ); } return false; }
3.68
dubbo_RpcStatus_getSucceededAverageElapsed
/** * get succeeded average elapsed. * * @return succeeded average elapsed */ public long getSucceededAverageElapsed() { long succeeded = getSucceeded(); if (succeeded == 0) { return 0; } return getSucceededElapsed() / succeeded; }
3.68
flink_ClusterClient_invalidateClusterDataset
/** * Invalidate the cached intermediate dataset with the given id. * * @param clusterDatasetId id of the cluster dataset to be invalidated. * @return Future which will be completed when the cached dataset is invalidated. */ default CompletableFuture<Void> invalidateClusterDataset(AbstractID clusterDatasetId) { return CompletableFuture.completedFuture(null); }
3.68
framework_AbstractContainer_setItemSetChangeListeners
/** * Sets the item set change listener collection. For internal use only. * * @param itemSetChangeListeners */ protected void setItemSetChangeListeners( Collection<Container.ItemSetChangeListener> itemSetChangeListeners) { this.itemSetChangeListeners = itemSetChangeListeners; }
3.68
flink_TableResultImpl_data
/** * Specifies a row list as the execution result. * * @param rowList a row list as the execution result. */ public Builder data(List<Row> rowList) { Preconditions.checkNotNull(rowList, "listRows should not be null"); this.resultProvider = new StaticResultProvider(rowList); return this; }
3.68
framework_Window_setParent
/* * (non-Javadoc) * * @see com.vaadin.ui.AbstractComponent#setParent(com.vaadin.server. * ClientConnector ) */ @Override public void setParent(HasComponents parent) { if (parent == null || parent instanceof UI) { super.setParent(parent); } else { throw new IllegalArgumentException( "A Window can only be added to a UI using UI.addWindow(Window window)"); } }
3.68
shardingsphere-elasticjob_ShardingContexts_createShardingContext
/** * Create sharding context. * * @param shardingItem sharding item * @return sharding context */ public ShardingContext createShardingContext(final int shardingItem) { return new ShardingContext(jobName, taskId, shardingTotalCount, jobParameter, shardingItem, shardingItemParameters.get(shardingItem)); }
3.68
hmily_AbstractConfig_setPassive
/** * Sets passive. * * @param passive the passive */ public void setPassive(final boolean passive) { this.passive = passive; }
3.68
querydsl_SQLExpressions_regrSlope
/** * REGR_SLOPE returns the slope of the line * * @param arg1 first arg * @param arg2 second arg * @return regr_slope(arg1, arg2) */ public static WindowOver<Double> regrSlope(Expression<? extends Number> arg1, Expression<? extends Number> arg2) { return new WindowOver<Double>(Double.class, SQLOps.REGR_SLOPE, arg1, arg2); }
3.68
flink_TimeUtils_toDuration
/** * Translates {@link Time} to {@link Duration}. * * @param time time to transform into duration * @return duration equal to the given time */ public static Duration toDuration(Time time) { return Duration.of(time.getSize(), toChronoUnit(time.getUnit())); }
3.68