Dataset columns:
name: string (lengths 12 to 178)
code_snippet: string (lengths 8 to 36.5k)
score: float64 (range 3.26 to 3.68)
hudi_BulkInsertWriterHelper_getInstantTime
/** * Returns the write instant time. */ public String getInstantTime() { return this.instantTime; }
3.68
hadoop_DeSelectFields_obtainType
/** * Obtain the <code>DeSelectType</code> matching the literal given after * <code>deSelects</code> in the URL. * <br> e.g. deSelects="resourceRequests" * @param literals e.g. resourceRequests * @return the matching <code>DeSelectType</code>, e.g. DeSelectType.RESOURCE_REQUESTS, or null if no literal matches */ public static DeSelectType obtainType(String literals) { for (DeSelectType type : values()) { if (type.literals.equalsIgnoreCase(literals)) { return type; } } return null; }
3.68
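The snippet above is the standard case-insensitive enum-literal lookup pattern. A minimal, self-contained sketch of the same idea, with the enum constants and literals invented for illustration:

```java
public class EnumLookupDemo {
    enum DeSelectType {
        RESOURCE_REQUESTS("resourceRequests"),
        APP_TIMEOUTS("appTimeouts");

        final String literals;

        DeSelectType(String literals) {
            this.literals = literals;
        }

        /** Linear scan over values(); returns null when no literal matches. */
        static DeSelectType obtainType(String literals) {
            for (DeSelectType type : values()) {
                if (type.literals.equalsIgnoreCase(literals)) {
                    return type;
                }
            }
            return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(DeSelectType.obtainType("resourcerequests")); // RESOURCE_REQUESTS (case-insensitive)
        System.out.println(DeSelectType.obtainType("nosuchfield"));      // null
    }
}
```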
flink_Costs_addCpuCost
/** * Adds the given CPU cost to the current CPU cost for this Costs object. * * @param cost The CPU cost to add. */ public void addCpuCost(double cost) { this.cpuCost = (this.cpuCost < 0 || cost < 0) ? UNKNOWN : this.cpuCost + cost; }
3.68
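The `addCpuCost` snippet above uses a negative sentinel that "poisons" every subsequent addition: once either operand is unknown, the total stays unknown. A minimal, self-contained sketch of that behavior (the class name and accessor are invented for illustration):

```java
public class CostAccumulator {
    /** Sentinel for an unknown cost, mirroring the snippet's convention of negative = unknown. */
    static final double UNKNOWN = -1.0d;

    private double cpuCost;

    /** Once either operand is unknown (negative), the sum stays unknown forever. */
    public void addCpuCost(double cost) {
        this.cpuCost = (this.cpuCost < 0 || cost < 0) ? UNKNOWN : this.cpuCost + cost;
    }

    public double getCpuCost() {
        return cpuCost;
    }

    public static void main(String[] args) {
        CostAccumulator c = new CostAccumulator();
        c.addCpuCost(2.5);
        c.addCpuCost(4.0);
        System.out.println(c.getCpuCost()); // 6.5
        c.addCpuCost(UNKNOWN);              // poisons the total
        c.addCpuCost(100.0);
        System.out.println(c.getCpuCost()); // -1.0 (still unknown)
    }
}
```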
hbase_ScannerModel_setBatch
/** * @param batch the number of cells to return in batch */ public void setBatch(int batch) { this.batch = batch; }
3.68
hbase_MemStoreFlusher_interruptIfNecessary
/** * Only interrupt once it's done with a run through the work loop. */ void interruptIfNecessary() { lock.writeLock().lock(); try { for (FlushHandler flushHandler : flushHandlers) { if (flushHandler != null) { flushHandler.interrupt(); } } } finally { lock.writeLock().unlock(); } }
3.68
morf_AddColumn_accept
/** * @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor) */ @Override public void accept(SchemaChangeVisitor visitor) { visitor.visit(this); }
3.68
hibernate-validator_ServiceLoaderBasedConstraintMappingContributor_run
/** * Runs the given privileged action, using a privileged block if required. * <p> * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary * privileged actions within HV's protection domain. */ @IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17") private <T> T run(PrivilegedAction<T> action) { return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run(); }
3.68
graphhopper_InstructionsOutgoingEdges_outgoingEdgesAreSlowerByFactor
/** * Checks if the outgoing edges are slower by the provided factor. If they are, this indicates that we are staying * on the prominent street that one would follow anyway. */ public boolean outgoingEdgesAreSlowerByFactor(double factor) { double tmpSpeed = getSpeed(currentEdge); double pathSpeed = getSpeed(prevEdge); // speed change indicates that we change road types if (Math.abs(pathSpeed - tmpSpeed) >= 1) { return false; } double maxSurroundingSpeed = -1; for (EdgeIteratorState edge : allowedAlternativeTurns) { tmpSpeed = getSpeed(edge); if (tmpSpeed > maxSurroundingSpeed) { maxSurroundingSpeed = tmpSpeed; } } // surrounding streets need to be slower by the factor; round() is called so that tiny differences are ignored return Math.round(maxSurroundingSpeed * factor) < Math.round(pathSpeed); }
3.68
morf_DatabaseMetaDataProvider_readIndexName
/** * Retrieves index name from a result set. * * @param indexResultSet Result set to be read. * @return Name of the index. * @throws SQLException Upon errors. */ protected RealName readIndexName(ResultSet indexResultSet) throws SQLException { String indexName = indexResultSet.getString(INDEX_NAME); return createRealName(indexName, indexName); }
3.68
flink_ZooKeeperStateHandleStore_normalizePath
/** * Makes sure that every path starts with a "/". * * @param path Path to normalize * @return Normalized path such that it starts with a "/" */ private static String normalizePath(String path) { if (path.startsWith("/")) { return path; } else { return '/' + path; } }
3.68
hbase_HMaster_getNamespaces
/** * Get all Namespaces * @return All Namespace descriptors */ List<NamespaceDescriptor> getNamespaces() throws IOException { checkInitialized(); final List<NamespaceDescriptor> nsds = new ArrayList<>(); if (cpHost != null) { cpHost.preListNamespaceDescriptors(nsds); } nsds.addAll(this.clusterSchemaService.getNamespaces()); if (this.cpHost != null) { this.cpHost.postListNamespaceDescriptors(nsds); } return nsds; }
3.68
framework_VCaption_setCaptionText
/** * Sets the text of the given widget to the caption found in the state. * <p> * Uses {@link AbstractComponentState#captionAsHtml} to determine whether to * set the caption as html or plain text * * @since 7.4 * @param widget * the target widget * @param state * the state from which to read the caption text and mode */ public static void setCaptionText(HasHTML widget, AbstractComponentState state) { if (state.captionAsHtml) { widget.setHTML(state.caption); } else { widget.setText(state.caption); } }
3.68
flink_BlobClient_putInputStream
/** * Uploads data from the given input stream to the BLOB server. * * @param jobId the ID of the job the BLOB belongs to (or <tt>null</tt> if job-unrelated) * @param inputStream the input stream to read the data from * @param blobType whether the BLOB should become permanent or transient * @return the computed BLOB key of the uploaded BLOB * @throws IOException thrown if an I/O error occurs while uploading the data to the BLOB server */ BlobKey putInputStream( @Nullable JobID jobId, InputStream inputStream, BlobKey.BlobType blobType) throws IOException { if (this.socket.isClosed()) { throw new IllegalStateException( "BLOB Client is not connected. " + "Client has been shut down or encountered an error before."); } checkNotNull(inputStream); if (LOG.isDebugEnabled()) { LOG.debug("PUT BLOB stream to {}.", socket.getLocalSocketAddress()); } try (BlobOutputStream os = new BlobOutputStream(jobId, blobType, socket)) { IOUtils.copyBytes(inputStream, os, BUFFER_SIZE, false); return os.finish(); } catch (Throwable t) { BlobUtils.closeSilently(socket, LOG); throw new IOException("PUT operation failed: " + t.getMessage(), t); } }
3.68
hbase_ProcedureExecutor_getWorkerThreadCount
/** Returns the current number of worker threads. */ public int getWorkerThreadCount() { return workerThreads.size(); }
3.68
hadoop_DiskBalancerException_getResult
/** * Returns the result. * @return the result */ public Result getResult() { return result; }
3.68
hudi_BinaryUtil_toBytes
/** * Copies {@link ByteBuffer} into allocated {@code byte[]} array */ public static byte[] toBytes(ByteBuffer buffer) { byte[] bytes = new byte[buffer.remaining()]; buffer.get(bytes); return bytes; }
3.68
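The `toBytes` helper above relies on standard `ByteBuffer` semantics: `remaining()` sizes the array and `get()` advances the buffer's position, so the call drains the buffer. A quick standalone check (re-implemented here for illustration):

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ToBytesDemo {
    /** Copies the remaining bytes of the buffer into a fresh array, advancing its position. */
    static byte[] toBytes(ByteBuffer buffer) {
        byte[] bytes = new byte[buffer.remaining()];
        buffer.get(bytes);
        return bytes;
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
        buf.get(); // consume 'h'; only "ello" remains
        System.out.println(new String(toBytes(buf), StandardCharsets.UTF_8)); // ello
        System.out.println(buf.remaining()); // 0 -- the buffer has been drained
    }
}
```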
hadoop_ConfigRedactor_redactXml
/** * Given a key / value pair, decides whether or not to redact and returns * either the original value or text indicating it has been redacted. * * @param key the configuration key. * @param value the configuration value; returned unchanged if the key is not sensitive. * @return Original value, or text indicating it has been redacted */ public String redactXml(String key, String value) { if (configIsSensitive(key)) { return REDACTED_XML; } return value; }
3.68
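The redaction decision in `redactXml` above hinges entirely on the sensitivity check. A minimal standalone sketch of the same pattern; the sensitive-key patterns below are invented for illustration (the real list is configuration-driven):

```java
import java.util.List;
import java.util.regex.Pattern;

public class RedactorDemo {
    static final String REDACTED = "<redacted>";
    // Hypothetical sensitive-key patterns.
    static final List<Pattern> SENSITIVE = List.of(
            Pattern.compile(".*password.*", Pattern.CASE_INSENSITIVE),
            Pattern.compile(".*secret.*", Pattern.CASE_INSENSITIVE));

    /** Returns the value unchanged unless the key matches a sensitive pattern. */
    static String redact(String key, String value) {
        for (Pattern p : SENSITIVE) {
            if (p.matcher(key).matches()) {
                return REDACTED;
            }
        }
        return value;
    }

    public static void main(String[] args) {
        System.out.println(redact("fs.defaultFS", "hdfs://nn:8020")); // hdfs://nn:8020
        System.out.println(redact("db.password", "hunter2"));        // <redacted>
    }
}
```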
hadoop_ServiceLauncher_createGenericOptionsParser
/** * Override point: create a generic options parser or subclass thereof. * @param conf Hadoop configuration * @param argArray array of arguments * @return a generic options parser to parse the arguments * @throws IOException on any failure */ protected GenericOptionsParser createGenericOptionsParser(Configuration conf, String[] argArray) throws IOException { return new MinimalGenericOptionsParser(conf, commandOptions, argArray); }
3.68
flink_ExecEdge_translateToPlan
/** * Translates this edge into a Flink operator. * * @param planner The {@link Planner} of the translated Table. */ public Transformation<?> translateToPlan(Planner planner) { return source.translateToPlan(planner); }
3.68
zxing_HighLevelEncoder_updateStateForChar
// Return a set of states that represent the possible ways of updating this // state for the next character. The resulting set of states are added to // the "result" list. private void updateStateForChar(State state, int index, Collection<State> result) { char ch = (char) (text[index] & 0xFF); boolean charInCurrentTable = CHAR_MAP[state.getMode()][ch] > 0; State stateNoBinary = null; for (int mode = 0; mode <= MODE_PUNCT; mode++) { int charInMode = CHAR_MAP[mode][ch]; if (charInMode > 0) { if (stateNoBinary == null) { // Only create stateNoBinary the first time it's required. stateNoBinary = state.endBinaryShift(index); } // Try generating the character by latching to its mode if (!charInCurrentTable || mode == state.getMode() || mode == MODE_DIGIT) { // If the character is in the current table, we don't want to latch to // any other mode except possibly digit (which uses only 4 bits). Any // other latch would be equally successful *after* this character, and // so wouldn't save any bits. State latchState = stateNoBinary.latchAndAppend(mode, charInMode); result.add(latchState); } // Try generating the character by switching to its mode. if (!charInCurrentTable && SHIFT_TABLE[state.getMode()][mode] >= 0) { // It never makes sense to temporarily shift to another mode if the // character exists in the current mode. That can never save bits. State shiftState = stateNoBinary.shiftAndAppend(mode, charInMode); result.add(shiftState); } } } if (state.getBinaryShiftByteCount() > 0 || CHAR_MAP[state.getMode()][ch] == 0) { // It's never worthwhile to go into binary shift mode if you're not already // in binary shift mode, and the character exists in your current mode. // That can never save bits over just outputting the char in the current mode. State binaryState = state.addBinaryShiftChar(index); result.add(binaryState); } }
3.68
hbase_MetricSampleQuantiles_getCount
/** * Returns the number of items that the estimator has processed * @return count total number of items processed */ synchronized public long getCount() { return count; }
3.68
dubbo_TypeDefinition_formatType
/** * Format the {@link String} representing a Java type * * @param type the String representing the type * @return new String representing the Java type after formatting * @since 2.7.9 */ public static String formatType(String type) { if (isGenericType(type)) { return formatGenericType(type); } return type; }
3.68
hmily_AggregateBinder_getEnv
/** * Gets env. * * @return the env */ Binder.Env getEnv() { return env; }
3.68
framework_TypeDataStore_getOnStateChangeMethods
/** * Gets data for all methods annotated with {@link OnStateChange} in the * given connector type. * * @since 7.2 * @param type * the connector type * @return a map of state property names to handler method data */ public static FastStringMap<JsArrayObject<OnStateChangeMethod>> getOnStateChangeMethods( Class<?> type) { return get().onStateChangeMethods.get(getType(type).getSignature()); }
3.68
flink_KvStateLocation_getKvStateID
/** * Returns the registered KvStateID for the key group index or <code>null</code> if none is * registered yet. * * @param keyGroupIndex Key group index to get ID for. * @return KvStateID for the key group index or <code>null</code> if none is registered yet * @throws IndexOutOfBoundsException If key group index < 0 or >= Number of key groups */ public KvStateID getKvStateID(int keyGroupIndex) { if (keyGroupIndex < 0 || keyGroupIndex >= numKeyGroups) { throw new IndexOutOfBoundsException("Key group index"); } return kvStateIds[keyGroupIndex]; }
3.68
hmily_GrpcHmilyClient_syncInvoke
/** * grpc sync invoke. * * @param <T> the result type * @param abstractStub AbstractStub to invoke * @param method name of the stub method to call * @param param request parameter * @param clazz result class * @return the invocation result */ public <T> T syncInvoke(final AbstractStub abstractStub, final String method, final Object param, final Class<T> clazz) { GrpcHmilyContext.getHmilyFailContext().remove(); GrpcHmilyContext.getHmilyClass().set(new GrpcInvokeContext(new Object[]{abstractStub, method, param, clazz})); if (SingletonHolder.INST.get(GrpcHmilyClient.class) == null) { SingletonHolder.INST.register(GrpcHmilyClient.class, this); } for (Method m : abstractStub.getClass().getMethods()) { if (m.getName().equals(method)) { try { T res = (T) m.invoke(abstractStub, m.getParameterTypes()[0].cast(param)); if (GrpcHmilyContext.getHmilyFailContext().get() != null) { throw new HmilyRuntimeException(); } return res; } catch (Exception e) { LOGGER.error("failed to invoke grpc server", e); throw new HmilyRuntimeException(); } } } return null; }
3.68
flink_MemoryLogger_getDirectMemoryStatsAsString
/** * Returns a String with the <strong>direct</strong> memory footprint. * * <p>These stats are not part of the other memory beans. * * @param bufferPoolMxBean The direct buffer pool bean or <code>null</code> if none available. * @return A string with the count, total capacity, and used direct memory. */ public static String getDirectMemoryStatsAsString(BufferPoolMXBean bufferPoolMxBean) { if (bufferPoolMxBean == null) { return "Direct memory stats: unavailable"; } else { return String.format( "Direct memory stats: Count: %d, Total Capacity: %d, Used Memory: %d", bufferPoolMxBean.getCount(), bufferPoolMxBean.getTotalCapacity(), bufferPoolMxBean.getMemoryUsed()); } }
3.68
framework_AbstractClientConnector_fireEvent
/** * Sends the event to all listeners. * * @param event * the Event to be sent to all listeners. */ protected void fireEvent(EventObject event) { if (eventRouter != null) { eventRouter.fireEvent(event); } }
3.68
framework_ComputedStyle_getPaddingHeight
/** * Returns the sum of the top and bottom padding. * * @since 7.5.3 * @return the sum of the top and bottom padding */ public double getPaddingHeight() { double paddingHeight = getDoubleProperty("paddingTop"); paddingHeight += getDoubleProperty("paddingBottom"); return paddingHeight; }
3.68
hadoop_NMTokenCache_getSingleton
/** * Returns the singleton NM token cache. * * @return the singleton NM token cache. */ public static NMTokenCache getSingleton() { return NM_TOKEN_CACHE; }
3.68
hudi_HoodieFlinkCopyOnWriteTable_bulkInsertPrepped
/** * Bulk inserts the given prepared records into the Hoodie table, at the supplied instantTime. * * <p>This implementation requires that the input records are already tagged, and de-duped if needed. * * <p>Specifies the write handle explicitly in order to have fine-grained control over * the underlying file. * * @param context HoodieEngineContext * @param writeHandle Write handle controlling the underlying file * @param instantTime Instant Time for the action * @param preppedRecords Hoodie records to bulk_insert * @return HoodieWriteMetadata */ public HoodieWriteMetadata<List<WriteStatus>> bulkInsertPrepped( HoodieEngineContext context, HoodieWriteHandle<?, ?, ?, ?> writeHandle, String instantTime, List<HoodieRecord<T>> preppedRecords) { return new FlinkBulkInsertPreppedCommitActionExecutor<>(context, writeHandle, config, this, instantTime, preppedRecords).execute(); }
3.68
flink_YarnClusterDescriptor_deployInternal
/** * This method will block until the ApplicationMaster/JobManager have been deployed on YARN. * * @param clusterSpecification Initial cluster specification for the Flink cluster to be * deployed * @param applicationName name of the Yarn application to start * @param yarnClusterEntrypoint Class name of the Yarn cluster entry point. * @param jobGraph A job graph which is deployed with the Flink cluster, {@code null} if none * @param detached True if the cluster should be started in detached mode */ private ClusterClientProvider<ApplicationId> deployInternal( ClusterSpecification clusterSpecification, String applicationName, String yarnClusterEntrypoint, @Nullable JobGraph jobGraph, boolean detached) throws Exception { final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); if (HadoopUtils.isKerberosSecurityEnabled(currentUser)) { boolean useTicketCache = flinkConfiguration.getBoolean(SecurityOptions.KERBEROS_LOGIN_USETICKETCACHE); if (!HadoopUtils.areKerberosCredentialsValid(currentUser, useTicketCache)) { throw new RuntimeException( "Hadoop security with Kerberos is enabled but the login user " + "does not have Kerberos credentials or delegation tokens!"); } final boolean fetchToken = flinkConfiguration.getBoolean(SecurityOptions.KERBEROS_FETCH_DELEGATION_TOKEN); final boolean yarnAccessFSEnabled = !CollectionUtil.isNullOrEmpty( flinkConfiguration.get( SecurityOptions.KERBEROS_HADOOP_FILESYSTEMS_TO_ACCESS)); if (!fetchToken && yarnAccessFSEnabled) { throw new IllegalConfigurationException( String.format( "When %s is disabled, %s must be disabled as well.", SecurityOptions.KERBEROS_FETCH_DELEGATION_TOKEN.key(), SecurityOptions.KERBEROS_HADOOP_FILESYSTEMS_TO_ACCESS.key())); } } isReadyForDeployment(clusterSpecification); // ------------------ Check if the specified queue exists -------------------- checkYarnQueues(yarnClient); // ------------------ Check if the YARN ClusterClient has the requested resources // -------------- // Create application via yarnClient final YarnClientApplication yarnApplication = yarnClient.createApplication(); final GetNewApplicationResponse appResponse = yarnApplication.getNewApplicationResponse(); Resource maxRes = appResponse.getMaximumResourceCapability(); final ClusterResourceDescription freeClusterMem; try { freeClusterMem = getCurrentFreeClusterResources(yarnClient); } catch (YarnException | IOException e) { failSessionDuringDeployment(yarnClient, yarnApplication); throw new YarnDeploymentException( "Could not retrieve information about free cluster resources.", e); } final int yarnMinAllocationMB = yarnConfiguration.getInt( YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB); if (yarnMinAllocationMB <= 0) { throw new YarnDeploymentException( "The minimum allocation memory " + "(" + yarnMinAllocationMB + " MB) configured via '" + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB + "' should be greater than 0."); } final ClusterSpecification validClusterSpecification; try { validClusterSpecification = validateClusterResources( clusterSpecification, yarnMinAllocationMB, maxRes, freeClusterMem); } catch (YarnDeploymentException yde) { failSessionDuringDeployment(yarnClient, yarnApplication); throw yde; } LOG.info("Cluster specification: {}", validClusterSpecification); final ClusterEntrypoint.ExecutionMode executionMode = detached ? ClusterEntrypoint.ExecutionMode.DETACHED : ClusterEntrypoint.ExecutionMode.NORMAL; flinkConfiguration.setString( ClusterEntrypoint.INTERNAL_CLUSTER_EXECUTION_MODE, executionMode.toString()); ApplicationReport report = startAppMaster( flinkConfiguration, applicationName, yarnClusterEntrypoint, jobGraph, yarnClient, yarnApplication, validClusterSpecification); // print the application id for user to cancel themselves. if (detached) { final ApplicationId yarnApplicationId = report.getApplicationId(); logDetachedClusterInformation(yarnApplicationId, LOG); } setClusterEntrypointInfoToConfig(report); return () -> { try { return new RestClusterClient<>(flinkConfiguration, report.getApplicationId()); } catch (Exception e) { throw new RuntimeException("Error while creating RestClusterClient.", e); } }; }
3.68
morf_DataSetAdapter_open
/** * @see org.alfasoftware.morf.dataset.DataSetConsumer#open() */ @Override public void open() { consumer.open(); }
3.68
flink_TemporalRowTimeJoinOperator_cleanupExpiredVersionInState
/** * Removes all expired versions in the versioned table's state according to the current watermark. */ private void cleanupExpiredVersionInState(long currentWatermark, List<RowData> rightRowsSorted) throws Exception { int i = 0; int indexToKeep = firstIndexToKeep(currentWatermark, rightRowsSorted); // clean old version data that is behind the current watermark while (i < indexToKeep) { long rightTime = getRightTime(rightRowsSorted.get(i)); rightState.remove(rightTime); i += 1; } }
3.68
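The cleanup loop above drops every version strictly older than the last one still needed at the watermark. The same idea can be sketched with a sorted map, where `headMap` gives the expired prefix directly; this is a simplified standalone illustration with hypothetical data, not the operator's actual state layout:

```java
import java.util.TreeMap;

public class VersionCleanupDemo {
    public static void main(String[] args) {
        // right-side versions keyed by row time (hypothetical data)
        TreeMap<Long, String> rightState = new TreeMap<>();
        rightState.put(10L, "v1");
        rightState.put(20L, "v2");
        rightState.put(30L, "v3");

        long currentWatermark = 25L;
        // Keep the latest version at or below the watermark (20L) so rows at the
        // watermark can still join against it; drop everything strictly older.
        Long lastValidVersion = rightState.floorKey(currentWatermark);
        if (lastValidVersion != null) {
            rightState.headMap(lastValidVersion, false).clear();
        }
        System.out.println(rightState); // {20=v2, 30=v3}
    }
}
```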
hbase_RegionPlacementMaintainer_updateAssignmentPlanToRegionServers
/** * Update the assignment plan to all the region servers */ private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws IOException { LOG.info("Start to update the region servers with the new assignment plan"); // Get the region to region server map Map<ServerName, List<RegionInfo>> currentAssignment = this.getRegionAssignmentSnapshot().getRegionServerToRegionMap(); // Keep track of the failed and succeeded updates int succeededNum = 0; Map<ServerName, Exception> failedUpdateMap = new HashMap<>(); for (Map.Entry<ServerName, List<RegionInfo>> entry : currentAssignment.entrySet()) { List<Pair<RegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>(); try { // Keep track of the favored updates for the current region server FavoredNodesPlan singleServerPlan = null; // Find out all the updates for the current region server for (RegionInfo region : entry.getValue()) { List<ServerName> favoredServerList = plan.getFavoredNodes(region); if ( favoredServerList != null && favoredServerList.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM ) { // Create the single server plan if necessary if (singleServerPlan == null) { singleServerPlan = new FavoredNodesPlan(); } // Update the single server update singleServerPlan.updateFavoredNodesMap(region, favoredServerList); regionUpdateInfos.add(new Pair<>(region, favoredServerList)); } } if (singleServerPlan != null) { // Update the current region server with its updated favored nodes AsyncRegionServerAdmin rsAdmin = getConnection().getRegionServerAdmin(entry.getKey()); UpdateFavoredNodesRequest request = RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos); UpdateFavoredNodesResponse updateFavoredNodesResponse = FutureUtils.get(rsAdmin.updateFavoredNodes(request)); LOG.info("Region server " + FutureUtils.get(rsAdmin.getServerInfo(RequestConverter.buildGetServerInfoRequest())) .getServerInfo() + " has updated " + updateFavoredNodesResponse.getResponse() + " / " + singleServerPlan.size() + " regions with the assignment plan"); succeededNum++; } } catch (Exception e) { failedUpdateMap.put(entry.getKey(), e); } } // log the succeeded updates LOG.info("Updated " + succeededNum + " region servers with " + "the new assignment plan"); // log the failed updates int failedNum = failedUpdateMap.size(); if (failedNum != 0) { LOG.error("Failed to update the following " + failedNum + " region servers with their corresponding favored nodes"); for (Map.Entry<ServerName, Exception> entry : failedUpdateMap.entrySet()) { LOG.error("Failed to update " + entry.getKey().getAddress() + " because of " + entry.getValue().getMessage()); } } }
3.68
flink_TestSignalHandler_register
/** Register some signal handlers. */ public static void register() { synchronized (TestSignalHandler.class) { if (registered) { return; } registered = true; final String[] signals = System.getProperty("os.name").startsWith("Windows") ? new String[] {"TERM", "INT"} : new String[] {"TERM", "HUP", "INT"}; for (String signalName : signals) { try { new Handler(signalName); } catch (Exception e) { LOG.info("Error while registering signal handler", e); } } } }
3.68
flink_DeletePushDownUtils_getResolvedFilterExpressions
/** * Get the resolved filter expressions from the {@code WHERE} clause in DELETE statement, return * Optional.empty() if {@code WHERE} clause contains sub-query. */ public static Optional<List<ResolvedExpression>> getResolvedFilterExpressions( LogicalTableModify tableModify) { FlinkContext context = ShortcutUtils.unwrapContext(tableModify.getCluster()); RelNode input = tableModify.getInput().getInput(0); // no WHERE clause, return an empty list if (input instanceof LogicalTableScan) { return Optional.of(Collections.emptyList()); } if (!(input instanceof LogicalFilter)) { return Optional.empty(); } Filter filter = (Filter) input; if (RexUtil.SubQueryFinder.containsSubQuery(filter)) { return Optional.empty(); } // optimize the filter filter = prepareFilter(filter); // resolve the filter to get resolved expression List<ResolvedExpression> resolveExpression = resolveFilter(context, filter); return Optional.ofNullable(resolveExpression); }
3.68
hadoop_Chain_getReducer
/** * Returns the Reducer instance in the chain. * * @return the Reducer instance in the chain or NULL if none. */ Reducer<?, ?, ?, ?> getReducer() { return reducer; }
3.68
flink_PythonConfigUtil_processSideOutput
/** * Process {@link SideOutputTransformation}s, set the {@link OutputTag}s into the Python * corresponding operator to make it aware of the {@link OutputTag}s. */ private static void processSideOutput(List<Transformation<?>> transformations) { final Set<Transformation<?>> visitedTransforms = Sets.newIdentityHashSet(); final Queue<Transformation<?>> queue = Queues.newArrayDeque(transformations); while (!queue.isEmpty()) { Transformation<?> transform = queue.poll(); visitedTransforms.add(transform); if (transform instanceof SideOutputTransformation) { final SideOutputTransformation<?> sideTransform = (SideOutputTransformation<?>) transform; final Transformation<?> upTransform = Iterables.getOnlyElement(sideTransform.getInputs()); if (PythonConfigUtil.isPythonDataStreamOperator(upTransform)) { final DataStreamPythonFunctionOperator<?> upOperator = (DataStreamPythonFunctionOperator<?>) ((SimpleOperatorFactory<?>) getOperatorFactory(upTransform)) .getOperator(); upOperator.addSideOutputTags( Collections.singletonList(sideTransform.getOutputTag())); } } for (Transformation<?> upTransform : transform.getInputs()) { if (!visitedTransforms.contains(upTransform)) { queue.add(upTransform); } } } }
3.68
hbase_CompactionConfiguration_getMaxFilesToCompact
/** Returns upper bound on number of files to be included in minor compactions */ public int getMaxFilesToCompact() { return maxFilesToCompact; }
3.68
hbase_Scan_getReadType
/** Returns the read type for this scan */ public ReadType getReadType() { return readType; }
3.68
hudi_Pair_hashCode
/** * <p> * Returns a suitable hash code. The hash code follows the definition in {@code Map.Entry}. * </p> * * @return the hash code */ @Override public int hashCode() { // see Map.Entry API specification return (getKey() == null ? 0 : getKey().hashCode()) ^ (getValue() == null ? 0 : getValue().hashCode()); }
3.68
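The XOR formula in the `hashCode` above is exactly the contract specified by `java.util.Map.Entry#hashCode`, which is what makes such a Pair interchangeable with JDK entries in hash-based collections. A small check against the JDK's own implementation:

```java
import java.util.AbstractMap;
import java.util.Map;

public class EntryHashDemo {
    public static void main(String[] args) {
        String key = "k";
        Integer value = 42;
        // Map.Entry contract: (key == null ? 0 : key.hashCode()) ^ (value == null ? 0 : value.hashCode())
        int manual = (key == null ? 0 : key.hashCode()) ^ (value == null ? 0 : value.hashCode());
        Map.Entry<String, Integer> jdkEntry = new AbstractMap.SimpleEntry<>(key, value);
        System.out.println(manual == jdkEntry.hashCode()); // true
    }
}
```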
pulsar_ObjectMapperFactory_getThreadLocal
/** * This method is deprecated. Use {@link #getMapper()} and {@link MapperReference#getObjectMapper()} */ @Deprecated public static ObjectMapper getThreadLocal() { return getMapper().getObjectMapper(); }
3.68
hadoop_BlockData_getBlockNumber
/** * Gets the id of the block that contains the given absolute offset. * @param offset the absolute offset to check. * @return the id of the block that contains the given absolute offset. * @throws IllegalArgumentException if offset is invalid. */ public int getBlockNumber(long offset) { throwIfInvalidOffset(offset); return (int) (offset / blockSize); }
3.68
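The block lookup above is plain integer division of the offset by the block size. A standalone sketch; the block size and the inline validation are assumptions for illustration (the real class validates against its own metadata):

```java
public class BlockIndexDemo {
    static final long BLOCK_SIZE = 8 * 1024 * 1024; // hypothetical 8 MiB blocks

    /** Maps an absolute file offset to the zero-based index of the block containing it. */
    static int getBlockNumber(long offset, long fileSize) {
        if (offset < 0 || offset >= fileSize) {
            throw new IllegalArgumentException("offset out of range: " + offset);
        }
        return (int) (offset / BLOCK_SIZE);
    }

    public static void main(String[] args) {
        long fileSize = 20 * 1024 * 1024; // 20 MiB -> blocks 0..2
        System.out.println(getBlockNumber(0, fileSize));            // 0
        System.out.println(getBlockNumber(BLOCK_SIZE, fileSize));   // 1 (first byte of block 1)
        System.out.println(getBlockNumber(fileSize - 1, fileSize)); // 2
    }
}
```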
incubator-hugegraph-toolchain_SplicingIdGenerator_concat
/** * Concatenate multiple ids into one composite id with IDS_SPLITOR * * @param ids the string id values to be concatenated * @return concatenated string value */ public static String concat(String... ids) { // NOTE: must support string id when using this method return IdUtil.escape(IDS_SPLITOR, ESCAPE, ids); }
3.68
flink_HandlerUtils_sendResponse
/** * Sends the given response and status code to the given channel. * * @param channelHandlerContext identifying the open channel * @param keepAlive If the connection should be kept alive. * @param message which should be sent * @param statusCode of the message to send * @param headers additional header values */ public static CompletableFuture<Void> sendResponse( @Nonnull ChannelHandlerContext channelHandlerContext, boolean keepAlive, @Nonnull String message, @Nonnull HttpResponseStatus statusCode, @Nonnull Map<String, String> headers) { HttpResponse response = new DefaultHttpResponse(HTTP_1_1, statusCode); response.headers().set(CONTENT_TYPE, RestConstants.REST_CONTENT_TYPE); for (Map.Entry<String, String> headerEntry : headers.entrySet()) { response.headers().set(headerEntry.getKey(), headerEntry.getValue()); } if (keepAlive) { response.headers().set(CONNECTION, HttpHeaderValues.KEEP_ALIVE); } byte[] buf = message.getBytes(ConfigConstants.DEFAULT_CHARSET); ByteBuf b = Unpooled.copiedBuffer(buf); HttpUtil.setContentLength(response, buf.length); // write the initial line and the header. channelHandlerContext.write(response); channelHandlerContext.write(b); ChannelFuture lastContentFuture = channelHandlerContext.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT); // close the connection, if no keep-alive is needed if (!keepAlive) { lastContentFuture.addListener(ChannelFutureListener.CLOSE); } return toCompletableFuture(lastContentFuture); }
3.68
hadoop_EditLogBackupInputStream_length
/** * Number of bytes read from the stream so far. */ int length() { return count; }
3.68
flink_CheckpointStatsCounts_getNumberOfFailedCheckpoints
/** * Returns the number of failed checkpoints. * * @return Number of failed checkpoints. */ public long getNumberOfFailedCheckpoints() { return numFailedCheckpoints; }
3.68
hadoop_ManifestSuccessData_serializer
/** * Get a JSON serializer for this class. * @return a serializer. */ public static JsonSerialization<ManifestSuccessData> serializer() { return new JsonSerialization<>(ManifestSuccessData.class, false, true); }
3.68
hbase_HFileCleaner_deleteFile
/** * Construct an {@link HFileDeleteTask} for each file to delete and add into the correct queue * @param file the file to delete * @return HFileDeleteTask to track progress */ private HFileDeleteTask deleteFile(FileStatus file) { HFileDeleteTask task = new HFileDeleteTask(file, cleanerThreadTimeoutMsec); boolean enqueued = dispatch(task); return enqueued ? task : null; }
3.68
hmily_AbstractHmilySQLParserExecutor_generateHmilyDeleteStatement
/** * Generate Hmily delete statement. * * @param deleteStatement delete statement * @param hmilyDeleteStatement hmily delete statement * @return hmily delete statement */ public HmilyDeleteStatement generateHmilyDeleteStatement(final DeleteStatement deleteStatement, final HmilyDeleteStatement hmilyDeleteStatement) { return DeleteStatementAssembler.assembleHmilyDeleteStatement(deleteStatement, hmilyDeleteStatement); }
3.68
hbase_CatalogFamilyFormat_getTableState
/** * Decode table state from META Result. Should contain cell from HConstants.TABLE_FAMILY * @return null if not found */ @Nullable public static TableState getTableState(Result r) throws IOException { Cell cell = r.getColumnLatestCell(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER); if (cell == null) { return null; } try { return TableState.parseFrom(TableName.valueOf(r.getRow()), Arrays.copyOfRange(cell.getValueArray(), cell.getValueOffset(), cell.getValueOffset() + cell.getValueLength())); } catch (DeserializationException e) { throw new IOException(e); } }
3.68
hadoop_HdfsNamedFileStatus_getStoragePolicy
/** @return the storage policy id */ @Override public byte getStoragePolicy() { return storagePolicy; }
3.68
hadoop_StateStoreMetrics_setCacheSize
/** * Set the size of the cache for a State Store interface. * * @param name Name of the record to cache. * @param size Number of records. */ public void setCacheSize(String name, int size) { String counterName = "Cache" + name + "Size"; MutableGaugeInt counter = cacheSizes.get(counterName); if (counter == null) { counter = registry.newGauge(counterName, name, size); cacheSizes.put(counterName, counter); } counter.set(size); }
3.68
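The `setCacheSize` snippet above is the common get-or-create gauge idiom. A minimal sketch of the same pattern using a plain map and `AtomicInteger` in place of the Hadoop metrics registry; the class and method names here are hypothetical:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class CacheSizeGauges {
    private final Map<String, AtomicInteger> cacheSizes = new ConcurrentHashMap<>();

    /** Creates the gauge on first use, then updates it in place on every call. */
    public void setCacheSize(String name, int size) {
        cacheSizes.computeIfAbsent("Cache" + name + "Size", k -> new AtomicInteger())
                  .set(size);
    }

    public int getCacheSize(String name) {
        AtomicInteger gauge = cacheSizes.get("Cache" + name + "Size");
        return gauge == null ? 0 : gauge.get();
    }

    public static void main(String[] args) {
        CacheSizeGauges metrics = new CacheSizeGauges();
        metrics.setCacheSize("Membership", 12);
        metrics.setCacheSize("Membership", 15); // updates the existing gauge
        System.out.println(metrics.getCacheSize("Membership")); // 15
    }
}
```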
morf_Join_deepCopy
/** * @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation) */ @Override public Builder<Join> deepCopy(DeepCopyTransformation transformer) { return TempTransitionalBuilderWrapper.wrapper(new Join(this,transformer)); }
3.68
flink_MutableHashTable_spillPartition
/** * Selects a partition and spills it. The number of the spilled partition is returned. * * @return The number of the spilled partition. */ protected int spillPartition() throws IOException { // find the largest partition ArrayList<HashPartition<BT, PT>> partitions = this.partitionsBeingBuilt; int largestNumBlocks = 0; int largestPartNum = -1; for (int i = 0; i < partitions.size(); i++) { HashPartition<BT, PT> p = partitions.get(i); if (p.isInMemory() && p.getNumOccupiedMemorySegments() > largestNumBlocks) { largestNumBlocks = p.getNumOccupiedMemorySegments(); largestPartNum = i; } } final HashPartition<BT, PT> p = partitions.get(largestPartNum); if (useBloomFilters) { buildBloomFilterForBucketsInPartition(largestPartNum, p); } // spill the partition int numBuffersFreed = p.spillPartition( this.availableMemory, this.ioManager, this.currentEnumerator.next(), this.writeBehindBuffers); this.writeBehindBuffersAvailable += numBuffersFreed; // grab as many buffers as are available directly MemorySegment currBuff; while (this.writeBehindBuffersAvailable > 0 && (currBuff = this.writeBehindBuffers.poll()) != null) { this.availableMemory.add(currBuff); this.writeBehindBuffersAvailable--; } return largestPartNum; }
3.68
hadoop_ScriptBasedNodeLabelsProvider_serviceInit
/* * Method which initializes the values for the script path and interval time. */ @Override protected void serviceInit(Configuration conf) throws Exception { String nodeLabelsScriptPath = conf.get(YarnConfiguration.NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_PATH); long scriptTimeout = conf.getLong(YarnConfiguration.NM_NODE_LABELS_PROVIDER_FETCH_TIMEOUT_MS, YarnConfiguration.DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_TIMEOUT_MS); String[] scriptArgs = conf.getStrings( YarnConfiguration.NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_SCRIPT_OPTS, new String[] {}); verifyConfiguredScript(nodeLabelsScriptPath); long taskInterval = conf.getLong( YarnConfiguration.NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS); this.setIntervalTime(taskInterval); this.runner = new NodeLabelScriptRunner(nodeLabelsScriptPath, scriptArgs, scriptTimeout, this); super.serviceInit(conf); }
3.68
hadoop_AbfsDelegationTokenManager_getDelegationToken
/** * Get a delegation token by invoking * {@link CustomDelegationTokenManager#getDelegationToken(String)}. * If the token returned already has a Kind; that is used. * If not, then the token kind is set to * {@link AbfsDelegationTokenIdentifier#TOKEN_KIND}, which implicitly * resets any token renewer class. * @param renewer the principal permitted to renew the token. * @return a token for the filesystem. * @throws IOException failure. */ public Token<DelegationTokenIdentifier> getDelegationToken( String renewer) throws IOException { LOG.debug("Requesting Delegation token for {}", renewer); Token<DelegationTokenIdentifier> token = tokenManager.getDelegationToken(renewer); if (token.getKind() == null) { // if a token type is not set, use the default. // note: this also sets the renewer to null. token.setKind(AbfsDelegationTokenIdentifier.TOKEN_KIND); } return token; }
3.68
hadoop_OBSDataBlocks_getOwner
/** * Owner. * * @return obsFileSystem instance */ protected OBSFileSystem getOwner() { return owner; }
3.68
dubbo_ProviderConfig_setDispather
/** * Typo; use {@link #setDispatcher(String)} instead. * * @deprecated use {@link #setDispatcher(String)} */ @Deprecated public void setDispather(String dispather) { setDispatcher(dispather); }
3.68
pulsar_BrokerInterceptor_onMessagePublish
/** * Intercept a message when the broker receives a send request. * * @param producer the producer publishing the message * @param headersAndPayload entry's header and payload * @param publishContext Publish Context */ default void onMessagePublish(Producer producer, ByteBuf headersAndPayload, Topic.PublishContext publishContext) { }
3.68
framework_Potus_setFirstName
/** * @param firstName * the firstName to set */ public void setFirstName(String firstName) { this.firstName = firstName; }
3.68
querydsl_PathBuilder_get
/** * Create a new Time typed path * * @param <A> * @param path existing path * @return property path */ @SuppressWarnings("unchecked") public <A extends Comparable<?>> TimePath<A> get(TimePath<A> path) { TimePath<A> newPath = getTime(toString(path), (Class<A>) path.getType()); return addMetadataOf(newPath, path); }
3.68
hadoop_BlockManagerParameters_getFuturePool
/** * @return The Executor future pool to perform async prefetch tasks. */ public ExecutorServiceFuturePool getFuturePool() { return futurePool; }
3.68
hadoop_ItemInfo_isDir
/** * Returns true if the tracking path is a directory, false otherwise. */ public boolean isDir() { return startPathId != fileId; }
3.68
hbase_HFileReaderImpl_positionThisBlockBuffer
/** * Set the position on current backing blockBuffer. */ private void positionThisBlockBuffer() { try { blockBuffer.skip(getCurCellSerializedSize()); } catch (IllegalArgumentException e) { LOG.error("Current pos = " + blockBuffer.position() + "; currKeyLen = " + currKeyLen + "; currValLen = " + currValueLen + "; block limit = " + blockBuffer.limit() + "; currBlock currBlockOffset = " + this.curBlock.getOffset() + "; path=" + reader.getPath()); throw e; } }
3.68
hbase_KeyValue_getKeyLength
/** Returns Length of key portion. */ public int getKeyLength() { return Bytes.toInt(this.bytes, this.offset); }
3.68
flink_CoGroupOperator_where
/** * Continues a CoGroup transformation and defines a {@link KeySelector} function for the * first co-grouped {@link DataSet}. * * <p>The KeySelector function is called for each element of the first DataSet and extracts * a single key value on which the DataSet is grouped. * * @param keyExtractor The KeySelector function which extracts the key values from the * DataSet on which it is grouped. * @return An incomplete CoGroup transformation. Call {@link * org.apache.flink.api.java.operators.CoGroupOperator.CoGroupOperatorSets.CoGroupOperatorSetsPredicate#equalTo(int...)} * to continue the CoGroup. * @see KeySelector * @see DataSet */ public <K> CoGroupOperatorSetsPredicate where(KeySelector<I1, K> keyExtractor) { TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, input1.getType()); return new CoGroupOperatorSetsPredicate( new SelectorFunctionKeys<>( input1.clean(keyExtractor), input1.getType(), keyType)); }
3.68
morf_MySqlDialect_renameIndexStatements
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#renameIndexStatements(org.alfasoftware.morf.metadata.Table, * java.lang.String, java.lang.String) */ @Override public Collection<String> renameIndexStatements(final Table table, final String fromIndexName, final String toIndexName) { Index newIndex, existingIndex; try { newIndex = Iterables.find(table.indexes(), new Predicate<Index>() { @Override public boolean apply(Index input) { return input.getName().equals(toIndexName); } }); existingIndex = newIndex.isUnique() ? index(fromIndexName).columns(newIndex.columnNames()).unique() : index(fromIndexName).columns(newIndex.columnNames()); } catch (NoSuchElementException nsee) { // If the index wasn't found, we must have the old schema instead of the // new one so try the other way round existingIndex = Iterables.find(table.indexes(), new Predicate<Index>() { @Override public boolean apply(Index input) { return input.getName().equals(fromIndexName); } }); newIndex = existingIndex.isUnique() ? index(toIndexName).columns(existingIndex.columnNames()).unique() : index(toIndexName).columns(existingIndex.columnNames()); } return ImmutableList.<String>builder() .addAll(indexDropStatements(table, existingIndex)) .addAll(indexDeploymentStatements(table, newIndex)) .build(); }
3.68
hadoop_RpcProgram_unregister
/** * Unregister this program with the local portmapper. * @param transport transport layer for port map * @param boundPort port number of bounded RPC program */ public void unregister(int transport, int boundPort) { if (boundPort != port) { LOG.info("The bound port is " + boundPort + ", different from the configured port " + port); port = boundPort; } // Unregister all the program versions with portmapper for a given transport for (int vers = lowProgVersion; vers <= highProgVersion; vers++) { PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport, port); register(mapEntry, false); } }
3.68
flink_PartialCachingLookupProvider_of
/** * Build a {@link PartialCachingLookupProvider} from the specified {@link LookupFunction} and * {@link LookupCache}. */ static PartialCachingLookupProvider of(LookupFunction lookupFunction, LookupCache cache) { return new PartialCachingLookupProvider() { @Override public LookupCache getCache() { return cache; } @Override public LookupFunction createLookupFunction() { return lookupFunction; } }; }
3.68
hadoop_VolumeManagerImpl_initCsiAdaptorCache
// Init the CSI adaptor cache according to the configuration. // The user only needs to configure a list of adaptor addresses; // this method extracts each address and inits an adaptor client, // then proceeds with a hand-shake by calling the adaptor's getPluginInfo // method to retrieve the driver info. If the driver can be resolved, // it is then added to the cache. Note, we don't allow two drivers // with the same driver-name even if the version is different. private void initCsiAdaptorCache( final Map<String, CsiAdaptorProtocol> adaptorMap, Configuration conf) throws IOException, YarnException { LOG.info("Initializing cache for csi-driver-adaptors"); String[] addresses = conf.getStrings(YarnConfiguration.NM_CSI_ADAPTOR_ADDRESSES); if (addresses != null && addresses.length > 0) { for (String addr : addresses) { LOG.info("Found csi-driver-adaptor socket address: " + addr); InetSocketAddress address = NetUtils.createSocketAddr(addr); YarnRPC rpc = YarnRPC.create(conf); UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); CsiAdaptorProtocol adaptorClient = NMProxy .createNMProxy(conf, CsiAdaptorProtocol.class, currentUser, rpc, address); // Attempt to resolve the driver by contacting // the driver's identity service on the given address. // If the call fails, the initialization also fails // to avoid running into an inconsistent state. LOG.info("Retrieving info from csi-driver-adaptor on address " + addr); GetPluginInfoResponse response = adaptorClient.getPluginInfo(GetPluginInfoRequest.newInstance()); if (!Strings.isNullOrEmpty(response.getDriverName())) { String driverName = response.getDriverName(); if (adaptorMap.containsKey(driverName)) { throw new YarnException( "Duplicate driver adaptor found," + " driver name: " + driverName); } adaptorMap.put(driverName, adaptorClient); LOG.info("CSI Adaptor added to the cache, adaptor name: " + driverName + ", driver version: " + response.getVersion()); } } } }
3.68
flink_UserDefinedFunctionHelper_validateInstantiation
/** Checks if a user-defined function can be easily instantiated. */ private static void validateInstantiation(Class<?> clazz, boolean requiresDefaultConstructor) { if (!InstantiationUtil.isPublic(clazz)) { throw new ValidationException( String.format("Function class '%s' is not public.", clazz.getName())); } else if (!InstantiationUtil.isProperClass(clazz)) { throw new ValidationException( String.format( "Function class '%s' is not a proper class. It is either abstract, an interface, or a primitive type.", clazz.getName())); } else if (requiresDefaultConstructor && !InstantiationUtil.hasPublicNullaryConstructor(clazz)) { throw new ValidationException( String.format( "Function class '%s' must have a public default constructor.", clazz.getName())); } }
3.68
zxing_ResultHandler_openProductSearch
// Uses the mobile-specific version of Product Search, which is formatted for small screens. final void openProductSearch(String upc) { Uri uri = Uri.parse("http://www.google." + LocaleManager.getProductSearchCountryTLD(activity) + "/m/products?q=" + upc + "&source=zxing"); launchIntent(new Intent(Intent.ACTION_VIEW, uri)); }
3.68
dubbo_EdsEndpointManager_getEdsListeners
// for test static ConcurrentHashMap<String, Consumer<Map<String, EndpointResult>>> getEdsListeners() { return EDS_LISTENERS; }
3.68
flink_MapValue_put
/* * (non-Javadoc) * @see java.util.Map#put(java.lang.Object, java.lang.Object) */ @Override public V put(final K key, final V value) { return this.map.put(key, value); }
3.68
framework_NotificationElement_close
/** * Closes a notification. * * @throws TimeoutException * If a notification can not be closed and the timeout expires. */ public void close() { click(); WebDriverWait wait = new WebDriverWait(getDriver(), 10); wait.until(ExpectedConditions .not(ExpectedConditions.presenceOfAllElementsLocatedBy( By.className("v-Notification")))); }
3.68
hadoop_OBSListing_getBatchSize
/** * Get the number of entries in the current batch. * * @return a number, possibly zero. */ public int getBatchSize() { return batchSize; }
3.68
hadoop_BytesWritable_set
/** * Set the value to a copy of the given byte range. * * @param newData the new values to copy in * @param offset the offset in newData to start at * @param length the number of bytes to copy */ public void set(byte[] newData, int offset, int length) { setSize(0); setSize(length); System.arraycopy(newData, offset, bytes, 0, size); }
3.68
hadoop_AbfsThrottlingInterceptFactory_referenceLost
/** * Reference lost callback. * @param accountName key lost. */ private static void referenceLost(String accountName) { lostReferences.add(accountName); }
3.68
pulsar_NarClassLoader_updateClasspath
/** * Adds URLs for the resources unpacked from this NAR: * <ul> * <li>the root: for classes, <tt>META-INF</tt>, etc.</li> * <li><tt>META-INF/dependencies</tt>: for config files, <tt>.so</tt>s, etc.</li> * <li><tt>META-INF/dependencies/*.jar</tt>: for dependent libraries</li> * </ul> * * @param root * the root directory of the unpacked NAR. * @throws IOException * if the URL list could not be updated. */ private void updateClasspath(File root) throws IOException { addURL(root.toURI().toURL()); // for compiled classes, META-INF/, etc. File dependencies = new File(root, "META-INF/bundled-dependencies"); if (!dependencies.isDirectory()) { log.warn("{} does not contain META-INF/bundled-dependencies!", narWorkingDirectory); } addURL(dependencies.toURI().toURL()); if (dependencies.isDirectory()) { final File[] jarFiles = dependencies.listFiles(JAR_FILTER); if (jarFiles != null) { Arrays.sort(jarFiles, Comparator.comparing(File::getName)); for (File libJar : jarFiles) { addURL(libJar.toURI().toURL()); } } } }
3.68
hadoop_DomainRowKey_encode
/* * (non-Javadoc) * * Encodes DomainRowKey object into a byte array * * @see org.apache.hadoop.yarn.server.timelineservice.storage.common * .KeyConverter#encode(java.lang.Object) */ @Override public byte[] encode(DomainRowKey rowKey) { if (rowKey == null) { return Separator.EMPTY_BYTES; } byte[] cluster = Separator.encode(rowKey.getClusterId(), Separator.SPACE, Separator.TAB, Separator.QUALIFIERS); byte[] domainIdBytes = Separator.encode(rowKey.getDomainId(), Separator.SPACE, Separator.TAB, Separator.QUALIFIERS); return Separator.QUALIFIERS.join(cluster, domainIdBytes); }
3.68
hbase_ClusterId_convert
/** Returns A {@link ClusterId} made from the passed in <code>cid</code> */ public static ClusterId convert(final ClusterIdProtos.ClusterId cid) { return new ClusterId(cid.getClusterId()); }
3.68
hadoop_BlockStorageMovementAttemptedItems_stopGracefully
/** * Timed wait to stop monitor thread. */ synchronized void stopGracefully() { if (timerThread == null) { return; } if (monitorRunning) { stop(); } try { timerThread.join(3000); } catch (InterruptedException ie) { } }
3.68
hbase_FileArchiverNotifierImpl_groupArchivedFiledBySnapshotAndRecordSize
/** * For each file in the map, this updates the first snapshot (lexicographic snapshot name) that * references this file. The result of this computation is serialized to the quota table. * @param snapshots A collection of HBase snapshots to group the files into * @param fileSizes A map of file names to their sizes */ void groupArchivedFiledBySnapshotAndRecordSize(List<String> snapshots, Set<Entry<String, Long>> fileSizes) throws IOException { // Make a copy as we'll modify it. final Map<String, Long> filesToUpdate = new HashMap<>(fileSizes.size()); for (Entry<String, Long> entry : fileSizes) { filesToUpdate.put(entry.getKey(), entry.getValue()); } // Track the change in size to each snapshot final Map<String, Long> snapshotSizeChanges = new HashMap<>(); for (String snapshot : snapshots) { // For each file in `filesToUpdate`, check if `snapshot` refers to it. // If `snapshot` does, remove it from `filesToUpdate` and add it to `snapshotSizeChanges`. bucketFilesToSnapshot(snapshot, filesToUpdate, snapshotSizeChanges); if (filesToUpdate.isEmpty()) { // If we have no more files recently archived, we have nothing more to check break; } } // We have computed changes to the snapshot size, we need to record them. if (!snapshotSizeChanges.isEmpty()) { if (LOG.isTraceEnabled()) { LOG.trace("Writing snapshot size changes for: " + snapshotSizeChanges); } persistSnapshotSizeChanges(snapshotSizeChanges); } }
3.68
hbase_AsyncAdmin_getReplicationPeerSyncReplicationState
/** * Get the current cluster state in a synchronous replication peer. * @param peerId a short name that identifies the peer * @return the current cluster state wrapped by a {@link CompletableFuture}. */ default CompletableFuture<SyncReplicationState> getReplicationPeerSyncReplicationState(String peerId) { CompletableFuture<SyncReplicationState> future = new CompletableFuture<>(); addListener(listReplicationPeers(Pattern.compile(peerId)), (peers, error) -> { if (error != null) { future.completeExceptionally(error); } else if (peers.isEmpty() || !peers.get(0).getPeerId().equals(peerId)) { future .completeExceptionally(new IOException("Replication peer " + peerId + " does not exist")); } else { future.complete(peers.get(0).getSyncReplicationState()); } }); return future; }
3.68
graphhopper_ResponsePath_calcBBox2D
/** * Calculates the 2D bounding box of this route */ public Envelope calcBBox2D() { check("calcBBox2D"); Envelope bounds = new Envelope(); for (int i = 0; i < pointList.size(); i++) { bounds.expandToInclude(pointList.getLon(i), pointList.getLat(i)); } return bounds; }
3.68
framework_PopupDateField_getInputPrompt
/** * Gets the current input prompt. * * @see #setInputPrompt(String) * @return the current input prompt, or null if not enabled */ public String getInputPrompt() { return inputPrompt; }
3.68
flink_MetricOptions_forReporter
/** * Returns a view over the given configuration via which options can be set/retrieved for the * given reporter. * * <pre> * Configuration config = ... * MetricOptions.forReporter(config, "my_reporter") * .set(MetricOptions.REPORTER_INTERVAL, Duration.ofSeconds(10)) * ... * </pre> * * @param configuration backing configuration * @param reporterName reporter name * @return view over configuration */ @Experimental public static Configuration forReporter(Configuration configuration, String reporterName) { return new DelegatingConfiguration( configuration, ConfigConstants.METRICS_REPORTER_PREFIX + reporterName + "."); }
3.68
hbase_KeyValue_compareRows
/** * Get the b[],o,l for left and right rowkey portions and compare. * @param left the left kv serialized byte[] to be compared with * @param loffset the offset in the left byte[] * @param llength the length in the left byte[] * @param right the right kv serialized byte[] to be compared with * @param roffset the offset in the right byte[] * @param rlength the length in the right byte[] * @return 0 if equal, &lt;0 if left smaller, &gt;0 if right smaller */ public int compareRows(byte[] left, int loffset, int llength, byte[] right, int roffset, int rlength) { return Bytes.compareTo(left, loffset, llength, right, roffset, rlength); }
3.68
flink_TieredStorageProducerClient_write
/** * Write records to the producer client. The {@link BufferAccumulator} will accumulate the * records into buffers. * * <p>Note that isBroadcast indicates whether the record is broadcast, while isBroadcastOnly * indicates whether the result partition is broadcast-only. When the result partition is not * broadcast-only and the record is a broadcast record, the record will be written to all the * subpartitions. * * @param record the written record data * @param subpartitionId the subpartition identifier * @param dataType the data type of the record * @param isBroadcast whether the record is a broadcast record */ public void write( ByteBuffer record, TieredStorageSubpartitionId subpartitionId, Buffer.DataType dataType, boolean isBroadcast) throws IOException { if (isBroadcast && !isBroadcastOnly) { for (int i = 0; i < numSubpartitions; ++i) { // As the tiered storage subpartition ID is created only for broadcast records, // which are fewer than normal records, the performance impact of generating new // TieredStorageSubpartitionId objects is expected to be manageable. If the // performance is significantly affected, this logic will be optimized accordingly. bufferAccumulator.receive( record.duplicate(), new TieredStorageSubpartitionId(i), dataType, isBroadcast); } } else { bufferAccumulator.receive(record, subpartitionId, dataType, isBroadcast); } }
3.68
morf_AbstractSqlDialectTest_testSelectForUpdate
/** * Tests the SQL for select for update */ @Test public void testSelectForUpdate() { SelectStatement stmt = new SelectStatement().from(new TableReference(TEST_TABLE)).forUpdate(); assertEquals("SQL to select for update", "SELECT * FROM " + tableName(TEST_TABLE) + expectedForUpdate(), testDialect.convertStatementToSQL(stmt)); }
3.68
hbase_MetricsTableRequests_updateIncrement
/** * Update the Increment time histogram. * @param time time it took * @param blockBytesScanned size of block bytes scanned to retrieve the response */ public void updateIncrement(long time, long blockBytesScanned) { if (isEnableTableLatenciesMetrics()) { incrementTimeHistogram.update(time); if (blockBytesScanned > 0) { blockBytesScannedCount.increment(blockBytesScanned); incrementBlockBytesScanned.update(blockBytesScanned); } } }
3.68
framework_VSlider_setId
/** * Sets the id of this component's connector. * * @param id * the connector id * @deprecated the updated field is no longer used by the framework */ @Deprecated public void setId(String id) { this.id = id; }
3.68
hbase_Bytes_toShort
/** * Converts a byte array to a short value * @param bytes byte array * @param offset offset into array * @param length length, has to be {@link #SIZEOF_SHORT} * @return the short value * @throws IllegalArgumentException if length is not {@link #SIZEOF_SHORT} or if there's not * enough room in the array at the offset indicated. */ public static short toShort(byte[] bytes, int offset, final int length) { if (length != SIZEOF_SHORT || offset + length > bytes.length) { throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_SHORT); } return ConverterHolder.BEST_CONVERTER.toShort(bytes, offset, length); }
3.68
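For reference, the `toShort` above delegates the actual decode to a converter; a big-endian two-byte decode written out by hand, with the same length/offset validation, looks like this (a standalone sketch, assuming big-endian encoding as HBase's Bytes uses):

```java
public class ToShortDemo {
    static final int SIZEOF_SHORT = 2;

    /** Big-endian decode of two bytes into a short, with bounds checking. */
    static short toShort(byte[] bytes, int offset, int length) {
        if (length != SIZEOF_SHORT || offset + length > bytes.length) {
            throw new IllegalArgumentException(
                    "need exactly " + SIZEOF_SHORT + " bytes at offset " + offset);
        }
        short n = (short) (bytes[offset] & 0xFF);
        n = (short) ((n << 8) | (bytes[offset + 1] & 0xFF));
        return n;
    }

    public static void main(String[] args) {
        byte[] data = {0x01, 0x02};              // 0x0102 = 258
        System.out.println(toShort(data, 0, 2)); // 258
    }
}
```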
hadoop_TypedBytesInput_readBool
/** * Reads the boolean following a <code>Type.BOOL</code> code. * @return the obtained boolean * @throws IOException if reading from the underlying stream fails */ public boolean readBool() throws IOException { return in.readBoolean(); }
3.68
dubbo_AdaptiveClassCodeGenerator_generateExtNameAssignment
/** * generate extName assignment code */ private String generateExtNameAssignment(String[] value, boolean hasInvocation) { // TODO: refactor it String getNameCode = null; for (int i = value.length - 1; i >= 0; --i) { if (i == value.length - 1) { if (null != defaultExtName) { if (!CommonConstants.PROTOCOL_KEY.equals(value[i])) { if (hasInvocation) { getNameCode = String.format( "url.getMethodParameter(methodName, \"%s\", \"%s\")", value[i], defaultExtName); } else { getNameCode = String.format("url.getParameter(\"%s\", \"%s\")", value[i], defaultExtName); } } else { getNameCode = String.format( "( url.getProtocol() == null ? \"%s\" : url.getProtocol() )", defaultExtName); } } else { if (!CommonConstants.PROTOCOL_KEY.equals(value[i])) { if (hasInvocation) { getNameCode = String.format( "url.getMethodParameter(methodName, \"%s\", \"%s\")", value[i], defaultExtName); } else { getNameCode = String.format("url.getParameter(\"%s\")", value[i]); } } else { getNameCode = "url.getProtocol()"; } } } else { if (!CommonConstants.PROTOCOL_KEY.equals(value[i])) { if (hasInvocation) { getNameCode = String.format( "url.getMethodParameter(methodName, \"%s\", \"%s\")", value[i], defaultExtName); } else { getNameCode = String.format("url.getParameter(\"%s\", %s)", value[i], getNameCode); } } else { getNameCode = String.format("url.getProtocol() == null ? (%s) : url.getProtocol()", getNameCode); } } } return String.format(CODE_EXT_NAME_ASSIGNMENT, getNameCode); }
3.68
dubbo_RpcServiceContext_setRemoteAddress
/** * set remote address. * * @param address * @return context */ @Override public RpcServiceContext setRemoteAddress(InetSocketAddress address) { this.remoteAddress = address; return this; }
3.68
framework_TableScrollAfterAddRow_getTicketNumber
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber() */ @Override protected Integer getTicketNumber() { return 14147; }
3.68
framework_AbstractSplitPanel_setFirstComponent
/** * Sets the first component of this split panel. Depending on the direction * the first component is shown at the top or to the left. * * @param c * The component to use as first component */ public void setFirstComponent(Component c) { if (getFirstComponent() == c) { // Nothing to do return; } if (getFirstComponent() != null) { // detach old removeComponent(getFirstComponent()); } getState().firstChild = c; if (c != null) { super.addComponent(c); } }
3.68