Dataset schema:
  name          string  (length 12 to 178)
  code_snippet  string  (length 8 to 36.5k)
  score         float64 (range 3.26 to 3.68)
framework_AbstractRenderer_encode
/**
 * Encodes the given value to JSON.
 * <p>
 * This is a helper method that can be invoked by an {@link #encode(Object)
 * encode(T)} override if serializing a value of a type other than
 * {@link #getPresentationType() the presentation type} is desired. For
 * instance, a {@code Renderer<Date>} could first turn a date value into a
 * formatted string and return {@code encode(dateString, String.class)}.
 *
 * @param value
 *            the value to be encoded
 * @param type
 *            the type of the value
 * @return a JSON representation of the given value
 */
protected <U> JsonValue encode(U value, Class<U> type) {
    return JsonCodec
            .encode(value, null, type, getUI().getConnectorTracker())
            .getEncodedValue();
}
3.68
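A minimal usage sketch of the pattern this Javadoc describes (not part of the dataset): a hypothetical renderer that serializes Date values as formatted strings via the protected encode(value, type) helper. The DateStringRenderer name, the constructor signature and the date pattern are illustrative assumptions, not confirmed Vaadin API.

import java.text.SimpleDateFormat;
import java.util.Date;
import elemental.json.JsonValue;

// Hypothetical subclass; assumes an AbstractRenderer(presentationType, nullRepresentation) constructor.
public class DateStringRenderer extends AbstractRenderer<Object, Date> {
    private final SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd");

    public DateStringRenderer() {
        super(Date.class, "");
    }

    @Override
    public JsonValue encode(Date value) {
        // Encode as a String rather than the presentation type (Date).
        return encode(format.format(value), String.class);
    }
}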
flink_CallExpression_getFunctionName
/**
 * Returns a string representation of the call's function for logging or
 * printing to a console.
 */
public String getFunctionName() {
    if (functionIdentifier == null) {
        return functionDefinition.toString();
    } else {
        return functionIdentifier.asSummaryString();
    }
}
3.68
graphhopper_HeadingExample_createGraphHopperInstance
/**
 * See {@link RoutingExample#createGraphHopperInstance} for more comments on
 * creating the GraphHopper instance.
 */
static GraphHopper createGraphHopperInstance(String ghLoc) {
    GraphHopper hopper = new GraphHopper();
    hopper.setOSMFile(ghLoc);
    hopper.setGraphHopperLocation("target/heading-graph-cache");
    hopper.setProfiles(new Profile("car")
            .setCustomModel(new CustomModel()
                    .addToPriority(If("road_access == DESTINATION", MULTIPLY, "0.1")))
            .setVehicle("car").setTurnCosts(false));
    hopper.getCHPreparationHandler().setCHProfiles(new CHProfile("car"));
    hopper.importOrLoad();
    return hopper;
}
3.68
flink_SlotSharingGroup_setManagedMemory
/** Set the task managed memory for this SlotSharingGroup. */
public Builder setManagedMemory(MemorySize managedMemory) {
    this.managedMemory = managedMemory;
    return this;
}
3.68
framework_ConnectorTracker_markClientSideInitialized
/**
 * Marks the given connector as initialized, meaning that the client-side
 * state has been initialized for the connector.
 *
 * @see #isClientSideInitialized(ClientConnector)
 *
 * @param connector
 *            the connector that should be marked as initialized
 */
public void markClientSideInitialized(ClientConnector connector) {
    uninitializedConnectors.remove(connector);
}
3.68
hbase_VisibilityClient_getAuths
/**
 * Get the authorization for a given user.
 * @param connection the Connection instance to use
 * @param user the user
 * @return labels the given user is globally authorized for
 */
public static GetAuthsResponse getAuths(Connection connection, final String user)
  throws Throwable {
  try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
    Batch.Call<VisibilityLabelsService, GetAuthsResponse> callable =
      new Batch.Call<VisibilityLabelsService, GetAuthsResponse>() {
        ServerRpcController controller = new ServerRpcController();
        CoprocessorRpcUtils.BlockingRpcCallback<GetAuthsResponse> rpcCallback =
          new CoprocessorRpcUtils.BlockingRpcCallback<>();

        @Override
        public GetAuthsResponse call(VisibilityLabelsService service) throws IOException {
          GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder();
          getAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user)));
          service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback);
          GetAuthsResponse response = rpcCallback.get();
          if (controller.failedOnException()) {
            throw controller.getFailedOn();
          }
          return response;
        }
      };
    Map<byte[], GetAuthsResponse> result = table.coprocessorService(VisibilityLabelsService.class,
      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
    // There will be exactly one region for the labels table, and so one entry in the result map.
    return result.values().iterator().next();
  }
}
3.68
framework_Embedded_getType
/**
 * Gets the type of the embedded object.
 * <p>
 * This can be one of the following:
 * <ul>
 * <li>TYPE_OBJECT <i>(This is the default)</i>
 * <li>TYPE_IMAGE
 * </ul>
 * </p>
 *
 * @return the type.
 */
public int getType() {
    return getState(false).type;
}
3.68
framework_AbstractDateField_getRangeStart
/**
 * Returns the precise rangeStart used.
 *
 * @return the precise rangeStart used, may be {@code null}.
 */
public T getRangeStart() {
    return convertFromDateString(getState(false).rangeStart);
}
3.68
dubbo_ReflectUtils_isCompatible
/**
 * Checks whether each object in {@code os} is assignment-compatible with the
 * corresponding class in {@code cs}.
 *
 * @param cs class array.
 * @param os object array.
 * @return compatible or not.
 */
public static boolean isCompatible(Class<?>[] cs, Object[] os) {
    int len = cs.length;
    if (len != os.length) {
        return false;
    }
    if (len == 0) {
        return true;
    }
    for (int i = 0; i < len; i++) {
        if (!isCompatible(cs[i], os[i])) {
            return false;
        }
    }
    return true;
}
3.68
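A quick illustrative call (not from the dataset), assuming the single-argument isCompatible overload handles boxing of primitives:

Class<?>[] signature = { String.class, int.class };
Object[] arguments = { "timeout", 30 };
// true if each argument can be passed for the corresponding parameter type
boolean ok = ReflectUtils.isCompatible(signature, arguments);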
flink_ExecutionVertexInputInfo_getSubtaskIndex
/** Get the index of this subtask. */
public int getSubtaskIndex() {
    return subtaskIndex;
}
3.68
incubator-hugegraph-toolchain_EdgeLabelController_checkDisplayFields
/**
 * TODO: merge with VertexLabelController.checkDisplayFields
 */
private static void checkDisplayFields(EdgeLabelEntity entity) {
    EdgeLabelStyle style = entity.getStyle();
    List<String> displayFields = style.getDisplayFields();
    if (!CollectionUtils.isEmpty(displayFields)) {
        Set<String> nullableProps = entity.getNullableProps();
        Ex.check(!CollectionUtil.hasIntersection(displayFields, nullableProps),
                 "schema.display-fields.cannot-be-nullable");
    }
}
3.68
querydsl_Alias_getAny
/**
 * Convert the given alias to an expression
 *
 * @param <D> type of the expression
 * @param arg alias instance
 * @return underlying expression
 */
@SuppressWarnings("unchecked")
public static <D> Expression<D> getAny(D arg) {
    Object current = aliasFactory.getCurrentAndReset();
    if (arg instanceof ManagedObject) {
        return (Expression<D>) ((ManagedObject) arg).__mappedPath();
    } else if (current != null) {
        return (Expression<D>) current;
    } else {
        throw new IllegalArgumentException("No path mapped to " + arg);
    }
}
3.68
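A hedged usage sketch, assuming QueryDSL's Alias.alias(Class) proxy factory; the Person bean is hypothetical.

// Hypothetical bean type used only for illustration.
Person person = Alias.alias(Person.class);
// The proxy implements ManagedObject, so getAny resolves it to its mapped path.
Expression<Person> expr = Alias.getAny(person);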
framework_AbstractBeanContainer_addItemAfter
/**
 * Adds the bean after the given bean.
 *
 * @see Container.Ordered#addItemAfter(Object, Object)
 */
protected BeanItem<BEANTYPE> addItemAfter(IDTYPE previousItemId,
        IDTYPE newItemId, BEANTYPE bean) {
    if (!validateBean(bean)) {
        return null;
    }
    return internalAddItemAfter(previousItemId, newItemId,
            createBeanItem(bean), true);
}
3.68
framework_VLayoutSlot_reportActualRelativeHeight
/**
 * Override this method to report the expected outer height to the
 * LayoutManager. By default does nothing.
 *
 * @param allocatedHeight
 *            the height to set (including margins, borders and paddings) in
 *            pixels
 */
protected void reportActualRelativeHeight(int allocatedHeight) {
    // Default implementation does nothing
}
3.68
zilla_HpackContext_staticIndex11
// Index in static table for the given name of length 11
private static int staticIndex11(DirectBuffer name) {
    return (name.getByte(10) == 'r' && STATIC_TABLE[53].name.equals(name))
            ? 53 : -1; // retry-after
}
3.68
graphhopper_Transfers_getTransfersToStop
// Starts implementing the proposed GTFS extension for route and trip specific transfer rules.
// So far, only the route is supported.
List<Transfer> getTransfersToStop(String toStopId, String toRouteId) {
    final List<Transfer> allInboundTransfers = transfersToStop.getOrDefault(toStopId, Collections.emptyList());
    final Map<String, List<Transfer>> byFromStop = allInboundTransfers.stream()
            .filter(t -> t.transfer_type == 0 || t.transfer_type == 2)
            .filter(t -> t.to_route_id == null || toRouteId.equals(t.to_route_id))
            .collect(Collectors.groupingBy(t -> t.from_stop_id));
    final List<Transfer> result = new ArrayList<>();
    byFromStop.forEach((fromStop, transfers) -> {
        if (hasNoRouteSpecificArrivalTransferRules(fromStop)) {
            Transfer myRule = new Transfer();
            myRule.from_stop_id = fromStop;
            myRule.to_stop_id = toStopId;
            if (transfers.size() == 1)
                myRule.min_transfer_time = transfers.get(0).min_transfer_time;
            result.add(myRule);
        } else {
            routesByStop.getOrDefault(fromStop, Collections.emptySet()).forEach(fromRoute -> {
                final Transfer mostSpecificRule = findMostSpecificRule(transfers, fromRoute, toRouteId);
                final Transfer myRule = new Transfer();
                myRule.to_route_id = toRouteId;
                myRule.from_route_id = fromRoute;
                myRule.to_stop_id = mostSpecificRule.to_stop_id;
                myRule.from_stop_id = mostSpecificRule.from_stop_id;
                myRule.transfer_type = mostSpecificRule.transfer_type;
                myRule.min_transfer_time = mostSpecificRule.min_transfer_time;
                myRule.from_trip_id = mostSpecificRule.from_trip_id;
                myRule.to_trip_id = mostSpecificRule.to_trip_id;
                result.add(myRule);
            });
        }
    });
    if (result.stream().noneMatch(t -> t.from_stop_id.equals(toStopId))) {
        final Transfer withinStationTransfer = new Transfer();
        withinStationTransfer.from_stop_id = toStopId;
        withinStationTransfer.to_stop_id = toStopId;
        result.add(withinStationTransfer);
    }
    return result;
}
3.68
morf_UpgradeTestHelper_validateUpgradeStep
/**
 * Validate that the upgrade step meets the basic requirements.
 */
private void validateUpgradeStep(UpgradeStep upgradeStep) {
    Class<? extends UpgradeStep> upgradeStepClass = upgradeStep.getClass();

    // Check the upgrade step has a Sequence
    if (upgradeStepClass.getAnnotation(Sequence.class) == null) {
        fail(String.format("Upgrade step [%s] should have a Sequence set. How about [%d]",
            upgradeStepClass.getSimpleName(), System.currentTimeMillis() / 1000));
    }

    // Check the upgrade step has a UUID
    UUID uuidAnnotation = upgradeStepClass.getAnnotation(UUID.class);
    String currentUuid = uuidAnnotation == null ? null : uuidAnnotation.value();
    if (StringUtils.isBlank(currentUuid) || !uuids.add(currentUuid)) {
        fail(String.format("Upgrade step [%s] should have a non blank, unique UUID set. How about [%s]",
            upgradeStepClass.getSimpleName(), java.util.UUID.randomUUID().toString()));
    }

    // verify we can parse the UUID
    try {
        assertNotNull(java.util.UUID.fromString(currentUuid));
    } catch (Exception e) {
        throw new RuntimeException(String.format("Could not parse UUID [%s] from [%s]",
            currentUuid, upgradeStepClass.getSimpleName()), e);
    }

    // Check the upgrade step has a description
    final String description = upgradeStep.getDescription();
    assertTrue(String.format("[%s] should have a description", upgradeStepClass.getSimpleName()),
        StringUtils.isNotEmpty(description));
    assertTrue(String.format("Description for [%s] must not be more than 200 characters", upgradeStepClass.getSimpleName()),
        description.length() <= 200);
    assertFalse(String.format("Description for [%s] should not end with full stop", upgradeStepClass.getSimpleName()),
        description.endsWith("."));

    assertTrue(String.format("[%s] should have a JIRA ID", upgradeStepClass.getSimpleName()),
        StringUtils.isNotEmpty(upgradeStep.getJiraId()));
    for (String jiraId : StringUtils.split(upgradeStep.getJiraId(), ',')) {
        assertTrue(String.format("[%s] should have a valid JIRA ID [%s]", upgradeStepClass.getSimpleName(), upgradeStep.getJiraId()),
            jiraIdIsValid(jiraId));
    }
}
3.68
hbase_Filter_parseFrom
/**
 * Concrete implementers can signal a failure condition in their code by throwing an
 * {@link IOException}.
 * @param pbBytes A pb serialized {@link Filter} instance
 * @return An instance of {@link Filter} made from <code>bytes</code>
 * @throws DeserializationException if an error occurred
 * @see #toByteArray
 */
public static Filter parseFrom(final byte[] pbBytes) throws DeserializationException {
  throw new DeserializationException(
    "parseFrom called on base Filter, but should be called on derived type");
}
3.68
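A sketch of the contract this base method documents: each derived filter supplies its own static parseFrom. MyFilter and its protobuf message are hypothetical stand-ins, not real HBase types.

public static MyFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
  try {
    // Hypothetical generated protobuf message for this filter type.
    FilterProtos.MyFilter proto = FilterProtos.MyFilter.parseFrom(pbBytes);
    return new MyFilter(proto.getValue().toByteArray());
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
}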
hudi_AbstractTableFileSystemView_filterUncommittedLogs
/**
 * Ignores the uncommitted log files.
 *
 * @param fileSlice File Slice
 */
private FileSlice filterUncommittedLogs(FileSlice fileSlice) {
    List<HoodieLogFile> committedLogFiles = fileSlice.getLogFiles()
        .filter(logFile -> completionTimeQueryView.isCompleted(logFile.getDeltaCommitTime()))
        .collect(Collectors.toList());
    if (committedLogFiles.size() != fileSlice.getLogFiles().count()) {
        LOG.debug("File Slice (" + fileSlice + ") has uncommitted log files.");
        // A file is filtered out of the file-slice if the corresponding
        // instant has not completed yet.
        FileSlice transformed =
            new FileSlice(fileSlice.getPartitionPath(), fileSlice.getBaseInstantTime(), fileSlice.getFileId());
        fileSlice.getBaseFile().ifPresent(transformed::setBaseFile);
        committedLogFiles.forEach(transformed::addLogFile);
        return transformed;
    }
    return fileSlice;
}
3.68
hadoop_S3AReadOpContext_getPrefetchBlockCount
/**
 * Gets the size of prefetch queue (in number of blocks).
 *
 * @return the size of prefetch queue (in number of blocks).
 */
public int getPrefetchBlockCount() {
    return this.prefetchBlockCount;
}
3.68
hadoop_S3AReadOpContext_build
/**
 * Validate the context.
 * @return a read operation context ready for use.
 */
public S3AReadOpContext build() {
    requireNonNull(inputPolicy, "inputPolicy");
    requireNonNull(changeDetectionPolicy, "changeDetectionPolicy");
    requireNonNull(auditSpan, "auditSpan");
    Preconditions.checkArgument(readahead >= 0, "invalid readahead %d", readahead);
    Preconditions.checkArgument(asyncDrainThreshold >= 0, "invalid drainThreshold %d", asyncDrainThreshold);
    requireNonNull(ioStatisticsAggregator, "ioStatisticsAggregator");
    return this;
}
3.68
flink_Task_run
/** The core work method that bootstraps the task and executes its code. */
@Override
public void run() {
    try {
        doRun();
    } finally {
        terminationFuture.complete(executionState);
    }
}
3.68
flink_SortMergeFullOuterJoinIterator_bufferRows1
/** Buffer rows from iterator1 with the same key. */
private void bufferRows1() throws IOException {
    BinaryRowData copy = key1.copy();
    buffer1.reset();
    do {
        buffer1.add(row1);
    } while (nextRow1() && keyComparator.compare(key1, copy) == 0);
    buffer1.complete();
}
3.68
framework_VComboBox_clearPendingNavigation
/**
 * Called by the connector when any pending navigation operations should be
 * cleared.
 */
public void clearPendingNavigation() {
    navigationCallback = null;
}
3.68
hadoop_FilterFileSystem_open
/**
 * Opens an FSDataInputStream at the indicated Path.
 * @param f the file name to open
 * @param bufferSize the size of the buffer to be used.
 */
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
    return fs.open(f, bufferSize);
}
3.68
querydsl_Expressions_predicate
/**
 * Create a new Predicate operation
 *
 * @param operator operator
 * @param args operation arguments
 * @return operation expression
 */
public static BooleanOperation predicate(Operator operator, Expression<?>... args) {
    return new BooleanOperation(operator, args);
}
3.68
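An illustrative call (assumed usage, not from the dataset), using QueryDSL's Ops operator enum and the Expressions.path/constant factories:

BooleanOperation nameIsBob = Expressions.predicate(
        Ops.EQ,
        Expressions.path(String.class, "name"),
        Expressions.constant("Bob"));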
framework_DateField_getResolution
/**
 * Gets the resolution.
 *
 * @return the resolution
 */
public Resolution getResolution() {
    return resolution;
}
3.68
graphhopper_CustomModelParser_createWeightingParameters
/**
 * This method compiles a new subclass of CustomWeightingHelper composed from the
 * provided CustomModel, caches it and returns an instance.
 *
 * @param priorityEnc can be null
 */
public static CustomWeighting.Parameters createWeightingParameters(CustomModel customModel, EncodedValueLookup lookup,
                                                                   DecimalEncodedValue avgSpeedEnc, double globalMaxSpeed,
                                                                   DecimalEncodedValue priorityEnc) {
    double globalMaxPriority = priorityEnc == null ? 1 : priorityEnc.getMaxStorableDecimal();
    // if the same custom model is used with a different base profile we cannot use the cached version
    String key = customModel + ",speed:" + avgSpeedEnc.getName() + ",global_max_speed:" + globalMaxSpeed
            + (priorityEnc == null ? "" : "prio:" + priorityEnc.getName() + ",global_max_priority:" + globalMaxPriority);
    if (key.length() > 100_000)
        throw new IllegalArgumentException("Custom Model too big: " + key.length());

    Class<?> clazz = customModel.isInternal() ? INTERNAL_CACHE.get(key) : null;
    if (CACHE_SIZE > 0 && clazz == null)
        clazz = CACHE.get(key);
    if (clazz == null) {
        clazz = createClazz(customModel, lookup, globalMaxSpeed, globalMaxPriority);
        if (customModel.isInternal()) {
            INTERNAL_CACHE.put(key, clazz);
            if (INTERNAL_CACHE.size() > 100) {
                CACHE.putAll(INTERNAL_CACHE);
                INTERNAL_CACHE.clear();
                LoggerFactory.getLogger(CustomModelParser.class).warn("Internal cache must stay small but was "
                        + INTERNAL_CACHE.size() + ". Cleared it. Misuse of CustomModel::internal?");
            }
        } else if (CACHE_SIZE > 0) {
            CACHE.put(key, clazz);
        }
    }

    try {
        // The class does not need to be thread-safe as we create an instance per request
        CustomWeightingHelper prio = (CustomWeightingHelper) clazz.getDeclaredConstructor().newInstance();
        prio.init(lookup, avgSpeedEnc, priorityEnc, CustomModel.getAreasAsMap(customModel.getAreas()));
        return new CustomWeighting.Parameters(
                prio::getSpeed, prio::getPriority,
                prio.getMaxSpeed(), prio.getMaxPriority(),
                customModel.getDistanceInfluence() == null ? 0 : customModel.getDistanceInfluence(),
                customModel.getHeadingPenalty() == null ? Parameters.Routing.DEFAULT_HEADING_PENALTY : customModel.getHeadingPenalty());
    } catch (ReflectiveOperationException ex) {
        throw new IllegalArgumentException("Cannot compile expression " + ex.getMessage(), ex);
    }
}
3.68
hadoop_RouterFedBalance_setBandWidth
/**
 * Specify bandwidth per map in MB.
 * @param value the bandwidth.
 */
public Builder setBandWidth(int value) {
    this.bandwidth = value;
    return this;
}
3.68
framework_Table_getFirstAddedItemIndex
/**
 * Subclass and override this to enable partial row additions, bypassing the
 * normal caching mechanism. This is useful for e.g. TreeTable, where
 * expanding a node should only fetch and add the items inside of that node.
 *
 * @return The index of the first added item. For plain Table it is always 0.
 */
protected int getFirstAddedItemIndex() {
    return 0;
}
3.68
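A hedged sketch of the override the Javadoc invites; the expandedNodeIndex field is a hypothetical detail of a TreeTable-like subclass.

@Override
protected int getFirstAddedItemIndex() {
    // Hypothetical field updated when a node is expanded, so only the rows
    // inside that node are fetched instead of invalidating the whole cache.
    return expandedNodeIndex;
}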
framework_IndexedContainer_getAddedItemIndex
/**
 * If and only if one item is added, gives its index.
 *
 * @return -1 if either multiple items are changed or some other change
 *         than add is done.
 */
public int getAddedItemIndex() {
    return addedItemIndex;
}
3.68
flink_DataTypeTemplate_copyWithoutDataType
/** Copies this template but removes the explicit data type (if available). */
DataTypeTemplate copyWithoutDataType() {
    return new DataTypeTemplate(
            null,
            rawSerializer,
            inputGroup,
            version,
            allowRawGlobally,
            allowRawPattern,
            forceRawPattern,
            defaultDecimalPrecision,
            defaultDecimalScale,
            defaultYearPrecision,
            defaultSecondPrecision);
}
3.68
flink_ExecutionConfigAccessor_fromProgramOptions
/**
 * Creates an {@link ExecutionConfigAccessor} based on the provided {@link ProgramOptions} as
 * provided by the user through the CLI.
 */
public static <T> ExecutionConfigAccessor fromProgramOptions(
        final ProgramOptions options, final List<T> jobJars) {
    checkNotNull(options);
    checkNotNull(jobJars);

    final Configuration configuration = new Configuration();

    options.applyToConfiguration(configuration);
    ConfigUtils.encodeCollectionToConfig(
            configuration, PipelineOptions.JARS, jobJars, Object::toString);

    return new ExecutionConfigAccessor(configuration);
}
3.68
framework_Upload_isImmediateMode
/**
 * Returns the immediate mode of the upload.
 * <p>
 * The default mode of an Upload component is immediate.
 *
 * @return {@code true} if the upload is in immediate mode, {@code false}
 *         if the upload is not in immediate mode
 * @see #setImmediateMode(boolean)
 * @since 8.0
 */
public boolean isImmediateMode() {
    return getState(false).immediateMode;
}
3.68
dubbo_MethodConfig_getPrefixes
/**
 * Get method prefixes.
 *
 * @return the prefix list (parent prefix + method name)
 */
@Override
@Parameter(excluded = true, attribute = false)
public List<String> getPrefixes() {
    // parent prefix + method name
    if (parentPrefix != null) {
        List<String> prefixes = new ArrayList<>();
        prefixes.add(parentPrefix + "." + this.getName());
        return prefixes;
    } else {
        throw new IllegalStateException("The parent prefix of MethodConfig is null");
    }
}
3.68
hbase_RegionStates_metaLogSplit
/**
 * Called after we've split the meta logs on a crashed Server.
 * @see #metaLogSplitting(ServerName)
 */
public void metaLogSplit(ServerName serverName) {
    setServerState(serverName, ServerState.SPLITTING_META_DONE);
}
3.68
graphhopper_PbfDecoder_waitForUpdate
/**
 * Any thread can call this method when it wishes to wait until an update has
 * been performed by another thread.
 */
private void waitForUpdate() {
    try {
        dataWaitCondition.await();
    } catch (InterruptedException e) {
        throw new RuntimeException("Thread was interrupted.", e);
    }
}
3.68
flink_StreamExecutionEnvironment_execute
/**
 * Triggers the program execution. The environment will execute all parts of the program that
 * have resulted in a "sink" operation. Sink operations are for example printing results or
 * forwarding them to a message queue.
 *
 * @param streamGraph the stream graph representing the transformations
 * @return The result of the job execution, containing elapsed time and accumulators.
 * @throws Exception which occurs during job execution.
 */
@Internal
public JobExecutionResult execute(StreamGraph streamGraph) throws Exception {
    final JobClient jobClient = executeAsync(streamGraph);

    try {
        final JobExecutionResult jobExecutionResult;

        if (configuration.getBoolean(DeploymentOptions.ATTACHED)) {
            jobExecutionResult = jobClient.getJobExecutionResult().get();
        } else {
            jobExecutionResult = new DetachedJobExecutionResult(jobClient.getJobID());
        }

        jobListeners.forEach(
                jobListener -> jobListener.onJobExecuted(jobExecutionResult, null));
        return jobExecutionResult;
    } catch (Throwable t) {
        // get() on the JobExecutionResult Future will throw an ExecutionException. This
        // behaviour was largely not there in Flink versions before the PipelineExecutor
        // refactoring so we should strip that exception.
        Throwable strippedException = ExceptionUtils.stripExecutionException(t);

        jobListeners.forEach(
                jobListener -> {
                    jobListener.onJobExecuted(null, strippedException);
                });
        ExceptionUtils.rethrowException(strippedException);

        // never reached, only make javac happy
        return null;
    }
}
3.68
open-banking-gateway_FintechPsuAspspTuple_buildFintechInboxPrvKey
/**
 * Creates PSU-ASPSP template key pair for FinTechs' INBOX.
 * @param path Datasafe path
 * @param em Entity manager to persist to
 * @return FinTech scoped private key for a given PSU to be used in its inbox.
 */
public static FintechPsuAspspPrvKeyInbox buildFintechInboxPrvKey(String path, EntityManager em) {
    FintechPsuAspspTuple tuple = new FintechPsuAspspTuple(path);
    return FintechPsuAspspPrvKeyInbox.builder()
            .fintech(em.find(Fintech.class, tuple.getFintechId()))
            .psu(em.find(Psu.class, tuple.getPsuId()))
            .aspsp(em.find(Bank.class, tuple.getAspspId()))
            .build();
}
3.68
flink_Predicates_arePublicFinalOfTypeWithAnnotation
/**
 * Tests that the field is {@code public final}, has the fully qualified type name of {@code
 * fqClassName} and is annotated with the {@code annotationType}.
 */
public static DescribedPredicate<JavaField> arePublicFinalOfTypeWithAnnotation(
        String fqClassName, Class<? extends Annotation> annotationType) {
    return arePublicFinalOfType(fqClassName).and(annotatedWith(annotationType));
}
3.68
flink_HsFullSpillingStrategy_onBufferConsumed
// For the case of buffer consumed, there is no need to take action for HsFullSpillingStrategy.
@Override
public Optional<Decision> onBufferConsumed(BufferIndexAndChannel consumedBuffer) {
    return Optional.of(Decision.NO_ACTION);
}
3.68
dubbo_MonitorFilter_invoke
/**
 * The invocation interceptor. It collects data about this invocation and sends it
 * to the monitor center.
 *
 * @param invoker service
 * @param invocation invocation.
 * @return {@link Result} the invoke result
 * @throws RpcException if the invocation fails
 */
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    if (invoker.getUrl().hasAttribute(MONITOR_KEY)) {
        invocation.put(MONITOR_FILTER_START_TIME, System.currentTimeMillis());
        invocation.put(MONITOR_REMOTE_HOST_STORE, RpcContext.getServiceContext().getRemoteHost());
        // count up
        getConcurrent(invoker, invocation).incrementAndGet();
    }
    ServiceModel serviceModel = invoker.getUrl().getServiceModel();
    if (serviceModel instanceof ProviderModel) {
        ((ProviderModel) serviceModel).updateLastInvokeTime();
    }
    // proceed invocation chain
    return invoker.invoke(invocation);
}
3.68
flink_JobResourceRequirements_validate
/**
 * This method validates that:
 *
 * <ul>
 *   <li>The requested boundaries are less or equal than the max parallelism.
 *   <li>The requested boundaries are greater than zero.
 *   <li>The requested upper bound is greater than the lower bound.
 *   <li>There are no unknown job vertex ids and that we're not missing any.
 * </ul>
 *
 * In case any boundary is set to {@code -1}, it will be expanded to the default value ({@code
 * 1} for the lower bound and the max parallelism for the upper bound), before the validation.
 *
 * @param jobResourceRequirements contains the new resources requirements for the job vertices
 * @param maxParallelismPerVertex allows us to look up maximum possible parallelism for a job
 *     vertex
 * @return a list of validation errors
 */
public static List<String> validate(
        JobResourceRequirements jobResourceRequirements,
        Map<JobVertexID, Integer> maxParallelismPerVertex) {
    final List<String> errors = new ArrayList<>();
    final Set<JobVertexID> missingJobVertexIds =
            new HashSet<>(maxParallelismPerVertex.keySet());
    for (JobVertexID jobVertexId : jobResourceRequirements.getJobVertices()) {
        missingJobVertexIds.remove(jobVertexId);
        final Optional<Integer> maybeMaxParallelism =
                Optional.ofNullable(maxParallelismPerVertex.get(jobVertexId));
        if (maybeMaxParallelism.isPresent()) {
            final JobVertexResourceRequirements.Parallelism requestedParallelism =
                    jobResourceRequirements.getParallelism(jobVertexId);
            int lowerBound =
                    requestedParallelism.getLowerBound() == -1
                            ? 1
                            : requestedParallelism.getLowerBound();
            int upperBound =
                    requestedParallelism.getUpperBound() == -1
                            ? maybeMaxParallelism.get()
                            : requestedParallelism.getUpperBound();
            if (lowerBound < 1 || upperBound < 1) {
                errors.add(
                        String.format(
                                "Both, the requested lower bound [%d] and upper bound [%d] for job vertex [%s] must be greater than zero.",
                                lowerBound, upperBound, jobVertexId));
                // Don't validate this vertex any further to avoid additional noise.
                continue;
            }
            if (lowerBound > upperBound) {
                errors.add(
                        String.format(
                                "The requested lower bound [%d] for job vertex [%s] is higher than the upper bound [%d].",
                                lowerBound, jobVertexId, upperBound));
            }
            if (maybeMaxParallelism.get() < upperBound) {
                errors.add(
                        String.format(
                                "The newly requested parallelism %d for the job vertex %s exceeds its maximum parallelism %d.",
                                upperBound, jobVertexId, maybeMaxParallelism.get()));
            }
        } else {
            errors.add(
                    String.format(
                            "Job vertex [%s] was not found in the JobGraph.", jobVertexId));
        }
    }
    for (JobVertexID jobVertexId : missingJobVertexIds) {
        errors.add(
                String.format(
                        "The request is incomplete, missing job vertex [%s] resource requirements.",
                        jobVertexId));
    }
    return errors;
}
3.68
graphhopper_Path_setFromNode
/**
 * We need to remember fromNode explicitly as it's not saved in one edgeId of edgeIds.
 */
public Path setFromNode(int from) {
    fromNode = from;
    return this;
}
3.68
morf_InsertStatement_copyOnWriteOrMutate
/**
 * Either uses {@link #shallowCopy()} and mutates the result, returning it,
 * or mutates the statement directly, depending on
 * {@link AliasedField#immutableDslEnabled()}.
 *
 * TODO for removal along with mutable behaviour.
 *
 * @param transform A transform which modifies the shallow copy builder.
 * @param mutator Code which applies the local changes instead.
 * @return The result (which may be {@code this}).
 */
private InsertStatement copyOnWriteOrMutate(Function<InsertStatementBuilder, InsertStatementBuilder> transform,
                                            Runnable mutator) {
    if (AliasedField.immutableDslEnabled()) {
        return transform.apply(shallowCopy()).build();
    } else {
        mutator.run();
        return this;
    }
}
3.68
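A hedged example of a fluent setter built on this helper; the fields(...) method shape is illustrative, not necessarily the real Morf signature.

public InsertStatement fields(AliasedFieldBuilder... fields) {
    return copyOnWriteOrMutate(
        // Immutable path: transform the shallow copy's builder.
        b -> b.fields(fields),
        // Mutable path: apply the change to this instance.
        () -> this.fields.addAll(Builder.Helper.buildAll(Arrays.asList(fields))));
}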
hbase_AggregateImplementation_getSum
/**
 * Gives the sum for a given combination of column qualifier and column family, in the given row
 * range as defined in the Scan object. In its current implementation, it takes one column family
 * and one column qualifier (if provided). In case of null column qualifier, sum for the entire
 * column family will be returned.
 */
@Override
public void getSum(RpcController controller, AggregateRequest request,
  RpcCallback<AggregateResponse> done) {
  AggregateResponse response = null;
  InternalScanner scanner = null;
  long sum = 0L;
  try {
    ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
    S sumVal = null;
    T temp;
    Scan scan = ProtobufUtil.toScan(request.getScan());
    scanner = env.getRegion().getScanner(scan);
    byte[] colFamily = scan.getFamilies()[0];
    NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
    byte[] qualifier = null;
    if (qualifiers != null && !qualifiers.isEmpty()) {
      qualifier = qualifiers.pollFirst();
    }
    List<Cell> results = new ArrayList<>();
    boolean hasMoreRows = false;
    do {
      hasMoreRows = scanner.next(results);
      int listSize = results.size();
      for (int i = 0; i < listSize; i++) {
        temp = ci.getValue(colFamily, qualifier, results.get(i));
        if (temp != null) {
          sumVal = ci.add(sumVal, ci.castToReturnType(temp));
        }
      }
      results.clear();
    } while (hasMoreRows);
    if (sumVal != null) {
      response = AggregateResponse.newBuilder()
        .addFirstPart(ci.getProtoForPromotedType(sumVal).toByteString()).build();
    }
  } catch (IOException e) {
    CoprocessorRpcUtils.setControllerException(controller, e);
  } finally {
    if (scanner != null) {
      IOUtils.closeQuietly(scanner);
    }
  }
  log.debug("Sum from this region is "
    + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + sum);
  done.run(response);
}
3.68
flink_PartitionTransformation_getExchangeMode
/** Returns the {@link StreamExchangeMode} of this {@link PartitionTransformation}. */
public StreamExchangeMode getExchangeMode() {
    return exchangeMode;
}
3.68
hudi_HoodieCommitMetadata_getFullPathToFileStatus
/**
 * Extract the file status of all affected files from the commit metadata. If a file has
 * been touched multiple times in the given commits, the return value will keep the one
 * from the latest commit.
 *
 * @param hadoopConf the Hadoop configuration
 * @param basePath The base path
 * @return the file full path to file status mapping
 */
public Map<String, FileStatus> getFullPathToFileStatus(Configuration hadoopConf, String basePath) {
    Map<String, FileStatus> fullPathToFileStatus = new HashMap<>();
    for (List<HoodieWriteStat> stats : getPartitionToWriteStats().values()) {
        // Iterate through all the written files.
        for (HoodieWriteStat stat : stats) {
            String relativeFilePath = stat.getPath();
            Path fullPath = relativeFilePath != null
                ? FSUtils.getPartitionPath(basePath, relativeFilePath) : null;
            if (fullPath != null) {
                long blockSize = FSUtils.getFs(fullPath.toString(), hadoopConf).getDefaultBlockSize(fullPath);
                FileStatus fileStatus = new FileStatus(stat.getFileSizeInBytes(), false, 0, blockSize, 0, fullPath);
                fullPathToFileStatus.put(fullPath.getName(), fileStatus);
            }
        }
    }
    return fullPathToFileStatus;
}
3.68
framework_VAbstractCalendarPanel_buildCalendarHeader
/**
 * Builds the top buttons and current month and year header.
 *
 * @param needsMonth
 *            Should the month buttons be visible?
 * @param needsBody
 *            indicates whether the calendar body is drawn
 */
private void buildCalendarHeader(boolean needsMonth, boolean needsBody) {
    getRowFormatter().addStyleName(0,
            parent.getStylePrimaryName() + "-calendarpanel-header");

    if (prevMonth == null && needsMonth) {
        prevMonth = new VEventButton();
        prevMonth.setHTML("&lsaquo;");
        prevMonth.setStyleName("v-button-prevmonth");

        nextMonth = new VEventButton();
        nextMonth.setHTML("&rsaquo;");
        nextMonth.setStyleName("v-button-nextmonth");

        setWidget(0, 3, nextMonth);
        setWidget(0, 1, prevMonth);

        Roles.getButtonRole().set(prevMonth.getElement());
        Roles.getButtonRole()
                .setTabindexExtraAttribute(prevMonth.getElement(), -1);
        Roles.getButtonRole().set(nextMonth.getElement());
        Roles.getButtonRole()
                .setTabindexExtraAttribute(nextMonth.getElement(), -1);
    } else if (prevMonth != null && !needsMonth) {
        // Remove month traverse buttons
        remove(prevMonth);
        remove(nextMonth);
        prevMonth = null;
        nextMonth = null;
    }

    if (prevYear == null) {
        prevYear = new VEventButton();
        prevYear.setHTML("&laquo;");
        prevYear.setStyleName("v-button-prevyear");

        nextYear = new VEventButton();
        nextYear.setHTML("&raquo;");
        nextYear.setStyleName("v-button-nextyear");

        setWidget(0, 0, prevYear);
        setWidget(0, 4, nextYear);

        Roles.getButtonRole().set(prevYear.getElement());
        Roles.getButtonRole()
                .setTabindexExtraAttribute(prevYear.getElement(), -1);
        Roles.getButtonRole().set(nextYear.getElement());
        Roles.getButtonRole()
                .setTabindexExtraAttribute(nextYear.getElement(), -1);
    }

    updateControlButtonRangeStyles(needsMonth);
    updateAssistiveLabels();

    @SuppressWarnings("deprecation")
    final String monthName = needsMonth
            ? getDateTimeService().getMonth(displayedMonth.getMonth())
            : "";
    @SuppressWarnings("deprecation")
    final int year = displayedMonth.getYear() + 1900;

    getFlexCellFormatter().setStyleName(0, 2,
            parent.getStylePrimaryName() + "-calendarpanel-month");
    getFlexCellFormatter().setStyleName(0, 0,
            parent.getStylePrimaryName() + "-calendarpanel-prevyear");
    getFlexCellFormatter().setStyleName(0, 4,
            parent.getStylePrimaryName() + "-calendarpanel-nextyear");
    getFlexCellFormatter().setStyleName(0, 3,
            parent.getStylePrimaryName() + "-calendarpanel-nextmonth");
    getFlexCellFormatter().setStyleName(0, 1,
            parent.getStylePrimaryName() + "-calendarpanel-prevmonth");

    // Set ID to be referenced from focused date or calendar panel
    Element monthYearElement = getFlexCellFormatter().getElement(0, 2);
    AriaHelper.ensureHasId(monthYearElement);
    Event.sinkEvents(monthYearElement, Event.ONCLICK);
    Event.setEventListener(monthYearElement, event -> {
        // Don't handle header clicks if the resolution is below month
        if (!isEnabled() || isReadonly() || isBelowMonth(getResolution())) {
            return;
        }
        selectFocused();
        onSubmit();
    });
    if (!needsBody) {
        Roles.getGridRole().setAriaLabelledbyProperty(getElement(),
                Id.of(monthYearElement));
    } else {
        Roles.getGridRole().removeAriaLabelledbyProperty(getElement());
    }

    setHTML(0, 2,
            "<span class=\"" + parent.getStylePrimaryName()
                    + "-calendarpanel-month\">" + monthName + " " + year
                    + "</span>");

    if (!isBelowMonth(getResolution())) {
        monthYearElement.addClassName("header-month-year");
    }
}
3.68
pulsar_AuthenticationUtil_create
/**
 * Create an instance of the Authentication-Plugin.
 *
 * @param authPluginClassName
 *            name of the Authentication-Plugin you want to use
 * @param authParams
 *            map which represents parameters for the Authentication-Plugin
 * @return instance of the Authentication-Plugin
 * @throws UnsupportedAuthenticationException if the plugin cannot be instantiated
 */
@SuppressWarnings("deprecation")
public static final Authentication create(String authPluginClassName, Map<String, String> authParams)
        throws UnsupportedAuthenticationException {
    try {
        if (isNotBlank(authPluginClassName)) {
            Class<?> authClass = Class.forName(authPluginClassName);
            Authentication auth = (Authentication) authClass.getDeclaredConstructor().newInstance();
            auth.configure(authParams);
            return auth;
        } else {
            return new AuthenticationDisabled();
        }
    } catch (Throwable t) {
        throw new UnsupportedAuthenticationException(t);
    }
}
3.68
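A hedged call-site example; the plugin class name is Pulsar's token plugin and the parameter key is a placeholder assumption.

Map<String, String> authParams = new HashMap<>();
authParams.put("token", "<jwt-token>"); // placeholder parameter
Authentication auth = AuthenticationUtil.create(
        "org.apache.pulsar.client.impl.auth.AuthenticationToken", authParams);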
morf_MergeStatement_tableUniqueKey
/**
 * <p>
 * Specifies the fields which make up a unique index or primary key on the
 * target table.
 * </p>
 * <p>
 * These <em>must</em> fully match a unique index or primary key, otherwise
 * this statement will fail on MySQL. Note also potential issues around having
 * two unique indexes or a primary key and unique index, as detailed
 * <a href="http://dev.mysql.com/doc/refman/5.0/en/insert-on-duplicate.html">here</a>.
 * </p>
 *
 * @param keyFields the key fields.
 * @return a statement with the changes applied.
 */
public MergeStatement tableUniqueKey(List<AliasedField> keyFields) {
    if (AliasedField.immutableDslEnabled()) {
        return shallowCopy().tableUniqueKey(keyFields).build();
    } else {
        this.tableUniqueKey.addAll(keyFields);
        return this;
    }
}
3.68
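A hedged usage sketch of this setter in Morf's merge DSL; the table and field names, and the chained into/from setters, are illustrative assumptions rather than confirmed API.

MergeStatement merge = new MergeStatement()
        .into(new TableReference("Person"))
        .from(select().from(new TableReference("PersonStaging")))
        // Must fully match a unique index or primary key on Person.
        .tableUniqueKey(Arrays.asList(field("id")));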
framework_VDragEvent_getElementOver
/**
 * Detecting the element on which the event is happening may be problematic
 * during a drag and drop operation. This is especially the case if a drag
 * image (often also called a drag proxy) is kept under the mouse cursor (see
 * {@link #createDragImage(Element, boolean)}). Drag and drop event handlers
 * (like the one provided by {@link VDragAndDropManager}) should set the
 * elementOver field to reflect the actual element on which the pointer
 * currently is (drag image excluded). {@link VDropHandler}s can then more
 * easily react properly on drag events by reading the element via this
 * method.
 *
 * @return the element in {@link VDropHandler} on which mouse cursor is on
 */
public com.google.gwt.user.client.Element getElementOver() {
    if (elementOver != null) {
        return DOM.asOld(elementOver);
    } else if (currentGwtEvent != null) {
        return currentGwtEvent.getEventTarget().cast();
    }
    return null;
}
3.68
dubbo_RestRPCInvocationUtil_getInvokerByServiceInvokeMethod
/**
 * Get the invoker by the service method.
 * <p>
 * Compares the method's name and parameter types.
 *
 * @param serviceMethod the service method to look up
 * @return the matching invoker, or {@code null} if none is found
 */
public static Invoker getInvokerByServiceInvokeMethod(Method serviceMethod, ServiceDeployer serviceDeployer) {
    if (serviceMethod == null) {
        return null;
    }

    PathMatcher pathMatcher = PathMatcher.getInvokeCreatePathMatcher(serviceMethod);
    InvokerAndRestMethodMetadataPair pair = getRestMethodMetadataAndInvokerPair(pathMatcher, serviceDeployer);

    if (pair == null) {
        return null;
    }

    return pair.getInvoker();
}
3.68
hudi_TransactionUtils_resolveWriteConflictIfAny
/**
 * Resolve any write conflicts when committing data.
 *
 * @param table the Hoodie table
 * @param currentTxnOwnerInstant the instant owning the current transaction
 * @param thisCommitMetadata the commit metadata of this commit
 * @param config the write config
 * @param lastCompletedTxnOwnerInstant the instant of the last completed transaction
 * @param reloadActiveTimeline whether to reload the active timeline before resolving
 * @param pendingInstants pending instants at the start of this write
 *
 * @return the resolved commit metadata
 * @throws HoodieWriteConflictException if a conflict cannot be resolved
 */
public static Option<HoodieCommitMetadata> resolveWriteConflictIfAny(
    final HoodieTable table,
    final Option<HoodieInstant> currentTxnOwnerInstant,
    final Option<HoodieCommitMetadata> thisCommitMetadata,
    final HoodieWriteConfig config,
    Option<HoodieInstant> lastCompletedTxnOwnerInstant,
    boolean reloadActiveTimeline,
    Set<String> pendingInstants) throws HoodieWriteConflictException {
  WriteOperationType operationType = thisCommitMetadata.map(HoodieCommitMetadata::getOperationType).orElse(null);
  if (config.needResolveWriteConflict(operationType)) {
    // deal with pendingInstants
    Stream<HoodieInstant> completedInstantsDuringCurrentWriteOperation =
        getCompletedInstantsDuringCurrentWriteOperation(table.getMetaClient(), pendingInstants);
    ConflictResolutionStrategy resolutionStrategy = config.getWriteConflictResolutionStrategy();
    if (reloadActiveTimeline) {
      table.getMetaClient().reloadActiveTimeline();
    }
    Stream<HoodieInstant> instantStream = Stream.concat(
        resolutionStrategy.getCandidateInstants(table.getMetaClient(), currentTxnOwnerInstant.get(),
            lastCompletedTxnOwnerInstant),
        completedInstantsDuringCurrentWriteOperation);
    final ConcurrentOperation thisOperation =
        new ConcurrentOperation(currentTxnOwnerInstant.get(), thisCommitMetadata.orElse(new HoodieCommitMetadata()));
    instantStream.forEach(instant -> {
      try {
        ConcurrentOperation otherOperation = new ConcurrentOperation(instant, table.getMetaClient());
        if (resolutionStrategy.hasConflict(thisOperation, otherOperation)) {
          LOG.info("Conflict encountered between current instant = " + thisOperation + " and instant = "
              + otherOperation + ", attempting to resolve it...");
          resolutionStrategy.resolveConflict(table, thisOperation, otherOperation);
        }
      } catch (IOException io) {
        throw new HoodieWriteConflictException("Unable to resolve conflict, if present", io);
      }
    });
    LOG.info("Successfully resolved conflicts, if any");
    return thisOperation.getCommitMetadataOption();
  }
  return thisCommitMetadata;
}
3.68
hbase_ByteBuffInputStream_available
/**
 * @return the number of remaining bytes that can be read (or skipped over) from this input
 *         stream.
 */
@Override
public int available() {
    return this.buf.remaining();
}
3.68
hbase_AccessChecker_validateCallerWithFilterUser
/*
 * Validate the hasPermission operation caller with the filter user. Self check doesn't require
 * any privilege but for others the caller must have ADMIN privilege.
 */
public User validateCallerWithFilterUser(User caller, TablePermission tPerm, String inputUserName)
  throws IOException {
  User filterUser = null;
  if (!caller.getShortName().equals(inputUserName)) {
    // User should have admin privilege if checking permission for other users
    requirePermission(caller, "hasPermission", tPerm.getTableName(), tPerm.getFamily(),
      tPerm.getQualifier(), inputUserName, Action.ADMIN);
    // Initialize user instance for the input user name
    List<String> groups = getUserGroups(inputUserName);
    filterUser = new InputUser(inputUserName, groups.toArray(new String[groups.size()]));
  } else {
    // The user doesn't need ADMIN privilege for a self check.
    // Setting action as null in AuthResult to display empty action in audit log
    AuthResult result = AuthResult.allow("hasPermission", "Self user validation allowed", caller,
      null, tPerm.getTableName(), tPerm.getFamily(), tPerm.getQualifier());
    logResult(result);
    filterUser = caller;
  }
  return filterUser;
}
3.68
hadoop_RollingWindowAverage_getCurrentAverage
/**
 * Get the current average.
 * @return The current average.
 */
public synchronized long getCurrentAverage() {
    cleanupOldPoints();
    if (currentPoints.isEmpty()) {
        return 0;
    }
    long sum = 0;
    for (DataPoint current : currentPoints) {
        sum += current.getValue();
    }
    return sum / currentPoints.size();
}
3.68
hbase_EnvironmentEdgeManager_reset
/**
 * Resets the managed instance to the default instance: {@link DefaultEnvironmentEdge}.
 */
public static void reset() {
    injectEdge(new DefaultEnvironmentEdge());
}
3.68
flink_CoGroupOperatorBase_setGroupOrderForInputTwo
/**
 * Sets the order of the elements within a group for the second input.
 *
 * @param order The order for the elements in a group.
 */
public void setGroupOrderForInputTwo(Ordering order) {
    setGroupOrder(1, order);
}
3.68
flink_LookupCacheManager_keepCacheOnRelease
// ---------------------------- For testing purpose ------------------------------

public static void keepCacheOnRelease(boolean toKeep) {
    keepCacheOnRelease = toKeep;
}
3.68
hadoop_ListResultEntrySchema_withETag
/**
 * Set the eTag value.
 *
 * @param eTag the eTag value to set
 * @return the ListEntrySchema object itself.
 */
public ListResultEntrySchema withETag(final String eTag) {
    this.eTag = eTag;
    return this;
}
3.68
flink_ExecEdge_hashShuffle
/**
 * Return hash {@link Shuffle}.
 *
 * @param keys hash keys
 */
public static Shuffle hashShuffle(int[] keys) {
    return new HashShuffle(keys);
}
3.68
dubbo_ReferenceConfig_appendConfig
/**
 * Append all configuration required for service reference.
 *
 * @return reference parameters
 */
private Map<String, String> appendConfig() {
    Map<String, String> map = new HashMap<>(16);

    map.put(INTERFACE_KEY, interfaceName);
    map.put(SIDE_KEY, CONSUMER_SIDE);

    ReferenceConfigBase.appendRuntimeParameters(map);
    if (!ProtocolUtils.isGeneric(generic)) {
        String revision = Version.getVersion(interfaceClass, version);
        if (StringUtils.isNotEmpty(revision)) {
            map.put(REVISION_KEY, revision);
        }
        String[] methods = methods(interfaceClass);
        if (methods.length == 0) {
            logger.warn(CONFIG_NO_METHOD_FOUND, "", "",
                    "No method found in service interface: " + interfaceClass.getName());
            map.put(METHODS_KEY, ANY_VALUE);
        } else {
            map.put(METHODS_KEY, StringUtils.join(new TreeSet<>(Arrays.asList(methods)), COMMA_SEPARATOR));
        }
    }
    AbstractConfig.appendParameters(map, getApplication());
    AbstractConfig.appendParameters(map, getModule());
    AbstractConfig.appendParameters(map, consumer);
    AbstractConfig.appendParameters(map, this);
    appendMetricsCompatible(map);

    String hostToRegistry = ConfigUtils.getSystemProperty(DUBBO_IP_TO_REGISTRY);
    if (StringUtils.isEmpty(hostToRegistry)) {
        hostToRegistry = NetUtils.getLocalHost();
    } else if (isInvalidLocalHost(hostToRegistry)) {
        throw new IllegalArgumentException("Specified invalid registry ip from property:"
                + DUBBO_IP_TO_REGISTRY + ", value:" + hostToRegistry);
    }
    map.put(REGISTER_IP_KEY, hostToRegistry);

    if (CollectionUtils.isNotEmpty(getMethods())) {
        for (MethodConfig methodConfig : getMethods()) {
            AbstractConfig.appendParameters(map, methodConfig, methodConfig.getName());
            String retryKey = methodConfig.getName() + ".retry";
            if (map.containsKey(retryKey)) {
                String retryValue = map.remove(retryKey);
                if ("false".equals(retryValue)) {
                    map.put(methodConfig.getName() + ".retries", "0");
                }
            }
        }
    }

    return map;
}
3.68
flink_SkipListUtils_helpGetNodeLatestVersion
/**
 * Returns the newest version of the value for the node.
 *
 * @param node the node.
 * @param spaceAllocator the space allocator.
 * @return the latest version of the node's value
 */
static int helpGetNodeLatestVersion(long node, Allocator spaceAllocator) {
    Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
    int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
    MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
    int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
    long valuePointer = getValuePointer(segment, offsetInByteBuffer);
    return helpGetValueVersion(valuePointer, spaceAllocator);
}
3.68
morf_RemoveIndex_accept
/**
 * {@inheritDoc}
 *
 * @see org.alfasoftware.morf.upgrade.SchemaChange#accept(org.alfasoftware.morf.upgrade.SchemaChangeVisitor)
 */
@Override
public void accept(SchemaChangeVisitor visitor) {
    visitor.visit(this);
}
3.68
flink_CliFrontend_main
/** Submits the job based on the arguments. */
public static void main(final String[] args) {
    int retCode = INITIAL_RET_CODE;
    try {
        retCode = mainInternal(args);
    } finally {
        System.exit(retCode);
    }
}
3.68
framework_LoginForm_addLoginListener
/**
 * Adds a {@link LoginListener}.
 * <p>
 * The listener is called when the user presses the login button.
 *
 * @param listener
 *            the listener to add
 * @return a registration object for removing the listener
 * @since 8.0
 */
public Registration addLoginListener(LoginListener listener) {
    return addListener(LoginEvent.class, listener, ON_LOGIN_METHOD);
}
3.68
morf_MergeStatementBuilder_tableUniqueKey
/**
 * <p>
 * Specifies the fields which make up a unique index or primary key on the
 * target table.
 * </p>
 * <p>
 * These <em>must</em> fully match a unique index or primary key, otherwise
 * this statement will fail on MySQL. Note also potential issues around having
 * two unique indexes or a primary key and unique index, as detailed
 * <a href="http://dev.mysql.com/doc/refman/5.0/en/insert-on-duplicate.html">here</a>.
 * </p>
 *
 * @param keyFields the key fields.
 * @return this, for method chaining.
 */
public MergeStatementBuilder tableUniqueKey(List<? extends AliasedFieldBuilder> keyFields) {
    this.tableUniqueKey.addAll(Builder.Helper.buildAll(keyFields));
    return this;
}
3.68
framework_VMenuBar_getMenuItemWithElement
/**
 * Get menu item with given DOM element.
 *
 * @param element
 *            Element used in search
 * @return Menu item or null if not found
 *
 * @since 7.2
 */
public CustomMenuItem getMenuItemWithElement(Element element) {
    return getMenuItemWithElement(DOM.asOld(element));
}
3.68
hbase_StoreFileInfo_isHFile
/**
 * @param path Path to check.
 * @return True if the path has the format of an HFile.
 */
public static boolean isHFile(final Path path) {
    return isHFile(path.getName());
}
3.68
flink_TopNBuffer_containsKey
/**
 * Returns {@code true} if the buffer contains a mapping for the specified key.
 *
 * @param key key whose presence in the buffer is to be tested
 * @return {@code true} if the buffer contains a mapping for the specified key
 */
public boolean containsKey(RowData key) {
    return treeMap.containsKey(key);
}
3.68
hbase_RegionCoprocessorHost_hasCustomPostScannerFilterRow
/*
 * Whether any configured CPs override postScannerFilterRow hook
 */
public boolean hasCustomPostScannerFilterRow() {
    return hasCustomPostScannerFilterRow;
}
3.68
dubbo_ConfigurationUtils_getDynamicProperty
/**
 * For compact single instance.
 *
 * @deprecated Replaced by {@link ConfigurationUtils#getDynamicProperty(ScopeModel, String, String)}
 */
@Deprecated
public static String getDynamicProperty(String property, String defaultValue) {
    return getDynamicProperty(ApplicationModel.defaultModel(), property, defaultValue);
}
3.68
morf_UpgradePath_getUpgradeSqlScript
/**
 * @return A single string of the sql statements appended to each other
 *         appropriately
 */
public String getUpgradeSqlScript() {
    final StringBuilder sqlOutput = new StringBuilder();
    for (final String sqlStatement : getSql()) {
        sqlOutput.append(connectionResources.sqlDialect().formatSqlStatement(sqlStatement));
        sqlOutput.append(System.getProperty("line.separator"));
    }

    addCommentsToDropUpgradeStatusTable(sqlOutput);

    return sqlOutput.toString();
}
3.68
hadoop_SysInfoWindows_getNumProcessors
/** {@inheritDoc} */
@Override
public synchronized int getNumProcessors() {
    refreshIfNeeded();
    return numProcessors;
}
3.68
hudi_HoodieAvroHFileReader_readAllRecords
/**
 * NOTE: THIS SHOULD ONLY BE USED FOR TESTING, RECORDS ARE MATERIALIZED EAGERLY
 * <p>
 * Reads all the records with given schema
 */
public static List<IndexedRecord> readAllRecords(HoodieAvroHFileReader reader) throws IOException {
    Schema schema = reader.getSchema();
    return toStream(reader.getIndexedRecordIterator(schema))
        .collect(Collectors.toList());
}
3.68
framework_CurrentInstance_getInstances
/**
 * Gets the currently set instances so that they can later be restored using
 * {@link #restoreInstances(Map)}.
 *
 * @since 8.0
 *
 * @return a map containing the current instances
 */
public static Map<Class<?>, CurrentInstance> getInstances() {
    Map<Class<?>, CurrentInstance> map = INSTANCES.get();

    if (map == null) {
        return Collections.emptyMap();
    } else {
        Map<Class<?>, CurrentInstance> copy = new HashMap<>();
        boolean removeStale = false;
        for (Class<?> c : map.keySet()) {
            CurrentInstance ci = map.get(c);
            if (ci.instance.get() == null) {
                removeStale = true;
            } else {
                copy.put(c, ci);
            }
        }
        if (removeStale) {
            removeStaleInstances(map);
            if (map.isEmpty()) {
                INSTANCES.remove();
            }
        }
        return copy;
    }
}
3.68
framework_FocusableHTML_addKeyPressHandler
/*
 * (non-Javadoc)
 *
 * @see
 * com.google.gwt.event.dom.client.HasKeyPressHandlers#addKeyPressHandler
 * (com.google.gwt.event.dom.client.KeyPressHandler)
 */
@Override
public HandlerRegistration addKeyPressHandler(KeyPressHandler handler) {
    return addDomHandler(handler, KeyPressEvent.getType());
}
3.68
hbase_RegionCoprocessorHost_preStoreScannerOpen
/**
 * Called before opening a store scanner for a user scan.
 */
public ScanInfo preStoreScannerOpen(HStore store, Scan scan) throws IOException {
    if (coprocEnvironments.isEmpty()) {
        return store.getScanInfo();
    }
    CustomizedScanInfoBuilder builder = new CustomizedScanInfoBuilder(store.getScanInfo(), scan);
    execOperation(new RegionObserverOperationWithoutResult() {
        @Override
        public void call(RegionObserver observer) throws IOException {
            observer.preStoreScannerOpen(this, store, builder);
        }
    });
    return builder.build();
}
3.68
flink_OrcSplitReader_seekToRow
/** Seek to a particular row number. */
public void seekToRow(long rowCount) throws IOException {
    orcRowsReader.seekToRow(rowCount);
}
3.68
dubbo_MD5Utils_getMd5
/**
 * Calculates the MD5 value of the specified string.
 *
 * @param input the string to hash
 * @return the hex-encoded MD5 digest
 */
public String getMd5(String input) {
    byte[] md5;
    // MessageDigest instance is NOT thread-safe
    synchronized (mdInst) {
        mdInst.update(input.getBytes(UTF_8));
        md5 = mdInst.digest();
    }

    int j = md5.length;
    char[] str = new char[j * 2];
    int k = 0;
    for (int i = 0; i < j; i++) {
        byte byte0 = md5[i];
        str[k++] = hexDigits[byte0 >>> 4 & 0xf];
        str[k++] = hexDigits[byte0 & 0xf];
    }
    return new String(str);
}
3.68
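A small usage sketch; the digest shown is the standard MD5 test vector for "abc" (RFC 1321). Assumes MD5Utils' default constructor initializes the internal MessageDigest.

MD5Utils md5Utils = new MD5Utils();
String digest = md5Utils.getMd5("abc");
// digest -> "900150983cd24fb0d6963f7d28e17f72"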
hudi_HoodieFileGroup_getLatestFileSlice
/**
 * Gets the latest file slice. This can contain either:
 * <p>
 * - just the log files without a data file, or
 * - a data file with 0 or more log files.
 */
public Option<FileSlice> getLatestFileSlice() {
    // there should always be one
    return Option.fromJavaOptional(getAllFileSlices().findFirst());
}
3.68
hadoop_ListResultEntrySchema_withIsDirectory
/**
 * Set the isDirectory value.
 *
 * @param isDirectory the isDirectory value to set
 * @return the ListEntrySchema object itself.
 */
public ListResultEntrySchema withIsDirectory(final Boolean isDirectory) {
    this.isDirectory = isDirectory;
    return this;
}
3.68
framework_EmptyTable_getTicketNumber
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
 */
@Override
protected Integer getTicketNumber() {
    return 11189;
}
3.68
hadoop_AzureNativeFileSystemStore_getBlobReference
/**
 * This private method uses the root directory or the original container to
 * get the block blob reference depending on whether the original file system
 * object was constructed with a short- or long-form URI. If the root
 * directory is non-null the URI in the file constructor was in the long form.
 *
 * @param aKey
 *          : a key used to query Azure for the block blob.
 * @return blob : a reference to the Azure block blob corresponding to the key.
 * @throws URISyntaxException
 */
private CloudBlobWrapper getBlobReference(String aKey)
    throws StorageException, URISyntaxException {
  CloudBlobWrapper blob = null;
  if (isPageBlobKey(aKey)) {
    blob = this.container.getPageBlobReference(aKey);
  } else {
    blob = this.container.getBlockBlobReference(aKey);
    blob.setStreamMinimumReadSizeInBytes(downloadBlockSizeBytes);
    blob.setWriteBlockSizeInBytes(uploadBlockSizeBytes);
  }
  return blob;
}
3.68
shardingsphere-elasticjob_JobNodePath_getFullPath
/**
 * Get full path.
 *
 * @param node node
 * @return full path
 */
public String getFullPath(final String node) {
    return String.format("/%s/%s", jobName, node);
}
3.68
hadoop_BaseContainerTokenSecretManager_createNewMasterKey
// Need lock as we increment serialNo etc.
protected MasterKeyData createNewMasterKey() {
    this.writeLock.lock();
    try {
        return new MasterKeyData(serialNo++, generateSecret());
    } finally {
        this.writeLock.unlock();
    }
}
3.68
querydsl_JPAMapAccessVisitor_shorten
/**
 * Shorten the parent path to a length of max 2 elements
 */
private Path<?> shorten(Path<?> path, boolean outer) {
    if (aliases.containsKey(path)) {
        return aliases.get(path);
    } else if (path.getMetadata().isRoot()) {
        return path;
    } else if (path.getMetadata().getParent().getMetadata().isRoot() && outer) {
        return path;
    } else {
        Class<?> type = JPAQueryMixin.getElementTypeOrType(path);
        Path<?> parent = shorten(path.getMetadata().getParent(), false);
        Path oldPath = ExpressionUtils.path(path.getType(),
                new PathMetadata(parent, path.getMetadata().getElement(), path.getMetadata().getPathType()));
        if (oldPath.getMetadata().getParent().getMetadata().isRoot() && outer) {
            return oldPath;
        } else {
            Path newPath = ExpressionUtils.path(type, ExpressionUtils.createRootVariable(oldPath));
            aliases.put(path, newPath);
            metadata.addJoin(JoinType.LEFTJOIN, ExpressionUtils.as(oldPath, newPath));
            return newPath;
        }
    }
}
3.68
flink_SkipListUtils_putNextKeyPointer
/**
 * Puts the next key pointer on level 0 to key space.
 *
 * @param memorySegment memory segment for key space.
 * @param offset offset of key space in the memory segment.
 * @param nextKeyPointer next key pointer on level 0.
 */
public static void putNextKeyPointer(
        MemorySegment memorySegment, int offset, long nextKeyPointer) {
    memorySegment.putLong(offset + NEXT_KEY_POINTER_OFFSET, nextKeyPointer);
}
3.68
flink_TableChange_getNewWatermark
/** Returns the modified watermark. */
public WatermarkSpec getNewWatermark() {
    return newWatermark;
}
3.68
flink_MemorySegment_getDoubleBigEndian
/**
 * Reads a double-precision floating point value (64bit, 8 bytes) from the given position, in
 * big endian byte order. This method's speed depends on the system's native byte order, and it
 * is possibly slower than {@link #getDouble(int)}. For most cases (such as transient storage in
 * memory or serialization for I/O and network), it suffices to know that the byte order in
 * which the value is written is the same as the one in which it is read, and {@link
 * #getDouble(int)} is the preferable choice.
 *
 * @param index The position from which the value will be read.
 * @return The double value at the given position.
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
 *     segment size minus 8.
 */
public double getDoubleBigEndian(int index) {
    return Double.longBitsToDouble(getLongBigEndian(index));
}
3.68
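A hedged round-trip sketch, assuming Flink's MemorySegmentFactory and the matching putDoubleBigEndian writer exist alongside this reader.

MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(16);
segment.putDoubleBigEndian(0, 3.14);
// Reads back 3.14 on any platform, regardless of native byte order.
double value = segment.getDoubleBigEndian(0);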
hbase_RegionRemoteProcedureBase_reportTransition
// should be called with RegionStateNode locked, to avoid race with the execute method below
void reportTransition(MasterProcedureEnv env, RegionStateNode regionNode, ServerName serverName,
    TransitionCode transitionCode, long seqId) throws IOException {
  if (state != RegionRemoteProcedureBaseState.REGION_REMOTE_PROCEDURE_DISPATCH) {
    // should be a retry
    return;
  }
  if (!targetServer.equals(serverName)) {
    throw new UnexpectedStateException("Received report from " + serverName + ", expected "
      + targetServer + ", " + regionNode + ", proc=" + this);
  }
  checkTransition(regionNode, transitionCode, seqId);
  // this state means we have received the report from RS, it does not mean the result is fine,
  // as we may have received a FAILED_OPEN.
  this.state = RegionRemoteProcedureBaseState.REGION_REMOTE_PROCEDURE_REPORT_SUCCEED;
  this.transitionCode = transitionCode;
  this.seqId = seqId;
  // Persist the transition code and openSeqNum(if provided).
  // We should not update the hbase:meta directly as this may cause races when master restarts,
  // as the old active master may incorrectly report back to RS and cause the new master to hang
  // on a OpenRegionProcedure forever. See HBASE-22060 and HBASE-22074 for more details.
  boolean succ = false;
  try {
    persistAndWake(env, regionNode);
    succ = true;
  } finally {
    if (!succ) {
      this.state = RegionRemoteProcedureBaseState.REGION_REMOTE_PROCEDURE_DISPATCH;
      this.transitionCode = null;
      this.seqId = HConstants.NO_SEQNUM;
    }
  }
  try {
    updateTransitionWithoutPersistingToMeta(env, regionNode, transitionCode, seqId);
  } catch (IOException e) {
    throw new AssertionError("should not happen", e);
  }
}
3.68
flink_BlobLibraryCacheManager_getNumberOfManagedJobs
/**
 * Returns the number of registered jobs that this library cache manager handles.
 *
 * @return number of jobs (irrespective of the actual number of tasks per job)
 */
int getNumberOfManagedJobs() {
    synchronized (lockObject) {
        return cacheEntries.size();
    }
}
3.68
morf_AbstractSqlDialectTest_matches
/**
 * {@inheritDoc}
 * @see org.mockito.ArgumentMatcher#matches(java.lang.Object)
 */
@Override
public boolean matches(final byte[] argument) {
    return Arrays.equals(argument, expectedBytes);
}
3.68
flink_CrossOperator_projectTuple3
/**
 * Projects a pair of crossed elements to a {@link Tuple} with the previously selected
 * fields.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2> ProjectCross<I1, I2, Tuple3<T0, T1, T2>> projectTuple3() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple3<T0, T1, T2>> tType = new TupleTypeInfo<Tuple3<T0, T1, T2>>(fTypes);

    return new ProjectCross<I1, I2, Tuple3<T0, T1, T2>>(
            this.ds1, this.ds2, this.fieldIndexes, this.isFieldInFirst, tType, this, hint);
}
3.68
flink_ExceptionUtils_stripCompletionException
/**
 * Unpacks a {@link CompletionException} and returns its cause. Otherwise the given Throwable
 * is returned.
 *
 * @param throwable to unpack if it is a CompletionException
 * @return Cause of CompletionException or given Throwable
 */
public static Throwable stripCompletionException(Throwable throwable) {
    return stripException(throwable, CompletionException.class);
}
3.68
hadoop_FixedLengthInputFormat_setRecordLength
/**
 * Set the length of each record.
 * @param conf configuration
 * @param recordLength the length of a record
 */
public static void setRecordLength(Configuration conf, int recordLength) {
    conf.setInt(FIXED_RECORD_LENGTH, recordLength);
}
3.68
hadoop_FederationStateStoreUtils_logAndThrowException
/**
 * Throws an exception due to an error in <code>FederationStateStore</code>.
 *
 * @param log the logger interface
 * @param errMsg the error message
 * @param t the throwable raised in the called class.
 * @throws YarnException on failure
 */
public static void logAndThrowException(Logger log, String errMsg, Throwable t)
    throws YarnException {
  if (t != null) {
    log.error(errMsg, t);
    throw new YarnException(errMsg, t);
  } else {
    log.error(errMsg);
    throw new YarnException(errMsg);
  }
}
3.68
flink_AbstractTopNFunction_initRankEnd
/**
 * Initialize rank end.
 *
 * @param row input record
 * @return rank end
 * @throws Exception
 */
protected long initRankEnd(RowData row) throws Exception {
    if (isConstantRankEnd) {
        return rankEnd;
    } else {
        Long rankEndValue = rankEndState.value();
        long curRankEnd = rankEndFetcher.apply(row);
        if (rankEndValue == null) {
            rankEnd = curRankEnd;
            rankEndState.update(rankEnd);
            return rankEnd;
        } else {
            rankEnd = rankEndValue;
            if (rankEnd != curRankEnd) {
                // increment the invalid counter when the current rank end
                // is not equal to the previous rank end
                invalidCounter.inc();
            }
            return rankEnd;
        }
    }
}
3.68
framework_VLoadingIndicator_show
/**
 * Shows the loading indicator in its standard state and triggers timers for
 * transitioning into the "second" and "third" states.
 */
public void show() {
    // Reset possible style name and display mode
    getElement().setClassName(PRIMARY_STYLE_NAME);
    getElement().addClassName("first");
    getElement().getStyle().setDisplay(Display.BLOCK);

    // Schedule the "second" loading indicator
    int secondTimerDelay = getSecondDelay() - getFirstDelay();
    if (secondTimerDelay >= 0) {
        secondTimer.schedule(secondTimerDelay);
    }

    // Schedule the "third" loading indicator
    int thirdTimerDelay = getThirdDelay() - getFirstDelay();
    if (thirdTimerDelay >= 0) {
        thirdTimer.schedule(thirdTimerDelay);
    }
}
3.68