name: string (length 12 to 178)
code_snippet: string (length 8 to 36.5k)
score: float64 (range 3.26 to 3.68)
hadoop_SelectTool_run
/** * Execute the select operation. * @param args argument list * @param out output stream * @return an exit code * @throws IOException IO failure * @throws ExitUtil.ExitException managed failure */ public int run(String[] args, PrintStream out) throws IOException, ExitUtil.ExitException { final List<String> parsedArgs; try { parsedArgs = parseArgs(args); } catch (CommandFormat.UnknownOptionException e) { errorln(getUsage()); throw new ExitUtil.ExitException(EXIT_USAGE, e.getMessage(), e); } if (parsedArgs.size() < 2) { errorln(getUsage()); throw new ExitUtil.ExitException(EXIT_USAGE, TOO_FEW_ARGUMENTS); } // read mandatory arguments final String file = parsedArgs.get(0); final Path path = new Path(file); String expression = parsedArgs.get(1); println(out, "selecting file %s with query %s", path, expression); // and the optional arguments to adjust the configuration. final Optional<String> header = getOptValue(OPT_HEADER); header.ifPresent(h -> println(out, "Using header option %s", h)); Path destPath = getOptValue(OPT_OUTPUT).map( output -> { println(out, "Saving output to %s", output); return new Path(output); }).orElse(null); final boolean toConsole = destPath == null; // expected lines are only checked if empty final Optional<Integer> expectedLines = toConsole ? getIntValue(OPT_EXPECTED) : Optional.empty(); final Optional<Integer> limit = getIntValue(OPT_LIMIT); if (limit.isPresent()) { final int l = limit.get(); println(out, "Using line limit %s", l); if (expression.toLowerCase(Locale.ENGLISH).contains(" limit ")) { println(out, "line limit already specified in SELECT expression"); } else { expression = expression + " LIMIT " + l; } } // now bind to the filesystem. FileSystem fs = bindFilesystem(path.getFileSystem(getConf())); if (!fs.hasPathCapability(path, S3_SELECT_CAPABILITY)) { // capability disabled throw new ExitUtil.ExitException(EXIT_SERVICE_UNAVAILABLE, SELECT_IS_DISABLED + " for " + file); } linesRead = 0; selectDuration = new OperationDuration(); // open and scan the stream. final FutureDataInputStreamBuilder builder = fs.openFile(path) .must(SELECT_SQL, expression); header.ifPresent(h -> builder.must(CSV_INPUT_HEADER, h)); getOptValue(OPT_COMPRESSION).ifPresent(compression -> builder.must(SELECT_INPUT_COMPRESSION, compression.toUpperCase(Locale.ENGLISH))); getOptValue(OPT_INPUTFORMAT).ifPresent(opt -> { if (!"csv".equalsIgnoreCase(opt)) { throw invalidArgs("Unsupported input format %s", opt); } }); getOptValue(OPT_OUTPUTFORMAT).ifPresent(opt -> { if (!"csv".equalsIgnoreCase(opt)) { throw invalidArgs("Unsupported output format %s", opt); } }); // turn on SQL error reporting. builder.opt(SELECT_ERRORS_INCLUDE_SQL, true); FSDataInputStream stream; try(DurationInfo ignored = new DurationInfo(LOG, "Selecting stream")) { stream = FutureIO.awaitFuture(builder.build()); } catch (FileNotFoundException e) { // the source file is missing. 
throw notFound(e); } try { if (toConsole) { // logging to console bytesRead = 0; @SuppressWarnings("IOResourceOpenedButNotSafelyClosed") Scanner scanner = new Scanner( new BufferedReader( new InputStreamReader(stream, StandardCharsets.UTF_8))); scanner.useDelimiter("\n"); while (scanner.hasNextLine()) { linesRead++; String l = scanner.nextLine(); bytesRead += l.length() + 1; println(out, "%s", l); } } else { // straight dump of whole file; no line counting FileSystem destFS = destPath.getFileSystem(getConf()); try(DurationInfo ignored = new DurationInfo(LOG, "Copying File"); OutputStream destStream = destFS.createFile(destPath) .overwrite(true) .build()) { bytesRead = IOUtils.copy(stream, destStream); } } // close the stream. // this will take time if there's a lot of data remaining try (DurationInfo ignored = new DurationInfo(LOG, "Closing stream")) { stream.close(); } // generate a meaningful result depending on the operation String result = toConsole ? String.format("%s lines", linesRead) : String.format("%s bytes", bytesRead); // print some statistics selectDuration.finished(); println(out, "Read %s in time %s", result, selectDuration.getDurationString()); println(out, "Bytes Read: %,d bytes", bytesRead); println(out, "Bandwidth: %,.1f MiB/s", bandwidthMBs(bytesRead, selectDuration.value())); } finally { cleanupWithLogger(LOG, stream); } LOG.debug("Statistics {}", stream); expectedLines.ifPresent(l -> { if (l != linesRead) { throw exitException(EXIT_FAIL, "Expected %d rows but the operation returned %d", l, linesRead); } }); out.flush(); return EXIT_SUCCESS; }
3.68
querydsl_AbstractGroupExpression_as
/** * Create an alias for the expression. * * @param alias the alias name * @return alias expression */ public DslExpression<R> as(String alias) { return as(ExpressionUtils.path(getType(), alias)); }
3.68
framework_HasStyleNames_removeStyleNames
/** * Removes one or more style names from component. Multiple styles can be * specified by using multiple parameters. * * <p> * The parameter must be a valid CSS style name. Only user-defined style * names added with {@link #addStyleName(String) addStyleName()} or * {@link #setStyleName(String) setStyleName()} can be removed; built-in * style names defined in Vaadin or GWT can not be removed. * </p> * * @since 8.7 * @param styles * the style name or style names to be removed * @see #removeStyleName(String) * @see #setStyleName(String) * @see #addStyleName(String) */ default void removeStyleNames(String... styles) { for (String style : styles) { removeStyleName(style); } }
3.68
flink_ExceptionHistoryEntry_createGlobal
/** Creates an {@code ExceptionHistoryEntry} that is not based on an {@code Execution}. */ public static ExceptionHistoryEntry createGlobal( Throwable cause, CompletableFuture<Map<String, String>> failureLabels) { return new ExceptionHistoryEntry( cause, System.currentTimeMillis(), failureLabels, null, (ArchivedTaskManagerLocation) null); }
3.68
hbase_StoreScanner_parallelSeek
/** * Seek storefiles in parallel to optimize IO latency as much as possible * @param scanners the list of {@link KeyValueScanner}s to be read from * @param kv the KeyValue on which the operation is being requested */ private void parallelSeek(final List<? extends KeyValueScanner> scanners, final Cell kv) throws IOException { if (scanners.isEmpty()) return; int storeFileScannerCount = scanners.size(); CountDownLatch latch = new CountDownLatch(storeFileScannerCount); List<ParallelSeekHandler> handlers = new ArrayList<>(storeFileScannerCount); for (KeyValueScanner scanner : scanners) { if (scanner instanceof StoreFileScanner) { ParallelSeekHandler seekHandler = new ParallelSeekHandler(scanner, kv, this.readPt, latch); executor.submit(seekHandler); handlers.add(seekHandler); } else { scanner.seek(kv); latch.countDown(); } } try { latch.await(); } catch (InterruptedException ie) { throw (InterruptedIOException) new InterruptedIOException().initCause(ie); } for (ParallelSeekHandler handler : handlers) { if (handler.getErr() != null) { throw new IOException(handler.getErr()); } } }
3.68
framework_VCaption_updateCaption
/** * Updates the caption from UIDL. * * This method may only be called when the caption has an owner - otherwise, * use {@link #updateCaptionWithoutOwner(UIDL, String, boolean, boolean)}. * * @return true if the position where the caption should be placed has * changed */ public boolean updateCaption() { boolean wasPlacedAfterComponent = placedAfterComponent; // Caption is placed after component unless there is some part which // moves it above. placedAfterComponent = true; String style = CLASSNAME; if (ComponentStateUtil.hasStyles(owner.getState())) { for (String customStyle : owner.getState().styles) { style += " " + CLASSNAME + "-" + customStyle; } } if (!owner.isEnabled()) { style += " " + StyleConstants.DISABLED; } setStyleName(style); boolean hasIcon = owner.getState().resources .containsKey(ComponentConstants.ICON_RESOURCE); boolean showRequired = false; boolean showError = false; if (owner instanceof HasRequiredIndicator) { showRequired = ((HasRequiredIndicator) owner) .isRequiredIndicatorVisible(); } if (owner instanceof HasErrorIndicator) { showError = ((HasErrorIndicator) owner).isErrorIndicatorVisible(); } if (icon != null) { getElement().removeChild(icon.getElement()); icon = null; } if (hasIcon) { String uri = owner.getState().resources .get(ComponentConstants.ICON_RESOURCE).getURL(); icon = client.getIcon(uri); if (icon instanceof ImageIcon) { // onload will set appropriate size later icon.setWidth("0"); icon.setHeight("0"); } DOM.insertChild(getElement(), icon.getElement(), getInsertPosition(InsertPosition.ICON)); // Icon forces the caption to be above the component placedAfterComponent = false; } if (owner.getState().caption != null) { // A caption text should be shown if the attribute is set // If the caption is null the ATTRIBUTE_CAPTION should not be set to // avoid ending up here. if (captionText == null) { captionText = DOM.createDiv(); captionText.setClassName("v-captiontext"); DOM.insertChild(getElement(), captionText, getInsertPosition(InsertPosition.CAPTION)); } // Update caption text String c = owner.getState().caption; // A text forces the caption to be above the component. placedAfterComponent = false; if (c == null || c.trim().isEmpty()) { // Not sure if c even can be null. Should not. // This is required to ensure that the caption uses space in all // browsers when it is set to the empty string. If there is an // icon, error indicator or required indicator they will ensure // that space is reserved. 
if (!hasIcon && !showRequired && !showError) { captionText.setInnerHTML("&nbsp;"); } } else { setCaptionText(captionText, owner.getState()); } } else if (captionText != null) { // Remove existing DOM.removeChild(getElement(), captionText); captionText = null; } if (ComponentStateUtil.hasDescription(owner.getState()) && captionText != null) { addStyleDependentName("hasdescription"); } else { removeStyleDependentName("hasdescription"); } AriaHelper.handleInputRequired(owner.getWidget(), showRequired); if (showRequired) { if (requiredFieldIndicator == null) { requiredFieldIndicator = DOM.createDiv(); requiredFieldIndicator .setClassName("v-required-field-indicator"); DOM.setInnerText(requiredFieldIndicator, "*"); DOM.insertChild(getElement(), requiredFieldIndicator, getInsertPosition(InsertPosition.REQUIRED)); // Hide the required indicator from assistive device Roles.getTextboxRole() .setAriaHiddenState(requiredFieldIndicator, true); } } else if (requiredFieldIndicator != null) { // Remove existing DOM.removeChild(getElement(), requiredFieldIndicator); requiredFieldIndicator = null; } AriaHelper.handleInputInvalid(owner.getWidget(), showError); if (showError) { setErrorIndicatorElementVisible(true); // Hide error indicator from assistive devices Roles.getTextboxRole().setAriaHiddenState(errorIndicatorElement, true); ErrorUtil.setErrorLevelStyle(errorIndicatorElement, StyleConstants.STYLE_NAME_ERROR_INDICATOR, owner.getState().errorLevel); } else { setErrorIndicatorElementVisible(false); } return (wasPlacedAfterComponent != placedAfterComponent); }
3.68
hbase_TableDescriptorBuilder_isNormalizationEnabled
/** * Check if the normalization enabled flag of the table is true. If the flag is false then the * region normalizer won't attempt to normalize this table. * @return true if region normalization is enabled for this table **/ @Override public boolean isNormalizationEnabled() { return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, false); }
3.68
hbase_TableDescriptorBuilder_isReadOnly
/** * Check if the readOnly flag of the table is set. If the readOnly flag is set then the contents * of the table can only be read from but not modified. * @return true if all columns in the table should be read only */ @Override public boolean isReadOnly() { return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY); }
3.68
hadoop_PlacementConstraints_nodePartition
/** * Constructs a target expression on a node partition. It is satisfied if * the node's partition is one of the specified nodePartitions. * * @param nodePartitions the set of values that the node partition should take * values from * @return the resulting expression on the node attribute */ public static TargetExpression nodePartition( String... nodePartitions) { return new TargetExpression(TargetType.NODE_ATTRIBUTE, NODE_PARTITION, nodePartitions); }
3.68
pulsar_AuthenticationDataHttps_hasDataFromTls
/* * TLS */ @Override public boolean hasDataFromTls() { return (certificates != null); }
3.68
hadoop_DiskBalancerDataNode_getDataNodeIP
/** * Returns the IP address of this Node. * * @return IP Address string */ public String getDataNodeIP() { return dataNodeIP; }
3.68
hadoop_AbfsHttpOperation_getConnUrl
/** * Gets the connection url. * @return url. */ URL getConnUrl() { return connection.getURL(); }
3.68
hudi_IncrSourceHelper_generateQueryInfo
/** * Find begin and end instants to be set for the next fetch. * * @param jssc Java Spark Context * @param srcBasePath Base path of Hudi source table * @param numInstantsPerFetch Max Instants per fetch * @param beginInstant Last Checkpoint String * @param missingCheckpointStrategy when begin instant is missing, allow reading based on missing checkpoint strategy * @param handlingMode Hollow Commit Handling Mode * @param orderColumn Column to order by (used for size based incr source) * @param keyColumn Key column (used for size based incr source) * @param limitColumn Limit column (used for size based incr source) * @param sourceLimitBasedBatching When sourceLimit based batching is used, we need to fetch the current commit as well, * this flag is used to indicate that. * @param lastCheckpointKey Last checkpoint key (used in the upgrade code path) * @return begin and end instants along with query type and other information. */ public static QueryInfo generateQueryInfo(JavaSparkContext jssc, String srcBasePath, int numInstantsPerFetch, Option<String> beginInstant, MissingCheckpointStrategy missingCheckpointStrategy, HollowCommitHandling handlingMode, String orderColumn, String keyColumn, String limitColumn, boolean sourceLimitBasedBatching, Option<String> lastCheckpointKey) { ValidationUtils.checkArgument(numInstantsPerFetch > 0, "Make sure the config hoodie.streamer.source.hoodieincr.num_instants is set to a positive value"); HoodieTableMetaClient srcMetaClient = HoodieTableMetaClient.builder().setConf(jssc.hadoopConfiguration()).setBasePath(srcBasePath).setLoadActiveTimelineOnLoad(true).build(); HoodieTimeline completedCommitTimeline = srcMetaClient.getCommitsAndCompactionTimeline().filterCompletedInstants(); final HoodieTimeline activeCommitTimeline = handleHollowCommitIfNeeded(completedCommitTimeline, srcMetaClient, handlingMode); Function<HoodieInstant, String> timestampForLastInstant = instant -> handlingMode == HollowCommitHandling.USE_TRANSITION_TIME ? instant.getCompletionTime() : instant.getTimestamp(); String beginInstantTime = beginInstant.orElseGet(() -> { if (missingCheckpointStrategy != null) { if (missingCheckpointStrategy == MissingCheckpointStrategy.READ_LATEST) { Option<HoodieInstant> lastInstant = activeCommitTimeline.lastInstant(); return lastInstant.map(hoodieInstant -> getStrictlyLowerTimestamp(timestampForLastInstant.apply(hoodieInstant))).orElse(DEFAULT_BEGIN_TIMESTAMP); } else { return DEFAULT_BEGIN_TIMESTAMP; } } else { throw new IllegalArgumentException("Missing begin instant for incremental pull. For reading from latest " + "committed instant set hoodie.streamer.source.hoodieincr.missing.checkpoint.strategy to a valid value"); } }); // When `beginInstantTime` is present, `previousInstantTime` is set to the completed commit before `beginInstantTime` if that exists. // If there is no completed commit before `beginInstantTime`, e.g., `beginInstantTime` is the first commit in the active timeline, // `previousInstantTime` is set to `DEFAULT_BEGIN_TIMESTAMP`. 
String previousInstantTime = DEFAULT_BEGIN_TIMESTAMP; if (!beginInstantTime.equals(DEFAULT_BEGIN_TIMESTAMP)) { Option<HoodieInstant> previousInstant = activeCommitTimeline.findInstantBefore(beginInstantTime); if (previousInstant.isPresent()) { previousInstantTime = previousInstant.get().getTimestamp(); } else { // if begin instant time matches first entry in active timeline, we can set previous = beginInstantTime - 1 if (activeCommitTimeline.filterCompletedInstants().firstInstant().isPresent() && activeCommitTimeline.filterCompletedInstants().firstInstant().get().getTimestamp().equals(beginInstantTime)) { previousInstantTime = String.valueOf(Long.parseLong(beginInstantTime) - 1); } } } if (missingCheckpointStrategy == MissingCheckpointStrategy.READ_LATEST || !activeCommitTimeline.isBeforeTimelineStarts(beginInstantTime)) { Option<HoodieInstant> nthInstant; // When we are in the upgrade code path from non-sourcelimit-based batching to sourcelimit-based batching, we need to avoid fetching the commit // that is read already. Else we will have duplicates in append-only use case if we use "findInstantsAfterOrEquals". // As soon as we have a new format of checkpoint and a key we will move to the new code of fetching the current commit as well. if (sourceLimitBasedBatching && lastCheckpointKey.isPresent()) { nthInstant = Option.fromJavaOptional(activeCommitTimeline .findInstantsAfterOrEquals(beginInstantTime, numInstantsPerFetch).getInstantsAsStream().reduce((x, y) -> y)); } else { nthInstant = Option.fromJavaOptional(activeCommitTimeline .findInstantsAfter(beginInstantTime, numInstantsPerFetch).getInstantsAsStream().reduce((x, y) -> y)); } return new QueryInfo(DataSourceReadOptions.QUERY_TYPE_INCREMENTAL_OPT_VAL(), previousInstantTime, beginInstantTime, nthInstant.map(HoodieInstant::getTimestamp).orElse(beginInstantTime), orderColumn, keyColumn, limitColumn); } else { // when MissingCheckpointStrategy is set to read everything until latest, trigger snapshot query. Option<HoodieInstant> lastInstant = activeCommitTimeline.lastInstant(); return new QueryInfo(DataSourceReadOptions.QUERY_TYPE_SNAPSHOT_OPT_VAL(), previousInstantTime, beginInstantTime, lastInstant.get().getTimestamp(), orderColumn, keyColumn, limitColumn); } }
3.68
hbase_SnapshotInfo_getSnapshotDescription
/** Returns the snapshot descriptor */ public SnapshotDescription getSnapshotDescription() { return ProtobufUtil.createSnapshotDesc(this.snapshot); }
3.68
hadoop_StartupProgress_getCounter
/** * Returns a counter associated with the specified phase and step. Typical * usage is to increment a counter within a tight loop. Callers may use this * method to obtain a counter once and then increment that instance repeatedly * within a loop. This prevents redundant lookup operations and object * creation within the tight loop. Incrementing the counter is an atomic * operation, so there is no risk of lost updates even if multiple threads * increment the same counter. * * @param phase Phase to get * @param step Step to get * @return Counter associated with phase and step */ public Counter getCounter(Phase phase, Step step) { if (!isComplete(phase)) { final StepTracking tracking = lazyInitStep(phase, step); return new Counter() { @Override public void increment() { tracking.count.incrementAndGet(); } }; } else { return new Counter() { @Override public void increment() { // no-op, because startup has completed } }; } }
3.68
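Illustrative note: a minimal usage sketch of the lookup-once pattern this Javadoc describes, where the counter is obtained once outside the loop and the cached instance is incremented inside it. It assumes hadoop-hdfs on the classpath and that Counter is the nested StartupProgress.Counter type shown in the snippet; the phase, step and loop bound are hypothetical.

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;

class CounterUsageSketch {
  // Obtain the counter once, then increment the cached instance in the tight loop.
  static void countItems(StartupProgress prog, Phase phase, Step step, int itemCount) {
    StartupProgress.Counter counter = prog.getCounter(phase, step);
    for (int i = 0; i < itemCount; i++) {
      counter.increment(); // atomic; no per-iteration lookup or object creation
    }
  }
}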
dubbo_Invoker_invoke
// This method will never be called for a legacy invoker. @Override default org.apache.dubbo.rpc.Result invoke(org.apache.dubbo.rpc.Invocation invocation) throws org.apache.dubbo.rpc.RpcException { return null; }
3.68
dubbo_Server_start
/** * start server, bind port */ public void start() throws Throwable { if (!started.compareAndSet(false, true)) { return; } boss = new NioEventLoopGroup(1, new DefaultThreadFactory("qos-boss", true)); worker = new NioEventLoopGroup(0, new DefaultThreadFactory("qos-worker", true)); ServerBootstrap serverBootstrap = new ServerBootstrap(); serverBootstrap.group(boss, worker); serverBootstrap.channel(NioServerSocketChannel.class); serverBootstrap.option(ChannelOption.SO_REUSEADDR, true); serverBootstrap.childOption(ChannelOption.TCP_NODELAY, true); serverBootstrap.childHandler(new ChannelInitializer<Channel>() { @Override protected void initChannel(Channel ch) throws Exception { ch.pipeline() .addLast(new QosProcessHandler( frameworkModel, QosConfiguration.builder() .welcome(welcome) .acceptForeignIp(acceptForeignIp) .acceptForeignIpWhitelist(acceptForeignIpWhitelist) .anonymousAccessPermissionLevel(anonymousAccessPermissionLevel) .anonymousAllowCommands(anonymousAllowCommands) .build())); } }); try { if (StringUtils.isBlank(host)) { serverBootstrap.bind(port).sync(); } else { serverBootstrap.bind(host, port).sync(); } logger.info("qos-server bind localhost:" + port); } catch (Throwable throwable) { throw new QosBindException("qos-server can not bind localhost:" + port, throwable); } }
3.68
framework_TableQuery_writeObject
/** * Custom writeObject to call rollback() if object is serialized. */ private void writeObject(java.io.ObjectOutputStream out) throws IOException { try { rollback(); } catch (SQLException ignored) { } out.defaultWriteObject(); }
3.68
flink_EmbeddedRocksDBStateBackend_getDbStoragePaths
/** * Gets the configured local DB storage paths, or null, if none were configured. * * <p>Under these directories on the TaskManager, RocksDB stores its SST files and metadata * files. These directories do not need to be persistent; they can be ephemeral, meaning that * they are lost on a machine failure, because state in RocksDB is persisted in checkpoints. * * <p>If nothing is configured, these directories default to the TaskManager's local temporary * file directories. */ public String[] getDbStoragePaths() { if (localRocksDbDirectories == null) { return null; } else { String[] paths = new String[localRocksDbDirectories.length]; for (int i = 0; i < paths.length; i++) { paths[i] = localRocksDbDirectories[i].toString(); } return paths; } }
3.68
flink_SolutionSetNode_getOperator
/** * Gets the contract object for this solution set node. * * @return The contract. */ @Override public SolutionSetPlaceHolder<?> getOperator() { return (SolutionSetPlaceHolder<?>) super.getOperator(); }
3.68
flink_ResultPartitionType_isHybridResultPartition
/** * {@link #isHybridResultPartition()} is used to judge whether this is the {@link * #HYBRID_FULL} or {@link #HYBRID_SELECTIVE} resultPartitionType. * * <p>This method is suitable for judgment conditions related to the specific implementation of * {@link ResultPartitionType}. * * <p>This method is not related to data consumption and partition release. For the logic * related to partition release, use {@link #isReleaseByScheduler()} instead; for the consume * type, use {@link #mustBePipelinedConsumed()} or {@link #canBePipelinedConsumed()} instead. */ public boolean isHybridResultPartition() { return this == HYBRID_FULL || this == HYBRID_SELECTIVE; }
3.68
flink_HiveParserTypeCheckProcFactory_getIntervalExprProcessor
/** Factory method to get IntervalExprProcessor. */ public HiveParserTypeCheckProcFactory.IntervalExprProcessor getIntervalExprProcessor() { return new HiveParserTypeCheckProcFactory.IntervalExprProcessor(); }
3.68
morf_SqlServerDialect_dropPrimaryKey
/** * Drops the primary key from a {@link Table}. * * @param table The table to drop the primary key from * @return the SQL statement that drops the primary key constraint */ private String dropPrimaryKey(Table table) { StringBuilder dropPkStatement = new StringBuilder(); dropPkStatement.append("ALTER TABLE ").append(schemaNamePrefix()).append(table.getName()).append(" DROP "); dropPkStatement.append("CONSTRAINT ["); dropPkStatement.append(undecorateName(table.getName())); dropPkStatement.append("_PK]"); return dropPkStatement.toString(); }
3.68
flink_SourceTestSuiteBase_testTaskManagerFailure
/** * Test connector source with task manager failover. * * <p>This test will create 1 split in the external system, write test record set A into the * split, restart task manager to trigger job failover, write test record set B into the split, * and terminate the Flink job finally. * * <p>The number and order of records consumed by Flink should be identical to A before the * failover and B after the failover in order to pass the test. * * <p>An unbounded source is required for this test, since TaskManager failover will be * triggered in the middle of the test. */ @TestTemplate @DisplayName("Test TaskManager failure") public void testTaskManagerFailure( TestEnvironment testEnv, DataStreamSourceExternalContext<T> externalContext, ClusterControllable controller, CheckpointingMode semantic) throws Exception { // Step 1: Preparation TestingSourceSettings sourceSettings = TestingSourceSettings.builder() .setBoundedness(Boundedness.CONTINUOUS_UNBOUNDED) .setCheckpointingMode(semantic) .build(); TestEnvironmentSettings envOptions = TestEnvironmentSettings.builder() .setConnectorJarPaths(externalContext.getConnectorJarPaths()) .build(); Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings); // Step 2: Write test data to external system int splitIndex = 0; List<T> testRecordsBeforeFailure = externalContext.generateTestData( sourceSettings, splitIndex, ThreadLocalRandom.current().nextLong()); ExternalSystemSplitDataWriter<T> externalSystemSplitDataWriter = externalContext.createSourceSplitDataWriter(sourceSettings); LOG.info( "Writing {} records for split {} to external system", testRecordsBeforeFailure.size(), splitIndex); externalSystemSplitDataWriter.writeRecords(testRecordsBeforeFailure); // Step 3: Build and execute Flink job StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envOptions); execEnv.enableCheckpointing(50); DataStreamSource<T> stream = execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source") .setParallelism(1); CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream); JobClient jobClient = submitJob(execEnv, "TaskManager Failover Test"); // Step 4: Validate records before killing TaskManagers CloseableIterator<T> iterator = iteratorBuilder.build(jobClient); LOG.info("Checking records before killing TaskManagers"); checkResultWithSemantic( iterator, singletonList(testRecordsBeforeFailure), semantic, testRecordsBeforeFailure.size()); // Step 5: Trigger TaskManager failover LOG.info("Trigger TaskManager failover"); controller.triggerTaskManagerFailover(jobClient, () -> {}); LOG.info("Waiting for job recovering from failure"); waitForJobStatus(jobClient, singletonList(JobStatus.RUNNING)); // Step 6: Write test data again to external system List<T> testRecordsAfterFailure = externalContext.generateTestData( sourceSettings, splitIndex, ThreadLocalRandom.current().nextLong()); LOG.info( "Writing {} records for split {} to external system", testRecordsAfterFailure.size(), splitIndex); externalSystemSplitDataWriter.writeRecords(testRecordsAfterFailure); // Step 7: Validate test result LOG.info("Checking records after job failover"); checkResultWithSemantic( iterator, singletonList(testRecordsAfterFailure), semantic, testRecordsAfterFailure.size()); // Step 8: Clean up terminateJob(jobClient); waitForJobStatus(jobClient, singletonList(JobStatus.CANCELED)); iterator.close(); }
3.68
hadoop_TypedBytesOutput_writeFloat
/** * Writes a float as a typed bytes sequence. * * @param f the float to be written * @throws IOException */ public void writeFloat(float f) throws IOException { out.write(Type.FLOAT.code); out.writeFloat(f); }
3.68
pulsar_AuthenticationProviderToken_getTokenAudienceClaim
// get Token Audience Claim from configuration, if not configured return null. private String getTokenAudienceClaim(ServiceConfiguration conf) throws IllegalArgumentException { String tokenAudienceClaim = (String) conf.getProperty(confTokenAudienceClaimSettingName); if (StringUtils.isNotBlank(tokenAudienceClaim)) { return tokenAudienceClaim; } else { return null; } }
3.68
hbase_KeyValueHeap_requestSeek
/** * {@inheritDoc} */ @Override public boolean requestSeek(Cell key, boolean forward, boolean useBloom) throws IOException { return generalizedSeek(true, key, forward, useBloom); }
3.68
hudi_TableHeader_addTableHeaderField
/** * Add a field (column) to the table. * * @param fieldName field name * @return this {@code TableHeader} for chaining */ public TableHeader addTableHeaderField(String fieldName) { fieldNames.add(fieldName); return this; }
3.68
flink_PluginLoader_load
/** * Returns an iterator over all available implementations of the given service interface (SPI) * for the plugin. * * @param service the service interface (SPI) for which implementations are requested. * @param <P> Type of the requested plugin service. * @return An iterator of all implementations of the given service interface that could be * loaded from the plugin. */ public <P> Iterator<P> load(Class<P> service) { try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(pluginClassLoader)) { return new ContextClassLoaderSettingIterator<>( ServiceLoader.load(service, pluginClassLoader).iterator(), pluginClassLoader); } }
3.68
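Illustrative note: a simplified, self-contained sketch of the same SPI-loading idea using plain java.util.ServiceLoader, switching the thread context class loader around the lookup and restoring it afterwards. Unlike the snippet above, this sketch does not keep the context class loader set during lazy iteration, so it is an approximation for illustration, not the Flink implementation.

import java.util.Iterator;
import java.util.ServiceLoader;

class SpiLoadSketch {
  static <P> Iterator<P> loadFromPlugin(Class<P> service, ClassLoader pluginClassLoader) {
    Thread current = Thread.currentThread();
    ClassLoader previous = current.getContextClassLoader();
    current.setContextClassLoader(pluginClassLoader); // temporary context switch
    try {
      return ServiceLoader.load(service, pluginClassLoader).iterator();
    } finally {
      current.setContextClassLoader(previous); // always restore the previous loader
    }
  }
}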
flink_SegmentsUtil_setFloat
/** * Sets a float value into the segments at the given offset. * * @param segments target segments. * @param offset value offset. * @param value float value to write. */ public static void setFloat(MemorySegment[] segments, int offset, float value) { if (inFirstSegment(segments, offset, 4)) { segments[0].putFloat(offset, value); } else { setFloatMultiSegments(segments, offset, value); } }
3.68
framework_Escalator_scrollToRow
/** * Scrolls the body vertically so that the row at the given index is visible * and there are at least {@literal padding} pixels to the given scroll * destination. * * @param rowIndex * the index of the logical row to scroll to * @param destination * where the row should be aligned visually after scrolling * @param padding * the number of pixels to place between the scrolled-to row and the * viewport edge. * @throws IndexOutOfBoundsException * if {@code rowIndex} is not a valid index for an existing row * @throws IllegalArgumentException * if {@code destination} is {@link ScrollDestination#MIDDLE} * and padding is nonzero; or if {@code destination == null} * @see #scrollToRowAndSpacer(int, ScrollDestination, int) * @see #scrollToSpacer(int, ScrollDestination, int) */ public void scrollToRow(final int rowIndex, final ScrollDestination destination, final int padding) throws IndexOutOfBoundsException, IllegalArgumentException { Scheduler.get().scheduleFinally(new ScheduledCommand() { @Override public void execute() { validateScrollDestination(destination, padding); verifyValidRowIndex(rowIndex); scroller.scrollToRow(rowIndex, destination, padding); } }); }
3.68
morf_ResultSetMismatch_getKey
/** * @return key identifying the mismatch. */ public String[] getKey() { return Arrays.copyOf(key, key.length); }
3.68
hadoop_RegistryTypeUtils_urlEndpoint
/** * Create a URL endpoint from a list of URIs * @param api implemented API * @param protocolType protocol type * @param uris URIs * @return a new endpoint */ public static Endpoint urlEndpoint(String api, String protocolType, URI... uris) { return new Endpoint(api, protocolType, uris); }
3.68
hadoop_IOStatisticsLogging_ioStatisticsToPrettyString
/** * Convert IOStatistics to a string form, with all the metrics sorted * and empty values stripped. * This is more expensive than the simple conversion, so should only * be used for logging/output where it's known/highly likely that the * caller wants to see the values. Not for debug logging. * @param statistics A statistics instance. * @return string value or the empty string if null */ public static String ioStatisticsToPrettyString( @Nullable final IOStatistics statistics) { if (statistics != null) { StringBuilder sb = new StringBuilder(); mapToSortedString(sb, "counters", statistics.counters(), p -> p == 0); mapToSortedString(sb, "\ngauges", statistics.gauges(), p -> p == 0); mapToSortedString(sb, "\nminimums", statistics.minimums(), p -> p < 0); mapToSortedString(sb, "\nmaximums", statistics.maximums(), p -> p < 0); mapToSortedString(sb, "\nmeans", statistics.meanStatistics(), MeanStatistic::isEmpty); return sb.toString(); } else { return ""; } }
3.68
zxing_BitMatrix_getRow
/** * A fast method to retrieve one row of data from the matrix as a BitArray. * * @param y The row to retrieve * @param row An optional caller-allocated BitArray, will be allocated if null or too small * @return The resulting BitArray - this reference should always be used even when passing * your own row */ public BitArray getRow(int y, BitArray row) { if (row == null || row.getSize() < width) { row = new BitArray(width); } else { row.clear(); } int offset = y * rowSize; for (int x = 0; x < rowSize; x++) { row.setBulk(x * 32, bits[offset + x]); } return row; }
3.68
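Illustrative note: a short sketch of the buffer-reuse contract stated above, where the same BitArray is passed back in on every iteration but the returned reference is the one that gets used. The bit-counting loop is a hypothetical example and assumes the zxing core library on the classpath.

import com.google.zxing.common.BitArray;
import com.google.zxing.common.BitMatrix;

class RowReuseSketch {
  static int countSetBits(BitMatrix matrix) {
    BitArray row = null;
    int total = 0;
    for (int y = 0; y < matrix.getHeight(); y++) {
      row = matrix.getRow(y, row); // reuse the buffer, but always keep the returned reference
      for (int x = 0; x < matrix.getWidth(); x++) {
        if (row.get(x)) {
          total++;
        }
      }
    }
    return total;
  }
}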
dubbo_ScriptStateRouter_getEngine
/** * create ScriptEngine instance by type from url parameters, then cache it */ private ScriptEngine getEngine(URL url) { String type = url.getParameter(TYPE_KEY, DEFAULT_SCRIPT_TYPE_KEY); return ConcurrentHashMapUtils.computeIfAbsent(ENGINES, type, t -> { ScriptEngine scriptEngine = new ScriptEngineManager().getEngineByName(type); if (scriptEngine == null) { throw new IllegalStateException("unsupported route engine type: " + type); } return scriptEngine; }); }
3.68
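Illustrative note: the same cache-by-type pattern can be sketched with the JDK's ConcurrentHashMap instead of Dubbo's ConcurrentHashMapUtils. This is an assumption-level rewrite for illustration, not the Dubbo code.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.script.ScriptEngine;
import javax.script.ScriptEngineManager;

class EngineCacheSketch {
  private static final ConcurrentMap<String, ScriptEngine> ENGINES = new ConcurrentHashMap<>();

  // Lazily create and cache one ScriptEngine per type name.
  static ScriptEngine engineFor(String type) {
    return ENGINES.computeIfAbsent(type, t -> {
      ScriptEngine engine = new ScriptEngineManager().getEngineByName(t);
      if (engine == null) {
        throw new IllegalStateException("unsupported route engine type: " + t);
      }
      return engine;
    });
  }
}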
flink_SharedObjectsExtension_create
/** * Creates a new instance. Usually that should be done inside a JUnit test class as an * instance-field annotated with {@link org.junit.Rule}. */ public static SharedObjectsExtension create() { return new SharedObjectsExtension(LAST_ID.getAndIncrement()); }
3.68
hbase_ReflectionUtils_invokeMethod
/** * Get and invoke the target method from the given object with given parameters * @param obj the object to get and invoke method from * @param methodName the name of the method to invoke * @param params the parameters for the method to invoke * @return the return value of the method invocation */ @NonNull public static Object invokeMethod(Object obj, String methodName, Object... params) { Method m; try { m = obj.getClass().getMethod(methodName, getParameterTypes(params)); m.setAccessible(true); return m.invoke(obj, params); } catch (NoSuchMethodException e) { throw new UnsupportedOperationException("Cannot find specified method " + methodName, e); } catch (IllegalAccessException e) { throw new UnsupportedOperationException("Unable to access specified method " + methodName, e); } catch (IllegalArgumentException e) { throw new UnsupportedOperationException("Illegal arguments supplied for method " + methodName, e); } catch (InvocationTargetException e) { throw new UnsupportedOperationException("Method threw an exception for " + methodName, e); } }
3.68
framework_VTooltip_connectHandlersToWidget
/** * Connects the DOM handlers needed for tooltip presentation to the given widget. * * @param widget * the widget to which the DOM handlers are connected */ public void connectHandlersToWidget(Widget widget) { Profiler.enter("VTooltip.connectHandlersToWidget"); widget.addDomHandler(tooltipEventHandler, MouseOutEvent.getType()); widget.addDomHandler(tooltipEventHandler, MouseMoveEvent.getType()); widget.addDomHandler(tooltipEventHandler, MouseDownEvent.getType()); widget.addDomHandler(tooltipEventHandler, KeyDownEvent.getType()); widget.addDomHandler(tooltipEventHandler, FocusEvent.getType()); widget.addDomHandler(tooltipEventHandler, BlurEvent.getType()); Profiler.leave("VTooltip.connectHandlersToWidget"); }
3.68
hadoop_JavaCommandLineBuilder_addConfOptionToCLI
/** * Add a configuration option to the command line of the application * @param conf configuration * @param key key * @param defVal default value * @return the resolved configuration option * @throws IllegalArgumentException if key is null or the looked up value * is null (that is: the argument is missing and defVal was null). */ public String addConfOptionToCLI(Configuration conf, String key, String defVal) { Preconditions.checkArgument(key != null, "null key"); String val = conf.get(key, defVal); define(key, val); return val; }
3.68
open-banking-gateway_AuthorizationPossibleErrorHandler_handlePossibleAuthorizationError
/** * Swallows retryable (like wrong password) authorization exceptions. * @param tryAuthorize Authorization function to call * @param onFail Fallback function to call if retryable exception occurred. */ public void handlePossibleAuthorizationError(Runnable tryAuthorize, Consumer<ErrorResponseException> onFail) { try { tryAuthorize.run(); } catch (ErrorResponseException ex) { rethrowIfNotAuthorizationErrorCode(ex); onFail.accept(ex); } }
3.68
hbase_PermissionStorage_isAclTable
/** * Returns {@code true} if the given table is the {@code _acl_} metadata table. */ static boolean isAclTable(TableDescriptor desc) { return ACL_TABLE_NAME.equals(desc.getTableName()); }
3.68
morf_DataSourceAdapter_unwrap
/** * @see java.sql.Wrapper#unwrap(java.lang.Class) */ @Override public <T> T unwrap(Class<T> iface) throws SQLException { throw new UnsupportedOperationException("Wrappers not supported"); }
3.68
hudi_BaseHoodieTableServiceClient_scheduleCompactionAtInstant
/** * Schedules a new compaction instant with the passed-in instant time. * * @param instantTime Compaction Instant Time * @param extraMetadata Extra Metadata to be stored * @return true if the compaction was successfully scheduled */ public boolean scheduleCompactionAtInstant(String instantTime, Option<Map<String, String>> extraMetadata) throws HoodieIOException { return scheduleTableService(instantTime, extraMetadata, TableServiceType.COMPACT).isPresent(); }
3.68
hibernate-validator_SizeValidatorForArraysOfDouble_isValid
/** * Checks the number of entries in an array. * * @param array The array to validate. * @param constraintValidatorContext context in which the constraint is evaluated. * * @return Returns {@code true} if the array is {@code null} or the number of entries in * {@code array} is between the specified {@code min} and {@code max} values (inclusive), * {@code false} otherwise. */ @Override public boolean isValid(double[] array, ConstraintValidatorContext constraintValidatorContext) { if ( array == null ) { return true; } return array.length >= min && array.length <= max; }
3.68
flink_TableConfig_getPlannerConfig
/** Returns the current configuration of Planner for Table API and SQL queries. */ public PlannerConfig getPlannerConfig() { return plannerConfig; }
3.68
hbase_MasterObserver_postTableFlush
/** * Called after the table memstore is flushed to disk. * @param ctx the environment to interact with the framework and master * @param tableName the name of the table */ default void postTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx, final TableName tableName) throws IOException { }
3.68
hbase_CompactingMemStore_setCompositeSnapshot
// the following three methods allow manipulating the composite snapshot setting public void setCompositeSnapshot(boolean useCompositeSnapshot) { this.compositeSnapshot = useCompositeSnapshot; }
3.68
flink_OperatingSystem_readOSFromSystemProperties
/** * Parses the operating system that the JVM runs on from the java system properties. If the * operating system was not successfully determined, this method returns {@code UNKNOWN}. * * @return The enum constant for the operating system, or {@code UNKNOWN}, if it was not * possible to determine. */ private static OperatingSystem readOSFromSystemProperties() { String osName = System.getProperty(OS_KEY); if (osName.startsWith(LINUX_OS_PREFIX)) { return LINUX; } if (osName.startsWith(WINDOWS_OS_PREFIX)) { return WINDOWS; } if (osName.startsWith(MAC_OS_PREFIX)) { return MAC_OS; } if (osName.startsWith(FREEBSD_OS_PREFIX)) { return FREE_BSD; } String osNameLowerCase = osName.toLowerCase(); if (osNameLowerCase.contains(SOLARIS_OS_INFIX_1) || osNameLowerCase.contains(SOLARIS_OS_INFIX_2)) { return SOLARIS; } return UNKNOWN; }
3.68
framework_VScrollTable_resizeCaptionContainer
/** * Makes room for the sorting indicator in case the column that the * header cell belongs to is sorted. This is done by resizing the width * of the caption container element by the correct amount */ public void resizeCaptionContainer(int rightSpacing) { int captionContainerWidth = width - colResizeWidget.getOffsetWidth() - rightSpacing; if (td.getClassName().contains("-asc") || td.getClassName().contains("-desc")) { // Leave room for the sort indicator captionContainerWidth -= sortIndicator.getOffsetWidth(); } if (captionContainerWidth < 0) { rightSpacing += captionContainerWidth; captionContainerWidth = 0; } captionContainer.getStyle().setPropertyPx("width", captionContainerWidth); // Apply/Remove spacing if defined if (rightSpacing > 0) { colResizeWidget.getStyle().setMarginLeft(rightSpacing, Unit.PX); } else { colResizeWidget.getStyle().clearMarginLeft(); } }
3.68
flink_VertexInputInfoComputationUtils_computeVertexInputInfoForAllToAll
/** * Compute the {@link JobVertexInputInfo} for a {@link DistributionPattern#ALL_TO_ALL} edge. * This computation algorithm will evenly distribute subpartitions to downstream subtasks * according to the number of subpartitions. Different downstream subtasks consume roughly the * same number of subpartitions. * * @param sourceCount the parallelism of upstream * @param targetCount the parallelism of downstream * @param numOfSubpartitionsRetriever a retriever to get the number of subpartitions * @param isDynamicGraph whether the graph is dynamic * @param isBroadcast whether the edge is broadcast * @return the computed {@link JobVertexInputInfo} */ static JobVertexInputInfo computeVertexInputInfoForAllToAll( int sourceCount, int targetCount, Function<Integer, Integer> numOfSubpartitionsRetriever, boolean isDynamicGraph, boolean isBroadcast) { final List<ExecutionVertexInputInfo> executionVertexInputInfos = new ArrayList<>(); IndexRange partitionRange = new IndexRange(0, sourceCount - 1); for (int i = 0; i < targetCount; ++i) { IndexRange subpartitionRange = computeConsumedSubpartitionRange( i, targetCount, () -> numOfSubpartitionsRetriever.apply(0), isDynamicGraph, isBroadcast); executionVertexInputInfos.add( new ExecutionVertexInputInfo(i, partitionRange, subpartitionRange)); } return new JobVertexInputInfo(executionVertexInputInfos); }
3.68
hadoop_TFile_getValueLength
/** * Get the length of the value. isValueLengthKnown() must return * true before this is called. * * @return the length of the value. */ public int getValueLength() { if (vlen >= 0) { return vlen; } throw new RuntimeException("Value length unknown."); }
3.68
framework_GridLayout_isHideEmptyRowsAndColumns
/** * Checks whether empty rows and columns should be considered * non-existent when rendering. * * @see #setHideEmptyRowsAndColumns(boolean) * @since 7.3 * @return true if empty rows and columns are hidden, false otherwise */ public boolean isHideEmptyRowsAndColumns() { return getState(false).hideEmptyRowsAndColumns; }
3.68
hadoop_SolverPreprocessor_getDiscreteSkyline
/** * Discretize job's lifespan into intervals, and return the number of * containers used by the job within each interval. * <p> Note that here we assume all containers allocated to the job have the * same {@link Resource}. This is due to the limit of * {@link RLESparseResourceAllocation}. * * @param skyList the list of {@link Resource}s used by the job. * @param timeInterval the time interval used to discretize the job's * lifespan. * @param containerMemAlloc the amount of memory allocated to each container. * @param jobLen the duration of the job. * @return the number of containers allocated to the job within discretized * time intervals. */ public final int[] getDiscreteSkyline( final RLESparseResourceAllocation skyList, final int timeInterval, final long containerMemAlloc, final int jobLen) { long jobLifeSpan = skyList.getLatestNonNullTime() - skyList.getEarliestStartTime(); int[] result = new int[jobLen]; Arrays.fill(result, 0); int index = 0; long numContainerAt = 0; for (int i = 0; i < jobLifeSpan; i++) { index = (int) Math.floor((double) i / timeInterval); numContainerAt = getResourceVector(skyList, i, containerMemAlloc); if (result[index] < numContainerAt) { result[index] = (int) numContainerAt; } } return result; }
3.68
framework_AbstractExtension_extend
/** * Add this extension to the target connector. This method is protected to * allow subclasses to require a more specific type of target. * * @param target * the connector to attach this extension to */ protected void extend(AbstractClientConnector target) { target.addExtension(this); }
3.68
hudi_BootstrapExecutorUtils_execute
/** * Executes Bootstrap. */ public void execute() throws IOException { initializeTable(); try (SparkRDDWriteClient bootstrapClient = new SparkRDDWriteClient(new HoodieSparkEngineContext(jssc), bootstrapConfig)) { HashMap<String, String> checkpointCommitMetadata = new HashMap<>(); checkpointCommitMetadata.put(CHECKPOINT_KEY, Config.checkpoint); bootstrapClient.bootstrap(Option.of(checkpointCommitMetadata)); syncHive(); } }
3.68
hbase_SnapshotManifest_open
/** * Return a SnapshotManifest instance with the information already loaded in-memory. * SnapshotManifest manifest = SnapshotManifest.open(...) TableDescriptor htd = * manifest.getDescriptor() for (SnapshotRegionManifest regionManifest: * manifest.getRegionManifests()) hri = regionManifest.getRegionInfo() for * (regionManifest.getFamilyFiles()) ... */ public static SnapshotManifest open(final Configuration conf, final FileSystem fs, final Path workingDir, final SnapshotDescription desc) throws IOException { SnapshotManifest manifest = new SnapshotManifest(conf, fs, workingDir, desc, null, null); manifest.load(); return manifest; }
3.68
framework_Escalator_isScrollLocked
/** * Checks whether or not a direction is locked for scrolling. * * @param direction * the direction of the scroll of which to check the lock status * @return <code>true</code> if the direction is locked */ public boolean isScrollLocked(ScrollbarBundle.Direction direction) { switch (direction) { case HORIZONTAL: return horizontalScrollbar.isLocked(); case VERTICAL: return verticalScrollbar.isLocked(); default: throw new UnsupportedOperationException( "Unexpected value: " + direction); } }
3.68
hadoop_MappingRuleActionBase_setFallbackSkip
/** * Sets the fallback method to skip if the action cannot be executed. * We move on to the next rule, ignoring this one. * @return MappingRuleAction The same object for method chaining. */ public MappingRuleAction setFallbackSkip() { fallback = MappingRuleResult.createSkipResult(); return this; }
3.68
flink_SkipListUtils_getKeyLen
/** * Returns the length of the key. * * @param memorySegment memory segment for key space. * @param offset offset of key space in the memory segment. */ public static int getKeyLen(MemorySegment memorySegment, int offset) { return memorySegment.getInt(offset + KEY_LEN_OFFSET); }
3.68
zxing_BinaryBitmap_crop
/** * Returns a new object with cropped image data. Implementations may keep a reference to the * original data rather than a copy. Only callable if isCropSupported() is true. * * @param left The left coordinate, which must be in [0,getWidth()) * @param top The top coordinate, which must be in [0,getHeight()) * @param width The width of the rectangle to crop. * @param height The height of the rectangle to crop. * @return A cropped version of this object. */ public BinaryBitmap crop(int left, int top, int width, int height) { LuminanceSource newSource = binarizer.getLuminanceSource().crop(left, top, width, height); return new BinaryBitmap(binarizer.createBinarizer(newSource)); }
3.68
framework_ComboBox_setTextInputAllowed
/** * Sets whether it is possible to input text into the field or whether the * field area of the component is just used to show what is selected. By * disabling text input, the comboBox will work in the same way as a * {@link NativeSelect} * * @see #isTextInputAllowed() * * @param textInputAllowed * true to allow entering text, false to just show the current * selection */ public void setTextInputAllowed(boolean textInputAllowed) { this.textInputAllowed = textInputAllowed; markAsDirty(); }
3.68
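Illustrative note: a small usage sketch, assuming the Vaadin 7-style com.vaadin.ui.ComboBox API that the snippet appears to belong to; the caption and item values are hypothetical.

ComboBox languages = new ComboBox("Language");
languages.addItem("Java");
languages.addItem("Kotlin");
// With text input disabled the field only displays the current selection,
// so the component behaves like a NativeSelect.
languages.setTextInputAllowed(false);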
flink_FlinkContainersSettings_getHaStoragePath
/** * Gets HA storage path. * * @return The ha storage path. */ public String getHaStoragePath() { return haStoragePath; }
3.68
flink_StreamExecutionEnvironment_fromParallelCollection
// private helper for passing different names private <OUT> DataStreamSource<OUT> fromParallelCollection( SplittableIterator<OUT> iterator, TypeInformation<OUT> typeInfo, String operatorName) { return addSource( new FromSplittableIteratorFunction<>(iterator), operatorName, typeInfo, Boundedness.BOUNDED); }
3.68
graphhopper_Entity_checkRangeInclusive
/** @return whether the number actual is in the range [min, max] */ protected boolean checkRangeInclusive(double min, double max, double actual) { if (actual < min || actual > max) { feed.errors.add(new RangeError(tableName, row, null, min, max, actual)); // TODO set column name in loader so it's available in methods return false; } return true; }
3.68
flink_KeyMap_getCurrentTableCapacity
/** * Gets the current table capacity, i.e., the number of slots in the hash table, without any * overflow chaining. * * @return The number of slots in the hash table. */ public int getCurrentTableCapacity() { return table.length; }
3.68
hudi_HoodieFlinkTableServiceClient_initMetadataWriter
/** * Initializes the table metadata writer, e.g., bootstraps the metadata table * from the filesystem if it does not exist. */ private HoodieBackedTableMetadataWriter initMetadataWriter(Option<String> latestPendingInstant) { return (HoodieBackedTableMetadataWriter) FlinkHoodieBackedTableMetadataWriter.create( FlinkClientUtil.getHadoopConf(), this.config, HoodieFlinkEngineContext.DEFAULT, latestPendingInstant); }
3.68
flink_Channel_setSerializer
/** * Sets the serializer for this Channel. * * @param serializer The serializer to set. */ public void setSerializer(TypeSerializerFactory<?> serializer) { this.serializer = serializer; }
3.68
pulsar_ClientCnx_addPendingLookupRequests
// caller of this method needs to be protected under pendingLookupRequestSemaphore private void addPendingLookupRequests(long requestId, TimedCompletableFuture<LookupDataResult> future) { pendingRequests.put(requestId, future); requestTimeoutQueue.add(new RequestTime(requestId, RequestType.Lookup)); }
3.68
dubbo_MetadataService_getServiceDefinition
/** * Interface definition. * * @return the service definition for the given interface name, version and group */ default String getServiceDefinition(String interfaceName, String version, String group) { return getServiceDefinition(buildKey(interfaceName, group, version)); }
3.68
framework_VTabsheet_sendTabClosedEvent
/** * Informs the server that closing of a tab has been requested. * * @param tabIndex * the index of the tab to close */ void sendTabClosedEvent(int tabIndex) { getRpcProxy().closeTab(tabKeys.get(tabIndex)); }
3.68
hbase_SimpleRpcServer_bind
/** * A convenience method to bind to a given address and report better exceptions if the address is * not a valid host. * @param socket the socket to bind * @param address the address to bind to * @param backlog the number of connections allowed in the queue * @throws BindException if the address can't be bound * @throws UnknownHostException if the address isn't a valid host name * @throws IOException other random errors from bind */ public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) throws IOException { try { socket.bind(address, backlog); } catch (BindException e) { BindException bindException = new BindException("Problem binding to " + address + " : " + e.getMessage()); bindException.initCause(e); throw bindException; } catch (SocketException e) { // If they try to bind to a different host's address, give a better // error message. if ("Unresolved address".equals(e.getMessage())) { throw new UnknownHostException("Invalid hostname for server: " + address.getHostName()); } throw e; } }
3.68
hbase_StructBuilder_toStruct
/** * Retrieve the {@link Struct} represented by {@code this}. */ public Struct toStruct() { return new Struct(fields.toArray(new DataType<?>[fields.size()])); }
3.68
hbase_MasterProcedureUtil_submitProcedure
/** * Helper used to deal with submitting procs with nonce. Internally the * NonceProcedureRunnable.run() will be called only if no one else registered the nonce. any * Exception thrown by the run() method will be collected/handled and rethrown. <code> * long procId = MasterProcedureUtil.submitProcedure( * new NonceProcedureRunnable(procExec, nonceGroup, nonce) { * {@literal @}Override * public void run() { * cpHost.preOperation(); * submitProcedure(new MyProc()); * cpHost.postOperation(); * } * }); * </code> */ public static long submitProcedure(final NonceProcedureRunnable runnable) throws IOException { final ProcedureExecutor<MasterProcedureEnv> procExec = runnable.getProcedureExecutor(); final long procId = procExec.registerNonce(runnable.getNonceKey()); if (procId >= 0) return procId; // someone already registered the nonce try { runnable.run(); } catch (IOException e) { procExec.setFailureResultForNonce(runnable.getNonceKey(), runnable.getDescription(), procExec.getEnvironment().getRequestUser(), e); throw e; } finally { procExec.unregisterNonceIfProcedureWasNotSubmitted(runnable.getNonceKey()); } return runnable.getProcId(); }
3.68
hadoop_S3ClientFactory_withRequesterPays
/** * Set requester pays option. * @param value new value * @return the builder */ public S3ClientCreationParameters withRequesterPays( final boolean value) { requesterPays = value; return this; }
3.68
hbase_MemStoreLABImpl_getOrMakeChunk
/** * Get the current chunk, or, if there is no current chunk, allocate a new one from the JVM. */ private Chunk getOrMakeChunk() { // Try to get the chunk Chunk c = currChunk.get(); if (c != null) { return c; } // No current chunk, so we want to allocate one. We race // against other allocators to CAS in an uninitialized chunk // (which is cheap to allocate) if (lock.tryLock()) { try { // once again check inside the lock c = currChunk.get(); if (c != null) { return c; } c = this.chunkCreator.getChunk(); if (c != null) { // set the curChunk. No need of CAS as only one thread will be here currChunk.set(c); chunks.add(c.getId()); return c; } } finally { lock.unlock(); } } return null; }
3.68
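Illustrative note: a generic, self-contained sketch of the "check, tryLock, re-check" allocation pattern used above, written against plain JDK types rather than the HBase chunk machinery; callers must handle a null result exactly as in the snippet.

import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;

class LazySlot<T> {
  private final AtomicReference<T> current = new AtomicReference<>();
  private final ReentrantLock lock = new ReentrantLock();

  T getOrMake(Supplier<T> factory) {
    T value = current.get();
    if (value != null) {
      return value; // fast path: already allocated
    }
    if (lock.tryLock()) { // only one caller allocates at a time
      try {
        value = current.get(); // re-check inside the lock
        if (value == null) {
          value = factory.get();
          current.set(value);
        }
        return value;
      } finally {
        lock.unlock();
      }
    }
    return null; // lock not acquired: caller handles null, as in the snippet
  }
}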
hadoop_AbfsClientThrottlingIntercept_getReadThrottler
/** * Returns the analyzer for read operations. * @return AbfsClientThrottlingAnalyzer for read. */ AbfsClientThrottlingAnalyzer getReadThrottler() { return readThrottler; }
3.68
hbase_MultithreadedTableMapper_run
/** * Run the application's maps using a thread pool. */ @Override public void run(Context context) throws IOException, InterruptedException { outer = context; int numberOfThreads = getNumberOfThreads(context); mapClass = getMapperClass(context); if (LOG.isDebugEnabled()) { LOG.debug("Configuring multithread runner to use " + numberOfThreads + " threads"); } executor = Executors.newFixedThreadPool(numberOfThreads); for (int i = 0; i < numberOfThreads; ++i) { MapRunner thread = new MapRunner(context); executor.execute(thread); } executor.shutdown(); while (!executor.isTerminated()) { // wait till all the threads are done Thread.sleep(1000); } }
3.68
hadoop_FilterFileSystem_getDefaultBlockSize
// path variants delegate to underlying filesystem @Override public long getDefaultBlockSize(Path f) { return fs.getDefaultBlockSize(f); }
3.68
hbase_TableMapReduceUtil_getConfiguredInputFormat
/** * @return {@link TableInputFormat} .class unless Configuration has something else at * {@link #TABLE_INPUT_CLASS_KEY}. */ private static Class<? extends InputFormat> getConfiguredInputFormat(Job job) { return (Class<? extends InputFormat>) job.getConfiguration().getClass(TABLE_INPUT_CLASS_KEY, TableInputFormat.class); }
3.68
flink_ExecutionEnvironment_resetContextEnvironment
/** * Un-sets the context environment factory. After this method is called, the call to {@link * #getExecutionEnvironment()} will again return a default local execution environment, and it * is possible to explicitly instantiate the LocalEnvironment and the RemoteEnvironment. */ protected static void resetContextEnvironment() { contextEnvironmentFactory = null; threadLocalContextEnvironmentFactory.remove(); }
3.68
flink_ChangelogTruncateHelper_checkpoint
/** * Set the highest {@link SequenceNumber} of changelog used by the given checkpoint. * * @param lastUploadedTo exclusive */ public void checkpoint(long checkpointId, SequenceNumber lastUploadedTo) { checkpointedUpTo.put(checkpointId, lastUploadedTo); }
3.68
morf_OracleDialect_getSqlForAddMonths
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForAddMonths(org.alfasoftware.morf.sql.element.Function) */ @Override protected String getSqlForAddMonths(Function function) { return "ADD_MONTHS(" + getSqlFrom(function.getArguments().get(0)) + ", " + getSqlFrom(function.getArguments().get(1)) + ")"; }
3.68
morf_RemoveIndex_getIndexToBeRemoved
/** * @return The index to be removed. */ public Index getIndexToBeRemoved() { return indexToBeRemoved; }
3.68
hadoop_BalanceJob_removeAfterDone
/** * Automatically remove this job from the scheduler cache when the job is * done. */ public Builder removeAfterDone(boolean remove) { removeAfterDone = remove; return this; }
3.68
hbase_HRegionServer_getFavoredNodesForRegion
/** * Return the favored nodes for a region given its encoded name. Look at the comment around * {@link #regionFavoredNodesMap} on why we convert to InetSocketAddress[] here. * @param encodedRegionName the encoded region name. * @return array of favored locations */ @Override public InetSocketAddress[] getFavoredNodesForRegion(String encodedRegionName) { return Address.toSocketAddress(regionFavoredNodesMap.get(encodedRegionName)); }
3.68
graphhopper_Path_getWeight
/** * This weight will be updated during the algorithm. The initial value is maximum double. */ public double getWeight() { return weight; }
3.68
hadoop_JavaCommandLineBuilder_defineIfSet
/** * Add a <code>-D key=val</code> command to the CLI if <code>val</code> * is not null. * @param key key * @param val value * @return true if the key/value pair was added */ public boolean defineIfSet(String key, String val) { Preconditions.checkArgument(key != null, "null key"); if (val != null) { define(key, val); return true; } else { return false; } }
3.68
hadoop_Nfs3Constant_getValue
/** @return the int value representing the procedure. */ public int getValue() { return ordinal(); }
3.68
hadoop_CachingBlockManager_numReadErrors
/** * Number of errors encountered when reading. * * @return the number of errors encountered when reading. */ public int numReadErrors() { return numReadErrors.get(); }
3.68
hbase_ConfigurationUtil_setKeyValues
/** * Store a collection of Map.Entry's in conf, with each entry separated by ',' and key values * delimited by delimiter. * @param conf configuration to store the collection in * @param key overall key to store keyValues under * @param keyValues kvps to be stored under key in conf * @param delimiter character used to separate each kvp */ public static void setKeyValues(Configuration conf, String key, Collection<Map.Entry<String, String>> keyValues, char delimiter) { List<String> serializedKvps = Lists.newArrayList(); for (Map.Entry<String, String> kvp : keyValues) { serializedKvps.add(kvp.getKey() + delimiter + kvp.getValue()); } conf.setStrings(key, serializedKvps.toArray(new String[serializedKvps.size()])); }
3.68
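Illustrative note: a hedged usage sketch of the helper above. The configuration key and attribute values are made-up examples, a LinkedHashMap keeps the serialized order deterministic, and the import for ConfigurationUtil is omitted because its package is not shown in the snippet.

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

class SetKeyValuesSketch {
  static void store(Configuration conf) {
    Map<String, String> attrs = new LinkedHashMap<>();
    attrs.put("family", "cf1");
    attrs.put("qualifier", "q1");
    // Stored under the key as "family=cf1,qualifier=q1" (entries comma-separated,
    // key and value joined by the '=' delimiter).
    ConfigurationUtil.setKeyValues(conf, "example.table.attributes", attrs.entrySet(), '=');
  }
}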
flink_OutputFormatProvider_of
/** Helper method for creating a static provider with a provided sink parallelism. */ static OutputFormatProvider of(OutputFormat<RowData> outputFormat, Integer sinkParallelism) { return new OutputFormatProvider() { @Override public OutputFormat<RowData> createOutputFormat() { return outputFormat; } @Override public Optional<Integer> getParallelism() { return Optional.ofNullable(sinkParallelism); } }; }
3.68
framework_SerializerHelper_readClass
/** * Deserializes a class reference serialized by * {@link #writeClass(ObjectOutputStream, Class)}. Supports null class * references. * * @param in * {@code ObjectInputStream} to read from. * @return Class reference to the resolved class * @throws ClassNotFoundException * If the class could not be resolved. * @throws IOException * Rethrows IOExceptions from the ObjectInputStream */ public static Class<?> readClass(ObjectInputStream in) throws IOException, ClassNotFoundException { String className = (String) in.readObject(); if (className == null) { return null; } else { return resolveClass(className); } }
3.68
flink_DamBehavior_isMaterializing
/** * Checks whether this enumeration represents some form of materialization, either with a full * dam or without. * * @return True, if this enumeration constant represents a materializing behavior, false * otherwise. */ public boolean isMaterializing() { return this != PIPELINED; }
3.68
hbase_Response_setCode
/** * @param code the HTTP response code */ public void setCode(int code) { this.code = code; }
3.68
flink_HybridShuffleConfiguration_getBufferPoolSizeCheckIntervalMs
/** Check interval of buffer pool's size. */ public long getBufferPoolSizeCheckIntervalMs() { return bufferPoolSizeCheckIntervalMs; }
3.68
dubbo_StringUtils_join
/** * Join a string array with the given separator, like JavaScript's Array.join(). * * @param array String array. * @param split separator * @return the joined String. */ public static String join(String[] array, String split) { if (ArrayUtils.isEmpty(array)) { return EMPTY_STRING; } StringBuilder sb = new StringBuilder(); for (int i = 0; i < array.length; i++) { if (i > 0) { sb.append(split); } sb.append(array[i]); } return sb.toString(); }
3.68
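Illustrative note: a trivial usage sketch of the join helper above; the input values are hypothetical.

String csv = StringUtils.join(new String[] {"a", "b", "c"}, ",");
// csv is "a,b,c"; a null or empty array yields the empty string.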
flink_MutableHashTable_getNewInMemoryPartition
/** * Returns a new inMemoryPartition object. This is required as a plug for * ReOpenableMutableHashTable. */ protected HashPartition<BT, PT> getNewInMemoryPartition(int number, int recursionLevel) { return new HashPartition<BT, PT>( this.buildSideSerializer, this.probeSideSerializer, number, recursionLevel, this.availableMemory.remove(this.availableMemory.size() - 1), this, this.segmentSize); }
3.68
morf_SqlUtils_clobLiteral
/** * Constructs a new ClobFieldLiteral from a given string. * @param value - the literal value to use. * @return ClobFieldLiteral */ public static ClobFieldLiteral clobLiteral(String value) { return new ClobFieldLiteral(value); }
3.68
flink_ResultPartitionType_mustBePipelinedConsumed
/** Returns whether this partition's upstream and downstream must be scheduled at the same time. */ public boolean mustBePipelinedConsumed() { return consumingConstraint == ConsumingConstraint.MUST_BE_PIPELINED; }
3.68