Columns: name (string, length 12-178), code_snippet (string, length 8-36.5k), score (float64, range 3.26-3.68)
flink_AbstractParameterTool_get
/** * Returns the String value for the given key. If the key does not exist it will return the * given default value. */ public String get(String key, String defaultValue) { addToDefaults(key, defaultValue); String value = get(key); if (value == null) { return defaultValue; } else { return value; } }
3.68
hadoop_SinglePendingCommit_getDate
/** * Timestamp as date; no expectation of parseability. * @return date string */ public String getDate() { return date; }
3.68
rocketmq-connect_WorkerConnector_initialize
/** * initialize connector */ public void initialize() { try { if (!isSourceConnector() && !isSinkConnector()) { throw new ConnectException("Connector implementations must be a subclass of either SourceConnector or SinkConnector"); } log.debug("{} Initializing connector {}", this, connector); connector.validate(keyValue); connector.init(context); } catch (Throwable t) { log.error("{} Error initializing connector", this, t); onFailure(t); } }
3.68
morf_AbstractSqlDialectTest_expectedHints5
/** * @return The expected SQL when no hint directive is used on the {@link InsertStatement}. */ private String expectedHints5() { return "INSERT INTO " + tableName("Foo") + " SELECT a, b FROM " + tableName("Foo_1"); }
3.68
dubbo_ReferenceCountedResource_retain
/** * Increments the reference count by 1. */ public final ReferenceCountedResource retain() { long oldCount = COUNTER_UPDATER.getAndIncrement(this); if (oldCount <= 0) { COUNTER_UPDATER.getAndDecrement(this); throw new AssertionError("This instance has been destroyed"); } return this; }
3.68
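The retain() above shows the optimistic increment-then-check idiom for reference counting: bump the counter first, then roll the bump back and fail if the resource turned out to be already destroyed. A minimal self-contained sketch of the same idiom, using a hypothetical Resource class and a plain AtomicLong instead of Dubbo's field updater:

    import java.util.concurrent.atomic.AtomicLong;

    // Sketch only: optimistic retain with rollback on the failure path.
    class Resource {
        private final AtomicLong refCount = new AtomicLong(1);

        Resource retain() {
            long oldCount = refCount.getAndIncrement();
            if (oldCount <= 0) {
                refCount.getAndDecrement(); // undo the speculative increment
                throw new AssertionError("This instance has been destroyed");
            }
            return this;
        }

        void release() {
            if (refCount.decrementAndGet() == 0) {
                // free underlying resources here
            }
        }
    }

Incrementing before checking keeps the fast path to a single atomic operation; the rollback only runs on the already-failed path.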
flink_PipelinedSubpartition_getBuffersInBacklogUnsafe
/** Gets the number of non-event buffers in this subpartition. */ @SuppressWarnings("FieldAccessNotGuarded") @Override public int getBuffersInBacklogUnsafe() { if (isBlocked || buffers.isEmpty()) { return 0; } if (flushRequested || isFinished || !checkNotNull(buffers.peekLast()).getBufferConsumer().isBuffer()) { return buffersInBacklog; } else { return Math.max(buffersInBacklog - 1, 0); } }
3.68
framework_SQLContainer_addContainerFilter
/** * {@inheritDoc} */ public void addContainerFilter(Object propertyId, String filterString, boolean ignoreCase, boolean onlyMatchPrefix) { if (propertyId == null || !propertyIds.contains(propertyId)) { return; } /* Generate the Filter object */ String likeStr = onlyMatchPrefix ? filterString + "%" : "%" + filterString + "%"; Like like = new Like(propertyId.toString(), likeStr); like.setCaseSensitive(!ignoreCase); filters.add(like); refresh(); }
3.68
graphhopper_CustomModelParser_injectStatements
/** * Injects the already parsed expressions (converted to BlockStatement) via Janino's DeepCopier to the provided * CompilationUnit cu (a class file). */ private static Java.CompilationUnit injectStatements(List<Java.BlockStatement> priorityStatements, List<Java.BlockStatement> speedStatements, Java.CompilationUnit cu) throws CompileException { cu = new DeepCopier() { boolean speedInjected = false; boolean priorityInjected = false; @Override public Java.MethodDeclarator copyMethodDeclarator(Java.MethodDeclarator subject) throws CompileException { if (subject.name.equals("getSpeed") && !speedStatements.isEmpty() && !speedInjected) { speedInjected = true; return injectStatements(subject, this, speedStatements); } else if (subject.name.equals("getPriority") && !priorityStatements.isEmpty() && !priorityInjected) { priorityInjected = true; return injectStatements(subject, this, priorityStatements); } else { return super.copyMethodDeclarator(subject); } } }.copyCompilationUnit(cu); return cu; }
3.68
flink_WrapJsonAggFunctionArgumentsRule_addProjections
/** * Adds (wrapped) projections for affected arguments of the aggregation. For duplicate * projection fields, we only wrap them once and record the conversion relationship in the map * valueIndicesAfterProjection. * * <p>Note that we cannot override any of the projections as a field may be used multiple times, * and in particular outside of the aggregation call. Therefore, we explicitly add the wrapped * projection as an additional one. */ private void addProjections( RelOptCluster cluster, RelBuilder relBuilder, List<Integer> affectedArgs, int inputCount, Map<Integer, Integer> valueIndicesAfterProjection) { final BridgingSqlFunction operandToStringOperator = BridgingSqlFunction.of(cluster, JSON_STRING); final List<RexNode> projects = new ArrayList<>(); for (Integer argIdx : affectedArgs) { valueIndicesAfterProjection.put(argIdx, inputCount + projects.size()); projects.add(relBuilder.call(operandToStringOperator, relBuilder.field(argIdx))); } relBuilder.projectPlus(projects); }
3.68
flink_ExternalResourceOptions_getAmountConfigOptionForResource
/** Generate the config option key for the amount of external resource with resource_name. */ public static String getAmountConfigOptionForResource(String resourceName) { return keyWithResourceNameAndSuffix(resourceName, EXTERNAL_RESOURCE_AMOUNT_SUFFIX); }
3.68
zxing_CameraManager_getFramingRect
/** * Calculates the framing rect which the UI should draw to show the user where to place the * barcode. This target helps with alignment as well as forces the user to hold the device * far enough away to ensure the image will be in focus. * * @return The rectangle to draw on screen in window coordinates. */ public synchronized Rect getFramingRect() { if (framingRect == null) { if (camera == null) { return null; } Point screenResolution = configManager.getScreenResolution(); if (screenResolution == null) { // Called early, before init even finished return null; } int width = findDesiredDimensionInRange(screenResolution.x, MIN_FRAME_WIDTH, MAX_FRAME_WIDTH); int height = findDesiredDimensionInRange(screenResolution.y, MIN_FRAME_HEIGHT, MAX_FRAME_HEIGHT); int leftOffset = (screenResolution.x - width) / 2; int topOffset = (screenResolution.y - height) / 2; framingRect = new Rect(leftOffset, topOffset, leftOffset + width, topOffset + height); } return framingRect; }
3.68
dubbo_Configuration_getProperty
/** * Gets a property from the configuration. The default value will be returned if the configuration doesn't contain * the mapping for the specified key. * * @param key property to retrieve * @param defaultValue default value * @return the value to which this configuration maps the specified key, or the default value if the configuration * contains no mapping for this key. */ default Object getProperty(String key, Object defaultValue) { Object value = getInternalProperty(key); return value != null ? value : defaultValue; }
3.68
hudi_HoodieAvroUtils_convertValueForAvroLogicalTypes
/** * This method converts values for fields with certain Avro Logical data types that require special handling. * <p> * Logical Date Type is converted to actual Date value instead of Epoch Integer which is how it is * represented/stored in parquet. * <p> * Decimal Data Type is converted to actual decimal value instead of bytes/fixed which is how it is * represented/stored in parquet. * * @param fieldSchema avro field schema * @param fieldValue avro field value * @return field value either converted (for certain data types) or as it is. */ private static Object convertValueForAvroLogicalTypes(Schema fieldSchema, Object fieldValue, boolean consistentLogicalTimestampEnabled) { if (fieldSchema.getLogicalType() == LogicalTypes.date()) { return LocalDate.ofEpochDay(Long.parseLong(fieldValue.toString())); } else if (fieldSchema.getLogicalType() == LogicalTypes.timestampMillis() && consistentLogicalTimestampEnabled) { return new Timestamp(Long.parseLong(fieldValue.toString())); } else if (fieldSchema.getLogicalType() == LogicalTypes.timestampMicros() && consistentLogicalTimestampEnabled) { return new Timestamp(Long.parseLong(fieldValue.toString()) / 1000); } else if (fieldSchema.getLogicalType() instanceof LogicalTypes.Decimal) { Decimal dc = (Decimal) fieldSchema.getLogicalType(); DecimalConversion decimalConversion = new DecimalConversion(); if (fieldSchema.getType() == Schema.Type.FIXED) { return decimalConversion.fromFixed((GenericFixed) fieldValue, fieldSchema, LogicalTypes.decimal(dc.getPrecision(), dc.getScale())); } else if (fieldSchema.getType() == Schema.Type.BYTES) { ByteBuffer byteBuffer = (ByteBuffer) fieldValue; BigDecimal convertedValue = decimalConversion.fromBytes(byteBuffer, fieldSchema, LogicalTypes.decimal(dc.getPrecision(), dc.getScale())); byteBuffer.rewind(); return convertedValue; } } return fieldValue; }
3.68
flink_DataSet_print
/** * Writes a DataSet to the standard output stream (stdout). * * <p>For each element of the DataSet the result of {@link Object#toString()} is written. * * @param sinkIdentifier The string to prefix the output with. * @return The DataSink that writes the DataSet. * @deprecated Use {@link #printOnTaskManager(String)} instead. */ @Deprecated @PublicEvolving public DataSink<T> print(String sinkIdentifier) { return output(new PrintingOutputFormat<T>(sinkIdentifier, false)); }
3.68
hbase_BackupInfo_setIncrTimestampMap
/** * Set the new region server log timestamps after distributed log roll * @param prevTableSetTimestampMap table timestamp map */ public void setIncrTimestampMap(Map<TableName, Map<String, Long>> prevTableSetTimestampMap) { this.incrTimestampMap = prevTableSetTimestampMap; }
3.68
flink_AllWindowedStream_apply
/** * Applies the given window function to each window. The window function is called for each * evaluation of the window. The output of the window function is * interpreted as a regular non-windowed stream. * * <p>Arriving data is incrementally aggregated using the given reducer. * * @param reduceFunction The reduce function that is used for incremental aggregation. * @param function The window function. * @param resultType Type information for the result type of the window function * @return The data stream that is the result of applying the window function to the window. * @deprecated Use {@link #reduce(ReduceFunction, AllWindowFunction, TypeInformation)} instead. */ @Deprecated public <R> SingleOutputStreamOperator<R> apply( ReduceFunction<T> reduceFunction, AllWindowFunction<T, R, W> function, TypeInformation<R> resultType) { if (reduceFunction instanceof RichFunction) { throw new UnsupportedOperationException( "ReduceFunction of apply can not be a RichFunction."); } // clean the closures function = input.getExecutionEnvironment().clean(function); reduceFunction = input.getExecutionEnvironment().clean(reduceFunction); String callLocation = Utils.getCallLocationName(); String udfName = "AllWindowedStream." + callLocation; String opName; KeySelector<T, Byte> keySel = input.getKeySelector(); OneInputStreamOperator<T, R> operator; if (evictor != null) { @SuppressWarnings({"unchecked", "rawtypes"}) TypeSerializer<StreamRecord<T>> streamRecordSerializer = (TypeSerializer<StreamRecord<T>>) new StreamElementSerializer( input.getType() .createSerializer( getExecutionEnvironment().getConfig())); ListStateDescriptor<StreamRecord<T>> stateDesc = new ListStateDescriptor<>("window-contents", streamRecordSerializer); opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + evictor + ", " + udfName + ")"; operator = new EvictingWindowOperator<>( windowAssigner, windowAssigner.getWindowSerializer( getExecutionEnvironment().getConfig()), keySel, input.getKeyType() .createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalIterableAllWindowFunction<>( new ReduceApplyAllWindowFunction<>(reduceFunction, function)), trigger, evictor, allowedLateness, lateDataOutputTag); } else { ReducingStateDescriptor<T> stateDesc = new ReducingStateDescriptor<>( "window-contents", reduceFunction, input.getType() .createSerializer(getExecutionEnvironment().getConfig())); opName = "TriggerWindow(" + windowAssigner + ", " + stateDesc + ", " + trigger + ", " + udfName + ")"; operator = new WindowOperator<>( windowAssigner, windowAssigner.getWindowSerializer( getExecutionEnvironment().getConfig()), keySel, input.getKeyType() .createSerializer(getExecutionEnvironment().getConfig()), stateDesc, new InternalSingleValueAllWindowFunction<>(function), trigger, allowedLateness, lateDataOutputTag); } return input.transform(opName, resultType, operator).forceNonParallel(); }
3.68
flink_ZooKeeperUtils_generateZookeeperPath
/** Creates a ZooKeeper path of the form "/a/b/.../z". */ public static String generateZookeeperPath(String... paths) { return Arrays.stream(paths) .map(ZooKeeperUtils::trimSlashes) .filter(s -> !s.isEmpty()) .collect(Collectors.joining("/", "/", "")); }
3.68
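To see what the trimming and filtering in generateZookeeperPath buy, here is a hypothetical standalone rendition (trimSlashes is private in ZooKeeperUtils, so this sketch assumes an equivalent helper):

    import java.util.Arrays;
    import java.util.stream.Collectors;

    public class ZkPathDemo {
        // Assumed equivalent of the private ZooKeeperUtils.trimSlashes helper.
        static String trimSlashes(String s) {
            return s.replaceAll("^/+", "").replaceAll("/+$", "");
        }

        public static void main(String[] args) {
            String path = Arrays.stream(new String[] {"flink", "/cluster-1/", "", "leader"})
                    .map(ZkPathDemo::trimSlashes)
                    .filter(s -> !s.isEmpty())          // drop empty segments
                    .collect(Collectors.joining("/", "/", ""));
            System.out.println(path); // prints /flink/cluster-1/leader
        }
    }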
rocketmq-connect_TimestampIncrementingQuerier_beginTimestampValue
/** * Get the begin timestamp from the offset topic. * * @return the begin timestamp read from the offset topic */ @Override public Timestamp beginTimestampValue() { return offset.getTimestampOffset(); }
3.68
flink_Plan_getPostPassClassName
/** * Gets the optimizer post-pass class for this job. The post-pass typically creates utility * classes for data types and is specific to a particular data model (record, tuple, Scala, ...) * * @return The name of the class implementing the optimizer post-pass. */ public String getPostPassClassName() { return "org.apache.flink.optimizer.postpass.JavaApiPostPass"; }
3.68
morf_OracleMetaDataProvider_keyMap
/** * Use to access the metadata for the primary keys in the specified connection. * Lazily initialises the metadata, and only loads it once. * * @return Primary keys metadata. */ private Map<String, List<String>> keyMap() { if (keyMap != null) { return keyMap; } keyMap = new HashMap<>(); expensiveReadTableKeys(); return keyMap; }
3.68
morf_XmlDataSetConsumer_buildIndexAttributes
/** * Build the attributes for a database index * * @param index The index * @return The attributes */ private Attributes buildIndexAttributes(Index index) { AttributesImpl indexAttributes = new AttributesImpl(); indexAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.NAME_ATTRIBUTE, XmlDataSetNode.NAME_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, index.getName()); indexAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.UNIQUE_ATTRIBUTE, XmlDataSetNode.UNIQUE_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, Boolean.toString(index.isUnique())); String columnNames = StringUtils.join(index.columnNames(), ","); indexAttributes.addAttribute(XmlDataSetNode.URI, XmlDataSetNode.COLUMNS_ATTRIBUTE, XmlDataSetNode.COLUMNS_ATTRIBUTE, XmlDataSetNode.STRING_TYPE, columnNames); return indexAttributes; }
3.68
cron-utils_FieldSpecialCharsDefinitionBuilder_withValidRange
/** * Allows setting a range of valid values for the field. * * @param startRange - start range value * @param endRange - end range value * @return same FieldSpecialCharsDefinitionBuilder instance */ @Override public FieldSpecialCharsDefinitionBuilder withValidRange(final int startRange, final int endRange) { super.withValidRange(startRange, endRange); return this; }
3.68
dubbo_AdaptiveClassCodeGenerator_generatePackageInfo
/** * generate package info */ private String generatePackageInfo() { return String.format(CODE_PACKAGE, type.getPackage().getName()); }
3.68
flink_FileSystemCommitter_commitPartitions
/** * Commits the partitions with a filter to filter out invalid task attempt files. In speculative * execution mode, there might be some files which do not belong to the finished attempt. * * @param taskAttemptFilter the filter that accepts subtaskIndex and attemptNumber * @throws Exception if partition commitment fails */ public void commitPartitions(BiPredicate<Integer, Integer> taskAttemptFilter) throws Exception { FileSystem fs = factory.create(tmpPath.toUri()); List<Path> taskPaths = listTaskTemporaryPaths(fs, tmpPath, taskAttemptFilter); try (PartitionLoader loader = new PartitionLoader( overwrite, fs, metaStoreFactory, isToLocal, identifier, policies)) { if (partitionColumnSize > 0) { if (taskPaths.isEmpty() && !staticPartitions.isEmpty()) { if (partitionColumnSize == staticPartitions.size()) { loader.loadEmptyPartition(this.staticPartitions); } } else { for (Map.Entry<LinkedHashMap<String, String>, List<Path>> entry : collectPartSpecToPaths(fs, taskPaths, partitionColumnSize).entrySet()) { loader.loadPartition(entry.getKey(), entry.getValue(), true); } } } else { loader.loadNonPartition(taskPaths, true); } } finally { for (Path taskPath : taskPaths) { fs.delete(taskPath, true); } } }
3.68
hbase_HBaseCommonTestingUtility_setupDataTestDir
/** * Sets up a directory for a test to use. * @return New directory path, if created. */ protected Path setupDataTestDir() { if (this.dataTestDir != null) { LOG.warn("Data test dir already setup in " + dataTestDir.getAbsolutePath()); return null; } Path testPath = getRandomDir(); this.dataTestDir = new File(testPath.toString()).getAbsoluteFile(); // Set this property so if mapreduce jobs run, they will use this as their home dir. System.setProperty("test.build.dir", this.dataTestDir.toString()); if (deleteOnExit()) { this.dataTestDir.deleteOnExit(); } createSubDir("hbase.local.dir", testPath, "hbase-local-dir"); return testPath; }
3.68
hadoop_BufferPool_release
/** * Releases a previously acquired resource. * @param data the {@code BufferData} instance to release. * @throws IllegalArgumentException if data is null. * @throws IllegalArgumentException if data cannot be released due to its state. */ public synchronized void release(BufferData data) { checkNotNull(data, "data"); synchronized (data) { checkArgument( canRelease(data), String.format("Unable to release buffer: %s", data)); ByteBuffer buffer = allocated.get(data); if (buffer == null) { // Likely released earlier. return; } buffer.clear(); pool.release(buffer); allocated.remove(data); } releaseDoneBlocks(); }
3.68
hbase_OrderedBytes_unexpectedHeader
/** * Creates the standard exception when the encoded header byte is unexpected for the decoding * context. * @param header value used in error message. */ private static IllegalArgumentException unexpectedHeader(byte header) { throw new IllegalArgumentException( "unexpected value in first byte: 0x" + Long.toHexString(header)); }
3.68
framework_VAbsoluteLayout_getWidgetIndex
/* * (non-Javadoc) * * @see * com.google.gwt.user.client.ui.ComplexPanel#getWidgetIndex(com.google. * gwt.user.client.ui.Widget) */ @Override public int getWidgetIndex(Widget child) { for (int i = 0, j = 0; i < super.getWidgetCount(); i++) { Widget w = super.getWidget(i); if (w instanceof AbsoluteWrapper) { if (child == w) { return j; } else { j++; } } } return -1; }
3.68
hadoop_ConnectionPool_close
/** * Close the connection pool. */ protected synchronized void close() { long timeSinceLastActive = TimeUnit.MILLISECONDS.toSeconds( Time.now() - getLastActiveTime()); LOG.debug("Shutting down connection pool \"{}\" used {} seconds ago", this.connectionPoolId, timeSinceLastActive); for (ConnectionContext connection : this.connections) { connection.close(true); } this.connections.clear(); }
3.68
pulsar_MathUtils_signSafeMod
/** * Compute sign safe mod. * * @param dividend the dividend * @param divisor the divisor, expected to be positive * @return the non-negative remainder of dividend modulo divisor, in the range [0, divisor) */ public static int signSafeMod(long dividend, int divisor) { int mod = (int) (dividend % divisor); if (mod < 0) { mod += divisor; } return mod; }
3.68
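The adjustment matters because Java's % operator keeps the sign of the dividend, so a negative dividend would otherwise yield a negative value, unusable as, say, a partition index. A worked example with hypothetical values:

    public class SignSafeModDemo {
        public static void main(String[] args) {
            long dividend = -7;
            int divisor = 3;
            int raw = (int) (dividend % divisor);     // -1: Java remainder keeps the dividend's sign
            int safe = raw < 0 ? raw + divisor : raw; // 2: shifted into [0, divisor)
            System.out.println(raw + " vs " + safe);  // prints -1 vs 2
        }
    }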
hbase_FutureUtils_wrapFuture
/** * Return a {@link CompletableFuture} which is the same as the given {@code future}, but executes all * the callbacks in the given {@code executor}. */ public static <T> CompletableFuture<T> wrapFuture(CompletableFuture<T> future, Executor executor) { CompletableFuture<T> wrappedFuture = new CompletableFuture<>(); addListener(future, (r, e) -> { if (e != null) { wrappedFuture.completeExceptionally(e); } else { wrappedFuture.complete(r); } }, executor); return wrappedFuture; }
3.68
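The same re-dispatching idiom can be written with plain JDK primitives; a sketch (the addListener helper above is HBase-specific, so this version assumes whenCompleteAsync instead):

    import java.util.concurrent.*;

    public class WrapFutureDemo {
        // Complete a fresh future from a callback that is forced onto the given executor.
        static <T> CompletableFuture<T> wrap(CompletableFuture<T> future, Executor executor) {
            CompletableFuture<T> wrapped = new CompletableFuture<>();
            future.whenCompleteAsync((r, e) -> {
                if (e != null) {
                    wrapped.completeExceptionally(e);
                } else {
                    wrapped.complete(r);
                }
            }, executor);
            return wrapped;
        }

        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newSingleThreadExecutor();
            CompletableFuture<String> f = CompletableFuture.completedFuture("ok");
            System.out.println(wrap(f, pool).get()); // callbacks ran on 'pool'
            pool.shutdown();
        }
    }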
flink_SortMergeResultPartition_writeLargeRecord
/** * Spills the large record into the target {@link PartitionedFile} as a separate data region. */ private void writeLargeRecord( ByteBuffer record, int targetSubpartition, DataType dataType, boolean isBroadcast) throws IOException { // a large record will be spilled to a separate data region fileWriter.startNewRegion(isBroadcast); List<BufferWithChannel> toWrite = new ArrayList<>(); Queue<MemorySegment> segments = new ArrayDeque<>(freeSegments); while (record.hasRemaining()) { if (segments.isEmpty()) { fileWriter.writeBuffers(toWrite); toWrite.clear(); segments = new ArrayDeque<>(freeSegments); } int toCopy = Math.min(record.remaining(), networkBufferSize); MemorySegment writeBuffer = checkNotNull(segments.poll()); writeBuffer.put(0, record, toCopy); NetworkBuffer buffer = new NetworkBuffer(writeBuffer, (buf) -> {}, dataType, toCopy); BufferWithChannel bufferWithChannel = new BufferWithChannel(buffer, targetSubpartition); updateStatistics(bufferWithChannel, isBroadcast); toWrite.add(compressBufferIfPossible(bufferWithChannel)); } fileWriter.writeBuffers(toWrite); releaseFreeBuffers(); }
3.68
morf_AbstractSqlDialectTest_verifyBooleanPrepareStatementParameter
/** * Tests the logic used for transferring a boolean {@link Record} value to a * {@link PreparedStatement}. For overriding in specific DB tests * * @throws SQLException when a database access error occurs */ protected void verifyBooleanPrepareStatementParameter() throws SQLException { final SqlParameter booleanColumn = parameter("booleanColumn").type(DataType.BOOLEAN); verify(callPrepareStatementParameter(booleanColumn, null)).setObject(booleanColumn, null); verify(callPrepareStatementParameter(booleanColumn, "true")).setBoolean(booleanColumn, true); verify(callPrepareStatementParameter(booleanColumn, "false")).setBoolean(booleanColumn, false); }
3.68
flink_RestfulGateway_reportJobClientHeartbeat
/** The client reports the heartbeat to the dispatcher for aliveness. */ default CompletableFuture<Void> reportJobClientHeartbeat( JobID jobId, long expiredTimestamp, Time timeout) { return FutureUtils.completedVoidFuture(); }
3.68
morf_DataValueLookupMetadata_hashCode
/** * @see java.lang.Object#hashCode() */ @Override public int hashCode() { int h = hash; if (h != 0) { return h; } final int prime = 31; h = 1; h = prime * h + (keys == null ? 0 : keys.hashCode()); hash = h; return h; }
3.68
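The hashCode above uses the same racy single-check caching idiom as java.lang.String: several threads may race to compute the hash, but they all compute the same value, so the race is benign and needs no synchronization. A minimal sketch with a hypothetical CachedKey class:

    import java.util.List;

    // Racy single-check hash caching: 'hash' may be written by several threads,
    // but each writes the same deterministic value, so no locking is needed.
    final class CachedKey {
        private final List<String> keys;
        private int hash; // 0 means "not yet computed"

        CachedKey(List<String> keys) {
            this.keys = keys;
        }

        @Override
        public int hashCode() {
            int h = hash;
            if (h == 0) {
                h = 31 + (keys == null ? 0 : keys.hashCode());
                hash = h;
            }
            return h;
        }
    }

The only cost of the idiom is that a value whose hash genuinely equals the sentinel 0 is recomputed on every call, a trade-off String makes as well.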
hudi_ExpressionPredicates_bindValueLiterals
/** * Binds value literals to create an IN predicate. * * @param valueLiterals The value literals to bind. * @return An IN predicate. */ public ColumnPredicate bindValueLiterals(List<ValueLiteralExpression> valueLiterals) { this.literals = valueLiterals.stream().map(valueLiteral -> { Object literalObject = getValueFromLiteral(valueLiteral); // validate that literal is serializable if (literalObject instanceof Serializable) { return (Serializable) literalObject; } else { LOG.warn("Encountered a non-serializable literal. " + "Cannot push predicate with value literal [{}] into FileInputFormat. " + "This is a bug and should be reported.", valueLiteral); return null; } }).collect(Collectors.toList()); return this; }
3.68
framework_AbstractRemoteDataSource_fillCacheFromInvalidatedRows
/** * Go through items invalidated by {@link #insertRowData(int, int)}. If the * server has pre-emptively sent added row data immediately after informing * of row addition, the invalid cache can be restored to the proper index range. * * @param maxCacheRange * the maximum range of rows that can be cached */ private void fillCacheFromInvalidatedRows(Range maxCacheRange) { if (invalidatedRows == null || invalidatedRows.isEmpty()) { // No old invalid cache available return; } Range potentialCache = maxCacheRange.partitionWith(cached)[2]; int start = potentialCache.getStart(); int last = start; try { if (potentialCache.isEmpty() || invalidatedRows.containsKey(start - 1)) { // Cache is already full or invalidated rows contains unexpected // indices. return; } for (int i = start; i < potentialCache.getEnd(); ++i) { if (!invalidatedRows.containsKey(i)) { return; } T row = invalidatedRows.get(i); indexToRowMap.put(i, row); keyToIndexMap.put(getRowKey(row), i); last = i; } // Cache filled from invalidated rows. Can continue as if it was // never invalidated. invalidatedRows = null; } finally { // Update cache range and clean up if (invalidatedRows != null) { invalidatedRows.clear(); } Range updated = Range.between(start, last + 1); cached = cached.combineWith(updated); dataChangeHandlers.forEach(dch -> dch .dataUpdated(updated.getStart(), updated.length())); } }
3.68
framework_BasicEvent_removeEventChangeListener
/* * (non-Javadoc) * * @see * com.vaadin.addon.calendar.ui.CalendarComponentEvents.EventChangeNotifier * #removeListener * (com.vaadin.addon.calendar.ui.CalendarComponentEvents.EventChangeListener * ) */ @Override public void removeEventChangeListener(EventChangeListener listener) { listeners.remove(listener); }
3.68
hudi_StreamerUtil_getTableConfig
/** * Returns the table config or empty if the table does not exist. */ public static Option<HoodieTableConfig> getTableConfig(String basePath, org.apache.hadoop.conf.Configuration hadoopConf) { FileSystem fs = FSUtils.getFs(basePath, hadoopConf); Path metaPath = new Path(basePath, HoodieTableMetaClient.METAFOLDER_NAME); try { if (fs.exists(new Path(metaPath, HoodieTableConfig.HOODIE_PROPERTIES_FILE))) { return Option.of(new HoodieTableConfig(fs, metaPath.toString(), null, null)); } } catch (IOException e) { throw new HoodieIOException("Get table config error", e); } return Option.empty(); }
3.68
hadoop_FileDeletionTask_getSubDir
/** * Get the subdirectory to delete. * * @return the subDir for the FileDeletionTask. */ public Path getSubDir() { return this.subDir; }
3.68
hudi_FileSystemViewManager_createViewManager
/** * Main Factory method for building file-system views. * */ public static FileSystemViewManager createViewManager(final HoodieEngineContext context, final HoodieMetadataConfig metadataConfig, final FileSystemViewStorageConfig config, final HoodieCommonConfig commonConfig, final SerializableFunctionUnchecked<HoodieTableMetaClient, HoodieTableMetadata> metadataCreator) { LOG.info("Creating View Manager with storage type :" + config.getStorageType()); final SerializableConfiguration conf = context.getHadoopConf(); switch (config.getStorageType()) { case EMBEDDED_KV_STORE: LOG.info("Creating embedded rocks-db based Table View"); return new FileSystemViewManager(context, config, (metaClient, viewConf) -> createRocksDBBasedFileSystemView(conf, viewConf, metaClient)); case SPILLABLE_DISK: LOG.info("Creating Spillable Disk based Table View"); return new FileSystemViewManager(context, config, (metaClient, viewConf) -> createSpillableMapBasedFileSystemView(conf, viewConf, metaClient, commonConfig)); case MEMORY: LOG.info("Creating in-memory based Table View"); return new FileSystemViewManager(context, config, (metaClient, viewConfig) -> createInMemoryFileSystemView(metadataConfig, viewConfig, metaClient, metadataCreator)); case REMOTE_ONLY: LOG.info("Creating remote only table view"); return new FileSystemViewManager(context, config, (metaClient, viewConfig) -> createRemoteFileSystemView(conf, viewConfig, metaClient)); case REMOTE_FIRST: LOG.info("Creating remote first table view"); return new FileSystemViewManager(context, config, (metaClient, viewConfig) -> { RemoteHoodieTableFileSystemView remoteFileSystemView = createRemoteFileSystemView(conf, viewConfig, metaClient); SyncableFileSystemView secondaryView; switch (viewConfig.getSecondaryStorageType()) { case MEMORY: secondaryView = createInMemoryFileSystemView(metadataConfig, viewConfig, metaClient, metadataCreator); break; case EMBEDDED_KV_STORE: secondaryView = createRocksDBBasedFileSystemView(conf, viewConfig, metaClient); break; case SPILLABLE_DISK: secondaryView = createSpillableMapBasedFileSystemView(conf, viewConfig, metaClient, commonConfig); break; default: throw new IllegalArgumentException("Secondary Storage type can only be in-memory or spillable. Was :" + viewConfig.getSecondaryStorageType()); } return new PriorityBasedFileSystemView(remoteFileSystemView, secondaryView); }); default: throw new IllegalArgumentException("Unknown file system view type :" + config.getStorageType()); } }
3.68
hbase_HRegion_throwOnInterrupt
/** * Throw the correct exception upon interrupt * @param t cause */ // Package scope for tests IOException throwOnInterrupt(Throwable t) { if (this.closing.get()) { return (NotServingRegionException) new NotServingRegionException( getRegionInfo().getRegionNameAsString() + " is closing").initCause(t); } return (InterruptedIOException) new InterruptedIOException().initCause(t); }
3.68
flink_LeaderRetriever_getLeaderNow
/** * Returns the current leader information if available. Otherwise it returns an empty optional. * * @return The current leader information if available. Otherwise it returns an empty optional. * @throws Exception if the leader future has been completed with an exception */ public Optional<Tuple2<String, UUID>> getLeaderNow() throws Exception { CompletableFuture<Tuple2<String, UUID>> leaderFuture = this.atomicLeaderFuture.get(); if (leaderFuture != null) { if (leaderFuture.isDone()) { return Optional.of(leaderFuture.get()); } else { return Optional.empty(); } } else { return Optional.empty(); } }
3.68
hadoop_FindOptions_setConfiguration
/** * Set the {@link Configuration} * * @param configuration {@link Configuration} */ public void setConfiguration(Configuration configuration) { this.configuration = configuration; }
3.68
hadoop_CustomResourceMetrics_initAndGetCustomResources
/** * Get a map of all custom resource metrics. * @return map of custom resources */ public Map<String, Long> initAndGetCustomResources() { Map<String, Long> customResources = new HashMap<String, Long>(); ResourceInformation[] resources = ResourceUtils.getResourceTypesArray(); // skip the first two entries (memory and vcores), which are mandatory rather than custom for (int i = 2; i < resources.length; i++) { ResourceInformation resource = resources[i]; customResources.put(resource.getName(), Long.valueOf(0)); } return customResources; }
3.68
hmily_CuratorZookeeperClient_get
/** * Get string. * * @param path the path * @return the string */ public String get(final String path) { CuratorCache cache = findTreeCache(path); if (null == cache) { return getDirectly(path); } Optional<ChildData> resultInCache = cache.get(path); if (resultInCache.isPresent()) { return null == resultInCache.get().getData() ? null : new String(resultInCache.get().getData(), Charsets.UTF_8); } return getDirectly(path); }
3.68
framework_Table_isSortDisabled
/** * Checks whether sorting is disabled altogether. * * Returns true if no sortable columns are given, even in the case where the data source * would support sorting. * * @return True if sorting is disabled. * @deprecated As of 7.0, use {@link #isSortEnabled()} instead */ @Deprecated public boolean isSortDisabled() { return !isSortEnabled(); }
3.68
hbase_StorageClusterStatusModel_setCurrentCompactedKVs
/** * @param currentCompactedKVs The completed count of key values in currently running * compaction */ public void setCurrentCompactedKVs(long currentCompactedKVs) { this.currentCompactedKVs = currentCompactedKVs; }
3.68
pulsar_OffloadIndexBlockV2Impl_toStream
/** * Get the content of the index block as InputStream. * Read out in format: * | index_magic_header | index_block_len | data_object_len | data_header_len | * | index_entry_count | segment_metadata_len | segment metadata | index entries... | */ @Override public IndexInputStream toStream() throws IOException { int indexBlockLength = 4 /* magic header */ + 4 /* index block length */ + 8 /* data object length */ + 8; /* data header length */ Map<Long, byte[]> metaBytesMap = new HashMap<>(); for (Map.Entry<Long, TreeMap<Long, OffloadIndexEntryImpl>> e : this.indexEntries.entrySet()) { Long ledgerId = e.getKey(); TreeMap<Long, OffloadIndexEntryImpl> ledgerIndexEntries = e.getValue(); int indexEntryCount = ledgerIndexEntries.size(); byte[] ledgerMetadataByte = this.segmentMetadata.get(ledgerId).toByteArray(); int segmentMetadataLength = ledgerMetadataByte.length; indexBlockLength += 8 /* ledger id length */ + 4 /* index entry count */ + 4 /* segment metadata length */ + segmentMetadataLength + indexEntryCount * (8 + 4 + 8); metaBytesMap.put(ledgerId, ledgerMetadataByte); } ByteBuf out = PulsarByteBufAllocator.DEFAULT.buffer(indexBlockLength, indexBlockLength); out.writeInt(INDEX_MAGIC_WORD) .writeInt(indexBlockLength) .writeLong(dataObjectLength) .writeLong(dataHeaderLength); for (Map.Entry<Long, TreeMap<Long, OffloadIndexEntryImpl>> e : this.indexEntries.entrySet()) { Long ledgerId = e.getKey(); TreeMap<Long, OffloadIndexEntryImpl> ledgerIndexEntries = e.getValue(); int indexEntryCount = ledgerIndexEntries.size(); byte[] ledgerMetadataByte = metaBytesMap.get(ledgerId); out.writeLong(ledgerId) .writeInt(indexEntryCount) .writeInt(ledgerMetadataByte.length) .writeBytes(ledgerMetadataByte); ledgerIndexEntries.values().forEach(idxEntry -> { out.writeLong(idxEntry.getEntryId()) .writeInt(idxEntry.getPartId()) .writeLong(idxEntry.getOffset()); }); } return new IndexInputStream(new ByteBufInputStream(out, true), indexBlockLength); }
3.68
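The length bookkeeping in toStream can be sanity-checked by hand. A worked computation for a hypothetical block with one ledger, 100 bytes of segment metadata, and 3 index entries (each entry being an 8-byte entry id, 4-byte part id, and 8-byte offset, matching the 8 + 4 + 8 term above):

    public class IndexBlockSizeDemo {
        public static void main(String[] args) {
            int header = 4 /* magic */ + 4 /* block length */ + 8 /* data object len */ + 8 /* data header len */;
            int ledgerFixed = 8 /* ledger id */ + 4 /* entry count */ + 4 /* metadata len */;
            int metadata = 100;            // hypothetical segment metadata size
            int entries = 3 * (8 + 4 + 8); // three index entries
            System.out.println(header + ledgerFixed + metadata + entries); // 24 + 16 + 100 + 60 = 200
        }
    }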
framework_RadioButtonGroup_isHtmlContentAllowed
/** * Checks whether captions are interpreted as html or plain text. * * @return true if the captions are used as html, false if used as plain * text * @see #setHtmlContentAllowed(boolean) */ public boolean isHtmlContentAllowed() { return getState(false).htmlContentAllowed; }
3.68
flink_FileInputFormat_getFilePath
/** * @return The path of the file to read. * @deprecated Please use getFilePaths() instead. */ @Deprecated public Path getFilePath() { if (supportsMultiPaths()) { if (this.filePaths == null || this.filePaths.length == 0) { return null; } else if (this.filePaths.length == 1) { return this.filePaths[0]; } else { throw new UnsupportedOperationException( "FileInputFormat is configured with multiple paths. Use getFilePaths() instead."); } } else { return filePath; } }
3.68
hadoop_AHSWebServices_getContainerLogsInfo
// TODO: YARN-6080: Create WebServiceUtils to have common functions used in // RMWebService, NMWebService and AHSWebService. /** * Returns log file's name as well as current file size for a container. * * @param req * HttpServletRequest * @param res * HttpServletResponse * @param containerIdStr * The container ID * @param nmId * The Node Manager NodeId * @param redirectedFromNode * Whether this is a redirected request from NM * @return * The log file's name and current file size */ @GET @Path("/containers/{containerid}/logs") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public Response getContainerLogsInfo( @Context HttpServletRequest req, @Context HttpServletResponse res, @PathParam(YarnWebServiceParams.CONTAINER_ID) String containerIdStr, @QueryParam(YarnWebServiceParams.NM_ID) String nmId, @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE) @DefaultValue("false") boolean redirectedFromNode, @QueryParam(YarnWebServiceParams.MANUAL_REDIRECTION) @DefaultValue("false") boolean manualRedirection) { initForReadableEndpoints(res); WrappedLogMetaRequest.Builder logMetaRequestBuilder = LogServlet.createRequestFromContainerId(containerIdStr); return logServlet.getContainerLogsInfo(req, logMetaRequestBuilder, nmId, redirectedFromNode, null, manualRedirection); }
3.68
morf_AbstractSqlDialectTest_expectedCreateViewOverUnionSelectStatements
/** * @return The expected SQL statements for creating the test database view over a union select. */ protected List<String> expectedCreateViewOverUnionSelectStatements() { return Arrays.asList("CREATE VIEW " + tableName("TestView") + " AS (SELECT stringField FROM " + tableName(TEST_TABLE) + " WHERE (stringField = " + stringLiteralPrefix() + "'blah') UNION ALL SELECT stringField FROM " + tableName(OTHER_TABLE) + " WHERE (stringField = " + stringLiteralPrefix() + "'blah'))"); }
3.68
hudi_HoodieCDCUtils_cdcRecord
/** * Build the cdc record when `hoodie.table.cdc.supplemental.logging.mode` is {@link HoodieCDCSupplementalLoggingMode#OP_KEY_ONLY}. */ public static GenericData.Record cdcRecord(Schema cdcSchema, String op, String recordKey) { GenericData.Record record = new GenericData.Record(cdcSchema); record.put(CDC_OPERATION_TYPE, op); record.put(CDC_RECORD_KEY, recordKey); return record; }
3.68
hbase_Hash_getHashType
/** * This utility method converts the name of the configured hash type to a symbolic constant. * @param conf configuration * @return one of the predefined constants */ public static int getHashType(Configuration conf) { String name = conf.get("hbase.hash.type", "murmur"); return parseHashType(name); }
3.68
querydsl_Coalesce_as
/** * Create an alias for the expression * * @return this as alias */ public DslExpression<T> as(String alias) { return as(ExpressionUtils.path(getType(), alias)); }
3.68
flink_AvroParquetRecordFormat_isSplittable
/** Current version does not support splitting. */ @Override public boolean isSplittable() { return false; }
3.68
druid_WallConfig_isDescribeAllow
/** * allow mysql describe statement * * @return true if MySQL DESCRIBE statements are allowed * @since 0.2.10 */ public boolean isDescribeAllow() { return describeAllow; }
3.68
dubbo_MessageFormatter_format
/** * Performs a two argument substitution for the 'messagePattern' passed as * parameter. * <p/> * For example, * <p/> * <pre> * MessageFormatter.format(&quot;Hi {}. My name is {}.&quot;, &quot;Alice&quot;, &quot;Bob&quot;); * </pre> * <p/> * will return the string "Hi Alice. My name is Bob.". * * @param messagePattern The message pattern which will be parsed and formatted * @param argA The argument to be substituted in place of the first formatting * anchor * @param argB The argument to be substituted in place of the second formatting * anchor * @return The formatted message */ static FormattingTuple format(final String messagePattern, Object argA, Object argB) { return arrayFormat(messagePattern, new Object[] {argA, argB}); }
3.68
hbase_RequestConverter_buildGetTableNamesRequest
/** * Creates a protocol buffer GetTableNamesRequest * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a GetTableNamesRequest */ public static GetTableNamesRequest buildGetTableNamesRequest(final Pattern pattern, boolean includeSysTables) { GetTableNamesRequest.Builder builder = GetTableNamesRequest.newBuilder(); if (pattern != null) { builder.setRegex(pattern.toString()); } builder.setIncludeSysTables(includeSysTables); return builder.build(); }
3.68
hbase_Permission_newBuilder
/** * Build a table permission * @param tableName the specific table name * @return table permission builder */ public static Builder newBuilder(TableName tableName) { return new Builder(tableName); }
3.68
hbase_HRegion_isFlushSize
/* * @return True if size is over the flush threshold */ private boolean isFlushSize(MemStoreSize size) { return size.getHeapSize() + size.getOffHeapSize() > getMemStoreFlushSize(); }
3.68
framework_ComponentLocator_getElementByPathStartingAt
/** * Locates an element using a String locator (path) which identifies a DOM * element. The path starts from the specified root element. * * @see #getElementByPath(String) * * @since 7.2 * * @param path * The path of the element to be found * @param root * The root element where the path is anchored * @return The DOM element identified by {@code path} or null if the element * could not be located. */ public com.google.gwt.user.client.Element getElementByPathStartingAt( String path, Element root) { for (LocatorStrategy strategy : locatorStrategies) { if (strategy.validatePath(path)) { Element element = strategy.getElementByPathStartingAt(path, root); if (null != element) { return DOM.asOld(element); } } } return null; }
3.68
pulsar_FunctionMetaDataManager_listFunctions
/** * List all the functions in a namespace. * @param tenant the tenant the namespace belongs to * @param namespace the namespace * @return a list of function names */ public synchronized Collection<FunctionMetaData> listFunctions(String tenant, String namespace) { List<FunctionMetaData> ret = new LinkedList<>(); if (!this.functionMetaDataMap.containsKey(tenant)) { return ret; } if (!this.functionMetaDataMap.get(tenant).containsKey(namespace)) { return ret; } for (FunctionMetaData functionMetaData : this.functionMetaDataMap.get(tenant).get(namespace).values()) { ret.add(functionMetaData); } return ret; }
3.68
morf_SqlDialect_getExistingMaxAutoNumberValue
/** * Builds SQL to get the maximum value of the specified column on the * specified {@code dataTable}. * * @param dataTable the table to query over. * @param fieldName Name of the field to query over for the max value. * @return SQL getting the maximum value from the {@code dataTable}. */ protected String getExistingMaxAutoNumberValue(TableReference dataTable, String fieldName) { return getSqlFrom(new SelectStatement(Function.coalesce( new MathsField(Function.max(new FieldReference(fieldName)), MathsOperator.PLUS, new FieldLiteral(1)), new FieldLiteral(1)) .as("CurrentValue")).from(dataTable)); }
3.68
pulsar_AuthenticationMetrics_authenticateFailure
/** * Logs an authentication failure event to the authentication metrics. * @param providerName The short class name of the provider * @param authMethod Authentication method name. * @param errorCode Error code. */ public static void authenticateFailure(String providerName, String authMethod, Enum<?> errorCode) { authFailuresMetrics.labels(providerName, authMethod, errorCode.name()).inc(); }
3.68
framework_DefaultDeploymentConfiguration_checkProductionMode
/** * Log a warning if Vaadin is not running in production mode. */ private void checkProductionMode() { productionMode = getApplicationOrSystemProperty( Constants.SERVLET_PARAMETER_PRODUCTION_MODE, "false") .equals("true"); if (!productionMode) { getLogger().warning(Constants.NOT_PRODUCTION_MODE_INFO); } }
3.68
flink_SlideWithSizeAndSlideOnTime_as
/** * Assigns an alias for this window that the following {@code groupBy()} and {@code select()} * clause can refer to. {@code select()} statement can access window properties such as window * start or end time. * * @param alias alias for this window * @return this window */ public SlideWithSizeAndSlideOnTimeWithAlias as(Expression alias) { return new SlideWithSizeAndSlideOnTimeWithAlias(alias, timeField, size, slide); }
3.68
framework_AbstractTextFieldElement_setValue
/** * Set value of the field element. * * @param chars * new value of the field */ public void setValue(CharSequence chars) throws ReadOnlyException { if (isReadOnly()) { throw new ReadOnlyException(); } clearElementClientSide(this); focus(); sendKeys(chars); sendKeys(Keys.TAB); }
3.68
flink_Tuple0_equals
/** * Deep equality for tuples by calling equals() on the tuple members. * * @param o the object checked for equality * @return true if this is equal to o. */ @Override public boolean equals(Object o) { return this == o || o instanceof Tuple0; }
3.68
flink_CanalJsonFormatFactory_validateDecodingFormatOptions
/** Validator for canal decoding format. */ private static void validateDecodingFormatOptions(ReadableConfig tableOptions) { JsonFormatOptionsUtil.validateDecodingFormatOptions(tableOptions); }
3.68
dubbo_CodecSupport_isHeartBeat
/** * Checks whether the payload is the serialized form of a null object under the given serialization protocol. * * @param payload the payload bytes to check * @param proto the serialization protocol id * @return true if the payload equals the serialized null object, i.e. a heartbeat body */ public static boolean isHeartBeat(byte[] payload, byte proto) { return Arrays.equals(payload, getNullBytesOf(getSerializationById(proto))); }
3.68
querydsl_SQLExpressions_countDistinct
/** * Start a window function expression * * @param expr expression * @return count(distinct expr) */ public static WindowOver<Long> countDistinct(Expression<?> expr) { return new WindowOver<Long>(Long.class, Ops.AggOps.COUNT_DISTINCT_AGG, expr); }
3.68
flink_RestServerEndpointConfiguration_getResponseHeaders
/** Response headers that should be added to every HTTP response. */ public Map<String, String> getResponseHeaders() { return responseHeaders; }
3.68
flink_CliFrontendParser_printCustomCliOptions
/** * Prints custom cli options. * * @param formatter The formatter to use for printing * @param runOptions True if the run options should be printed, False to print only general * options */ private static void printCustomCliOptions( Collection<CustomCommandLine> customCommandLines, HelpFormatter formatter, boolean runOptions) { // prints options from all available command-line classes for (CustomCommandLine cli : customCommandLines) { formatter.setSyntaxPrefix(" Options for " + cli.getId() + " mode:"); Options customOpts = new Options(); cli.addGeneralOptions(customOpts); if (runOptions) { cli.addRunOptions(customOpts); } formatter.printHelp(" ", customOpts); System.out.println(); } }
3.68
hadoop_S3ALocatedFileStatus_getETag
/** * @return the S3 object eTag when available, else null. * @deprecated use {@link EtagSource#getEtag()} for * public access. */ @Deprecated public String getETag() { return getEtag(); }
3.68
morf_RenameIndex_reverse
/** * {@inheritDoc} * * @see org.alfasoftware.morf.upgrade.SchemaChange#reverse(org.alfasoftware.morf.metadata.Schema) */ @Override public Schema reverse(Schema schema) { return applyChange(schema, toIndexName, fromIndexName); }
3.68
hadoop_MultiStateTransitionListener_addListener
/** * Add a listener to the list of listeners. * @param listener A listener. */ public void addListener(StateTransitionListener<OPERAND, EVENT, STATE> listener) { listeners.add(listener); }
3.68
framework_Escalator_sortDomElements
/** * Sorts the rows in the DOM to correspond to the visual order. * * @see #visualRowOrder */ private void sortDomElements() { final String profilingName = "Escalator.BodyRowContainer.sortDomElements"; Profiler.enter(profilingName); /* * Focus is lost from an element if that DOM element is (or any of * its parents are) removed from the document. Therefore, we sort * everything around that row instead. */ final TableRowElement focusedRow = getRowWithFocus(); if (focusedRow != null) { assert focusedRow .getParentElement() == root : "Trying to sort around a row that doesn't exist in body"; assert visualRowOrder.contains(focusedRow) || body.spacerContainer.isSpacer( focusedRow) : "Trying to sort around a row that doesn't exist in visualRowOrder or is not a spacer."; } /* * Two cases handled simultaneously: * * 1) No focus on rows. We iterate visualRowOrder backwards, and * take the respective element in the DOM, and place it as the first * child in the body element. Then we take the next-to-last from * visualRowOrder, and put that first, pushing the previous row as * the second child. And so on... * * 2) Focus on some row within Escalator body. Again, we iterate * visualRowOrder backwards. This time, we use the focused row as a * pivot: Instead of placing rows from the bottom of visualRowOrder * and placing it first, we place it underneath the focused row. * Once we hit the focused row, we don't move it (to not reset * focus) but change sorting mode. After that, we place all rows as * the first child. */ List<TableRowElement> orderedBodyRows = new ArrayList<TableRowElement>( visualRowOrder); Map<Integer, SpacerContainer.SpacerImpl> spacers = body.spacerContainer .getSpacers(); /* * Start at -1 to include a spacer that is rendered above the * viewport, but its parent row is still not shown */ for (int i = -1; i < visualRowOrder.size(); i++) { SpacerContainer.SpacerImpl spacer = spacers .remove(Integer.valueOf(getTopRowLogicalIndex() + i)); if (spacer != null) { orderedBodyRows.add(i + 1, spacer.getRootElement()); spacer.show(); } } /* * At this point, invisible spacers aren't reordered, so their * position in the DOM will remain undefined. */ // If a spacer was not reordered, it means that it's out of view. for (SpacerContainer.SpacerImpl unmovedSpacer : spacers.values()) { unmovedSpacer.hide(); } /* * If we have a focused row, start in the mode where we put * everything underneath that row. Otherwise, all rows are placed as * first child. */ boolean insertFirst = (focusedRow == null); final ListIterator<TableRowElement> i = orderedBodyRows .listIterator(orderedBodyRows.size()); while (i.hasPrevious()) { TableRowElement tr = i.previous(); if (tr == focusedRow) { insertFirst = true; } else if (insertFirst) { root.insertFirst(tr); } else { root.insertAfter(tr, focusedRow); } } Profiler.leave(profilingName); }
3.68
hudi_HoodieExampleDataGenerator_generateUniqueUpdates
/** * Generates new updates, one for each of the existing keys. * * @param commitTime Commit Timestamp * @return list of hoodie record updates */ public List<HoodieRecord<T>> generateUniqueUpdates(String commitTime) { List<HoodieRecord<T>> updates = new ArrayList<>(); for (int i = 0; i < numExistingKeys; i++) { KeyPartition kp = existingKeys.get(i); HoodieRecord<T> record = generateUpdateRecord(kp.key, commitTime); updates.add(record); } return updates; }
3.68
framework_Embedded_setType
/** * Sets the object type. * <p> * This can be one of the following: * <ul> * <li>{@link #TYPE_OBJECT} <i>(This is the default)</i> * <li>{@link #TYPE_IMAGE} <i>(Deprecated)</i> * <li>{@link #TYPE_BROWSER} <i>(Deprecated)</i> * </ul> * </p> * * @param type * the type to set. */ public void setType(int type) { if (type != TYPE_OBJECT && type != TYPE_IMAGE && type != TYPE_BROWSER) { throw new IllegalArgumentException("Unsupported type"); } if (type != getType()) { getState().type = type; } }
3.68
hudi_AbstractStreamWriteFunction_reloadWriteMetaState
/** * Reload the write metadata state as the current checkpoint. */ private void reloadWriteMetaState() throws Exception { this.writeMetadataState.clear(); WriteMetadataEvent event = WriteMetadataEvent.builder() .taskID(taskID) .instantTime(currentInstant) .writeStatus(new ArrayList<>(writeStatuses)) .bootstrap(true) .build(); this.writeMetadataState.add(event); writeStatuses.clear(); }
3.68
hadoop_PrintJarMainClass_main
/** * @param args args. */ public static void main(String[] args) { try (JarFile jar_file = new JarFile(args[0])) { Manifest manifest = jar_file.getManifest(); if (manifest != null) { String value = manifest.getMainAttributes().getValue("Main-Class"); if (value != null) { System.out.println(value.replaceAll("/", ".")); return; } } } catch (Throwable e) { // ignore it } System.out.println("UNKNOWN"); System.exit(1); }
3.68
hadoop_Chain_getCurrentValue
/** * Get the current value. * * @return the value object that was read into * @throws IOException * @throws InterruptedException */ public VALUEIN getCurrentValue() throws IOException, InterruptedException { return this.value; }
3.68
framework_VCalendarPanel_setTimeChangeListener
/** * The time change listener is triggered when the user changes the time. * * @param listener the listener to notify of time changes */ public void setTimeChangeListener(TimeChangeListener listener) { timeChangeListener = listener; }
3.68
hbase_RegionHDFSBlockLocationFinder_createCache
/** * Create a cache for region to list of servers * @return A new Cache. */ private LoadingCache<RegionInfo, HDFSBlocksDistribution> createCache() { return CacheBuilder.newBuilder().expireAfterWrite(CACHE_TIME, TimeUnit.MILLISECONDS) .build(loader); }
3.68
flink_TableChange_getKey
/** Returns the Option key to reset. */ public String getKey() { return key; }
3.68
hudi_AbstractTableFileSystemView_fetchMergedFileSlice
/** * If the file slice's base instant is a pending compaction instant, this method merges the file slice with the one before * the compaction instant time. * * @param fileGroup File group to which the file slice belongs * @param fileSlice File Slice which needs to be merged */ private FileSlice fetchMergedFileSlice(HoodieFileGroup fileGroup, FileSlice fileSlice) { // if the file-group is under construction, pick the latest before compaction instant time. Option<Pair<String, CompactionOperation>> compactionOpWithInstant = getPendingCompactionOperationWithInstant(fileGroup.getFileGroupId()); if (compactionOpWithInstant.isPresent()) { String compactionInstantTime = compactionOpWithInstant.get().getKey(); if (fileSlice.getBaseInstantTime().equals(compactionInstantTime)) { Option<FileSlice> prevFileSlice = fileGroup.getLatestFileSliceBefore(compactionInstantTime); if (prevFileSlice.isPresent()) { return mergeCompactionPendingFileSlices(fileSlice, prevFileSlice.get()); } } } return fileSlice; }
3.68
flink_TypeExtractor_getBinaryOperatorReturnType
/** * Returns the binary operator's return type. * * <p>This method can extract a type in 4 different ways: * * <p>1. By using the generics of the base class like MyFunction<X, Y, Z, IN, OUT>. This is what * outputTypeArgumentIndex (in this example "4") is good for. * * <p>2. By using input type inference SubMyFunction<T, String, String, String, T>. This is what * inputTypeArgumentIndex (in this example "0") and inType is good for. * * <p>3. By using the static method that a compiler generates for Java lambdas. This is what * lambdaOutputTypeArgumentIndices is good for. Given that MyFunction has the following single * abstract method: * * <pre> * <code> * void apply(IN value, Collector<OUT> value) * </code> * </pre> * * <p>Lambda type indices allow the extraction of a type from lambdas. To extract the output * type <b>OUT</b> from the function one should pass {@code new int[] {1, 0}}. "1" for selecting * the parameter and 0 for the first generic in this type. Use {@code TypeExtractor.NO_INDEX} * for selecting the return type of the lambda for extraction or if the class cannot be a lambda * because it is not a single abstract method interface. * * <p>4. By using interfaces such as {@link TypeInfoFactory} or {@link ResultTypeQueryable}. * * <p>See also comments in the header of this class. * * @param function Function to extract the return type from * @param baseClass Base class of the function * @param input1TypeArgumentIndex Index of first input generic type in the class specification * (ignored if in1Type is null) * @param input2TypeArgumentIndex Index of second input generic type in the class specification * (ignored if in2Type is null) * @param outputTypeArgumentIndex Index of output generic type in the class specification * @param lambdaOutputTypeArgumentIndices Table of indices of the type argument specifying the * output type. See example. * @param in1Type Type of the left side input elements (In case of an iterable, it is the * element type) * @param in2Type Type of the right side input elements (In case of an iterable, it is the * element type) * @param functionName Function name * @param allowMissing Can the type information be missing (this generates a MissingTypeInfo for * postponing an exception) * @param <IN1> Left side input type * @param <IN2> Right side input type * @param <OUT> Output type * @return TypeInformation of the return type of the function */ @SuppressWarnings("unchecked") @PublicEvolving public static <IN1, IN2, OUT> TypeInformation<OUT> getBinaryOperatorReturnType( Function function, Class<?> baseClass, int input1TypeArgumentIndex, int input2TypeArgumentIndex, int outputTypeArgumentIndex, int[] lambdaOutputTypeArgumentIndices, TypeInformation<IN1> in1Type, TypeInformation<IN2> in2Type, String functionName, boolean allowMissing) { Preconditions.checkArgument( in1Type == null || input1TypeArgumentIndex >= 0, "Input 1 type argument index was not provided"); Preconditions.checkArgument( in2Type == null || input2TypeArgumentIndex >= 0, "Input 2 type argument index was not provided"); Preconditions.checkArgument( outputTypeArgumentIndex >= 0, "Output type argument index was not provided"); Preconditions.checkArgument( lambdaOutputTypeArgumentIndices != null, "Indices for output type arguments within lambda not provided"); // explicit result type has highest precedence if (function instanceof ResultTypeQueryable) { return ((ResultTypeQueryable<OUT>) function).getProducedType(); } // perform extraction try { final LambdaExecutable exec; try { exec = checkAndExtractLambda(function); } catch (TypeExtractionException e) { throw new InvalidTypesException("Internal error occurred.", e); } if (exec != null) { final Method sam = TypeExtractionUtils.getSingleAbstractMethod(baseClass); final int baseParametersLen = sam.getParameterCount(); // parameters must be accessed from behind, since JVM can add additional parameters // e.g. when using local variables inside lambda function final int paramLen = exec.getParameterTypes().length; final Type output; if (lambdaOutputTypeArgumentIndices.length > 0) { output = TypeExtractionUtils.extractTypeFromLambda( baseClass, exec, lambdaOutputTypeArgumentIndices, paramLen, baseParametersLen); } else { output = exec.getReturnType(); TypeExtractionUtils.validateLambdaType(baseClass, output); } return new TypeExtractor().privateCreateTypeInfo(output, in1Type, in2Type); } else { if (in1Type != null) { validateInputType( baseClass, function.getClass(), input1TypeArgumentIndex, in1Type); } if (in2Type != null) { validateInputType( baseClass, function.getClass(), input2TypeArgumentIndex, in2Type); } return new TypeExtractor() .privateCreateTypeInfo( baseClass, function.getClass(), outputTypeArgumentIndex, in1Type, in2Type); } } catch (InvalidTypesException e) { if (allowMissing) { return (TypeInformation<OUT>) new MissingTypeInfo( functionName != null ? functionName : function.toString(), e); } else { throw e; } } }
3.68
hadoop_ChainMapper_addMapper
/** * Adds a {@link Mapper} class to the chain mapper. * * <p> * The key and values are passed from one element of the chain to the next, by * value. For the added Mapper the configuration given for it, * <code>mapperConf</code>, takes precedence over the job's Configuration. This * precedence is in effect when the task is running. * </p> * <p> * IMPORTANT: There is no need to specify the output key/value classes for the * ChainMapper, this is done by the addMapper for the last mapper in the chain * </p> * * @param job * The job. * @param klass * the Mapper class to add. * @param inputKeyClass * mapper input key class. * @param inputValueClass * mapper input value class. * @param outputKeyClass * mapper output key class. * @param outputValueClass * mapper output value class. * @param mapperConf * a configuration for the Mapper class. It is recommended to use a * Configuration without default values using the * <code>Configuration(boolean loadDefaults)</code> constructor with * FALSE. */ public static void addMapper(Job job, Class<? extends Mapper> klass, Class<?> inputKeyClass, Class<?> inputValueClass, Class<?> outputKeyClass, Class<?> outputValueClass, Configuration mapperConf) throws IOException { job.setMapperClass(ChainMapper.class); job.setMapOutputKeyClass(outputKeyClass); job.setMapOutputValueClass(outputValueClass); Chain.addMapper(true, job, klass, inputKeyClass, inputValueClass, outputKeyClass, outputValueClass, mapperConf); }
3.68
pulsar_MultiTopicsConsumerImpl_getPartitionsOfTheTopicMap
// get the total number of partitions across all topics in the map int getPartitionsOfTheTopicMap() { return partitionedTopics.values().stream().mapToInt(Integer::intValue).sum(); }
3.68
morf_Join_getCriterion
/** * Gets the criterion used in the join. * * @return the criterion */ public Criterion getCriterion() { return criterion; }
3.68
dubbo_InternalThreadLocal_size
/** * Returns the number of thread local variables bound to the current thread. */ public static int size() { InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.getIfSet(); if (threadLocalMap == null) { return 0; } else { return threadLocalMap.size(); } }
3.68
flink_BinarySegmentUtils_bitUnSet
/** * unset bit from segments. * * @param segments target segments. * @param baseOffset bits base offset. * @param index bit index from base offset. */ public static void bitUnSet(MemorySegment[] segments, int baseOffset, int index) { if (segments.length == 1) { MemorySegment segment = segments[0]; int offset = baseOffset + byteIndex(index); byte current = segment.get(offset); current &= ~(1 << (index & BIT_BYTE_INDEX_MASK)); segment.put(offset, current); } else { bitUnSetMultiSegments(segments, baseOffset, index); } }
3.68
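The single-segment branch of bitUnSet is plain bit arithmetic: byteIndex locates the byte and the low three bits of the index select the bit inside it. A standalone sketch over a byte array (byteIndex(index) is assumed to be index >>> 3 and BIT_BYTE_INDEX_MASK to be 7, mirroring Flink's helpers):

    public class BitUnsetDemo {
        private static final int BIT_BYTE_INDEX_MASK = 7; // assumed: low 3 bits select the bit within a byte

        public static void main(String[] args) {
            byte[] bytes = new byte[2];
            bytes[1] = (byte) 0b0000_0100; // overall bit 10 (byte 1, bit 2) is set

            int index = 10;
            int byteIndex = index >>> 3;                   // 10 / 8 = 1, assumed byteIndex(index)
            int mask = 1 << (index & BIT_BYTE_INDEX_MASK); // 1 << 2 = 0b100
            bytes[byteIndex] &= ~mask;                     // clear the bit

            System.out.println(bytes[1]); // prints 0
        }
    }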
framework_SuperTextAreaConnector_getState
// @DelegateToWidget will not work with overridden state @Override public SuperTextAreaState getState() { return (SuperTextAreaState) super.getState(); }
3.68
hbase_ConnectionUtils_getStubKey
/** * Get a unique key for the rpc stub to the given server. */ static String getStubKey(String serviceName, ServerName serverName) { return String.format("%s@%s", serviceName, serverName); }
3.68
morf_AbstractSqlDialectTest_testCastToDecimal
/** * Tests the output of a cast to a decimal. */ @Test public void testCastToDecimal() { String result = testDialect.getSqlFrom(new Cast(new FieldReference("value"), DataType.DECIMAL, 10, 2)); assertEquals(expectedDecimalCast(), result); }
3.68
framework_Navigator_removeView
/** * Removes a view from navigator. * <p> * This method only applies to views registered using * {@link #addView(String, View)} or {@link #addView(String, Class)}. * * @param viewName * name of the view to remove */ public void removeView(String viewName) { Iterator<ViewProvider> it = providers.iterator(); while (it.hasNext()) { ViewProvider provider = it.next(); if (provider instanceof StaticViewProvider) { StaticViewProvider staticProvider = (StaticViewProvider) provider; if (staticProvider.getViewName().equals(viewName)) { it.remove(); } } else if (provider instanceof ClassBasedViewProvider) { ClassBasedViewProvider classBasedProvider = (ClassBasedViewProvider) provider; if (classBasedProvider.getViewName().equals(viewName)) { it.remove(); } } } }
3.68
framework_DefaultDeploymentConfiguration_isSyncIdCheckEnabled
/** * {@inheritDoc} * <p> * The default value is <code>true</code>. */ @Override public boolean isSyncIdCheckEnabled() { return syncIdCheck; }
3.68
hbase_RegionLocator_getStartKeys
/** * Gets the starting row key for every region in the currently open table. * <p> * This is mainly useful for the MapReduce integration. * @return Array of region starting row keys * @throws IOException if a remote or network exception occurs */ default byte[][] getStartKeys() throws IOException { return getStartEndKeys().getFirst(); }
3.68