name: string, lengths 12 to 178
code_snippet: string, lengths 8 to 36.5k
score: float64, values 3.26 to 3.68
flink_DynamicSinkUtils_convertPredicateToNegative
/** Convert the predicate in WHERE clause to the negative predicate. */ private static void convertPredicateToNegative(LogicalTableModify tableModify) { RexBuilder rexBuilder = tableModify.getCluster().getRexBuilder(); RelNode input = tableModify.getInput(); LogicalFilter newFilter; // if the input is a table scan, there's no predicate which means it's always true // the negative predicate should be false if (input.getInput(0) instanceof LogicalTableScan) { newFilter = LogicalFilter.create(input.getInput(0), rexBuilder.makeLiteral(false)); } else { LogicalFilter filter = (LogicalFilter) input.getInput(0); // create a filter with negative predicate RexNode complementFilter = rexBuilder.makeCall( filter.getCondition().getType(), FlinkSqlOperatorTable.NOT, Collections.singletonList(filter.getCondition())); newFilter = filter.copy(filter.getTraitSet(), filter.getInput(), complementFilter); } // replace with the new filter input.replaceInput(0, newFilter); }
3.68
hbase_HFileOutputFormat2_createFamilyBlockSizeMap
/** * Runs inside the task to deserialize column family to block size map from the configuration. * @param conf to read the serialized values from * @return a map from column family to the configured block size */ @InterfaceAudience.Private static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf) { Map<byte[], String> stringMap = createFamilyConfValueMap(conf, BLOCK_SIZE_FAMILIES_CONF_KEY); Map<byte[], Integer> blockSizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry<byte[], String> e : stringMap.entrySet()) { Integer blockSize = Integer.parseInt(e.getValue()); blockSizeMap.put(e.getKey(), blockSize); } return blockSizeMap; }
3.68
hadoop_FederationCache_buildSubClusterInfoMap
/** * According to the subClusters, build SubClusterInfoMap. * * @param subClusters subCluster List. * @return SubClusterInfoMap. */ private static Map<SubClusterId, SubClusterInfo> buildSubClusterInfoMap( List<SubClusterInfo> subClusters) { Map<SubClusterId, SubClusterInfo> subClustersMap = new HashMap<>(subClusters.size()); for (SubClusterInfo subCluster : subClusters) { subClustersMap.put(subCluster.getSubClusterId(), subCluster); } return subClustersMap; }
3.68
dubbo_ReflectUtils_getAllFieldNames
/** * Get all field names of target type * @param type * @return */ public static Set<String> getAllFieldNames(Class<?> type) { Set<String> fieldNames = new HashSet<>(); for (Field field : type.getDeclaredFields()) { fieldNames.add(field.getName()); } Set<Class<?>> allSuperClasses = ClassUtils.getAllSuperClasses(type); for (Class<?> aClass : allSuperClasses) { for (Field field : aClass.getDeclaredFields()) { fieldNames.add(field.getName()); } } return fieldNames; }
3.68
morf_AbstractSqlDialectTest_testSelectMinimum
/** * Tests use of a minimum function in a select. */ @Test public void testSelectMinimum() { SelectStatement stmt = new SelectStatement(min(new FieldReference(INT_FIELD))).from(new TableReference(TEST_TABLE)); String expectedSql = "SELECT MIN(intField) FROM " + tableName(TEST_TABLE); assertEquals("Select with minimum function", expectedSql, testDialect.convertStatementToSQL(stmt)); }
3.68
streampipes_SpTrajectoryBuilder_returnAsLineString
/** * returns a JTS LineString geometry from the trajectory object. LineString only stores the point * geometry without M value. The lineString is oriented to the trajectory direction. This means: the * newest point is always the last point and has the highest subpoint index. The First point is the * oldest point with the lowest index [0] * * @param factory a Geometry factory for creating the lineString with the same precision and CRS and should be * the factory of the input point geometry * @return JTS LineString JTS LineString */ public LineString returnAsLineString(GeometryFactory factory) { LineString geom; if (coordinateList.size() > 1) { //only linestring if more than 2 points. // reverse output of linestring. so last added point is first geom = factory.createLineString(coordinateList.toCoordinateArray()); } else { geom = factory.createLineString(); } return geom; }
3.68
pulsar_ManagedLedgerImpl_releaseReadHandleIfNoLongerRead
/** * @param ledgerId the ledger handle which maybe will be released. * @return if the ledger handle was released. */ private boolean releaseReadHandleIfNoLongerRead(long ledgerId, long slowestNonDurationLedgerId) { if (ledgerId < slowestNonDurationLedgerId) { if (log.isDebugEnabled()) { log.debug("[{}] Ledger {} no longer needs to be read, close the cached readHandle", name, ledgerId); } invalidateReadHandle(ledgerId); return true; } return false; }
3.68
framework_AbstractBeanContainer_createBeanItem
/** * Create a BeanItem for a bean using pre-parsed bean metadata (based on * {@link #getBeanType()}). * * @param bean * @return created {@link BeanItem} or null if bean is null */ protected BeanItem<BEANTYPE> createBeanItem(BEANTYPE bean) { return bean == null ? null : new BeanItem<BEANTYPE>(bean, model); }
3.68
framework_Slot_isRelativeInDirection
/** * Returns whether this slot has relative size in the indicated direction. * * @param vertical * {@code true} if the height should be checked, {@code false} if * the width should be checked * @return {@code true} if the slot's indicated dimension is relative, * {@code false} otherwise */ public boolean isRelativeInDirection(boolean vertical) { if (vertical) { return hasRelativeHeight(); } else { return hasRelativeWidth(); } }
3.68
flink_BuiltInFunctionDefinition_newBuilder
/** Builder for configuring and creating instances of {@link BuiltInFunctionDefinition}. */ public static BuiltInFunctionDefinition.Builder newBuilder() { return new BuiltInFunctionDefinition.Builder(); }
3.68
hadoop_MappingRuleResult_isCreateAllowed
/** * The method returns true if the result queue should be created when it does * not exist yet. * @return true if non-existent queues should be created */ public boolean isCreateAllowed() { return allowCreate; }
3.68
pulsar_SchemaReader_read
/** * serialize bytes convert pojo. * * @param inputStream the stream of message * @param schemaVersion the schema version of message * @return the serialized object */ default T read(InputStream inputStream, byte[] schemaVersion) { return read(inputStream); }
3.68
framework_DesignAttributeHandler_getSupportedAttributes
/** * Searches for supported setter and getter types from the specified class * and returns the list of corresponding design attributes. * * @param clazz * the class scanned for setters * @return the list of supported design attributes */ public static Collection<String> getSupportedAttributes(Class<?> clazz) { resolveSupportedAttributes(clazz); return CACHE.get(clazz).getAttributes(); }
3.68
hbase_SnapshotDescriptionUtils_isSubDirectoryOf
/** * Determines if the given workingDir is a subdirectory of the given "root directory" * @param workingDir a directory to check * @param rootDir root directory of the HBase installation * @return true if the given workingDir is a subdirectory of the given root directory, false * otherwise */ public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir) { return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR); }
3.68
flink_MethodlessRouter_addRoute
/** * This method does nothing if the path pattern has already been added. A path pattern can only * point to one target. */ public MethodlessRouter<T> addRoute(String pathPattern, T target) { PathPattern p = new PathPattern(pathPattern); if (routes.containsKey(p)) { return this; } routes.put(p, target); return this; }
3.68
hbase_MasterObserver_preSplitRegionAfterMETAAction
/** * This will be called after update META step as part of split transaction * @param ctx the environment to interact with the framework and master */ default void preSplitRegionAfterMETAAction( final ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException { }
3.68
flink_ResourceProfile_getNetworkMemory
/** * Get the network memory needed. * * @return The network memory */ public MemorySize getNetworkMemory() { throwUnsupportedOperationExceptionIfUnknown(); return networkMemory; }
3.68
framework_DateCell_addEmphasisStyle
/** * @since 7.2 */ public void addEmphasisStyle(Element elementOver) { addEmphasisStyle(DOM.asOld(elementOver)); }
3.68
querydsl_CurveExpression_endPoint
/** * The end Point of this Curve. * * @return end point */ public PointExpression<Point> endPoint() { if (endPoint == null) { endPoint = GeometryExpressions.pointOperation(SpatialOps.END_POINT, mixin); } return endPoint; }
3.68
flink_CheckpointStatsTracker_reportRestoredCheckpoint
/** * Callback when a checkpoint is restored. * * @param restored The restored checkpoint stats. */ void reportRestoredCheckpoint(RestoredCheckpointStats restored) { checkNotNull(restored, "Restored checkpoint"); statsReadWriteLock.lock(); try { counts.incrementRestoredCheckpoints(); latestRestoredCheckpoint = restored; dirty = true; } finally { statsReadWriteLock.unlock(); } }
3.68
hadoop_Nfs3HttpServer_getServerURI
/** * Return the URI that locates the HTTP server. */ public URI getServerURI() { // getHttpClientScheme() only returns https for HTTPS_ONLY policy. This // matches the behavior that the first connector is a HTTPS connector only // for HTTPS_ONLY policy. InetSocketAddress addr = httpServer.getConnectorAddress(0); return URI.create(DFSUtil.getHttpClientScheme(conf) + "://" + NetUtils.getHostPortString(addr)); }
3.68
morf_SqlDialect_tableDeploymentStatements
/** * Creates SQL to deploy a database table and its associated indexes. * * @param table The meta data for the table to deploy. * @return The statements required to deploy the table and its indexes. */ public Collection<String> tableDeploymentStatements(Table table) { Builder<String> statements = ImmutableList.<String>builder(); statements.addAll(internalTableDeploymentStatements(table)); for (Index index : table.indexes()) { statements.addAll(indexDeploymentStatements(table, index)); } return statements.build(); }
3.68
hudi_PartialBindVisitor_visitPredicate
/** * If an expression is null after accept method, which means it cannot be bounded from * schema, we'll directly return {@link Predicates.TrueExpression}. */ @Override public Expression visitPredicate(Predicate predicate) { if (predicate instanceof Predicates.BinaryComparison) { Predicates.BinaryComparison binaryExp = (Predicates.BinaryComparison) predicate; Expression left = binaryExp.getLeft().accept(this); if (left == null) { return alwaysTrue(); } else { Expression right = binaryExp.getRight().accept(this); if (right == null) { return alwaysTrue(); } return new Predicates.BinaryComparison(left, binaryExp.getOperator(), right); } } if (predicate instanceof Predicates.Not) { Expression expr = ((Predicates.Not) predicate).child.accept(this); if (expr instanceof Predicates.TrueExpression) { return alwaysFalse(); } if (expr instanceof Predicates.FalseExpression) { return alwaysTrue(); } return Predicates.not(expr); } if (predicate instanceof Predicates.In) { Predicates.In in = ((Predicates.In) predicate); Expression valueExpression = in.value.accept(this); if (valueExpression == null) { return alwaysTrue(); } List<Expression> validValues = in.validValues.stream() .map(validValue -> validValue.accept(this)) .collect(Collectors.toList()); if (validValues.stream().anyMatch(Objects::isNull)) { return alwaysTrue(); } return Predicates.in(valueExpression, validValues); } if (predicate instanceof Predicates.IsNull) { Predicates.IsNull isNull = (Predicates.IsNull) predicate; return Option.ofNullable(isNull.child.accept(this)) .map(expr -> (Expression)Predicates.isNull(expr)) .orElse(alwaysTrue()); } if (predicate instanceof Predicates.IsNotNull) { Predicates.IsNotNull isNotNull = (Predicates.IsNotNull) predicate; return Option.ofNullable(isNotNull.child.accept(this)) .map(expr -> (Expression)Predicates.isNotNull(expr)) .orElse(alwaysTrue()); } if (predicate instanceof Predicates.StringStartsWith) { Predicates.StringStartsWith startsWith = (Predicates.StringStartsWith) predicate; Expression left = startsWith.getLeft().accept(this); if (left == null) { return alwaysTrue(); } else { Expression right = startsWith.getRight().accept(this); if (right == null) { return alwaysTrue(); } return Predicates.startsWith(left, right); } } if (predicate instanceof Predicates.StringContains) { Predicates.StringContains contains = (Predicates.StringContains) predicate; Expression left = contains.getLeft().accept(this); if (left == null) { return alwaysTrue(); } else { Expression right = contains.getRight().accept(this); if (right == null) { return alwaysTrue(); } return Predicates.contains(left, right); } } throw new IllegalArgumentException("The expression " + predicate + " cannot be visited as predicate"); }
3.68
hudi_HoodieInputFormatUtils_filterIncrementalFileStatus
/** * Filter a list of FileStatus based on commitsToCheck for incremental view. * * @param job * @param tableMetaClient * @param timeline * @param fileStatuses * @param commitsToCheck * @return */ public static List<FileStatus> filterIncrementalFileStatus(Job job, HoodieTableMetaClient tableMetaClient, HoodieTimeline timeline, FileStatus[] fileStatuses, List<HoodieInstant> commitsToCheck) throws IOException { TableFileSystemView.BaseFileOnlyView roView = new HoodieTableFileSystemView(tableMetaClient, timeline, fileStatuses); List<String> commitsList = commitsToCheck.stream().map(HoodieInstant::getTimestamp).collect(Collectors.toList()); List<HoodieBaseFile> filteredFiles = roView.getLatestBaseFilesInRange(commitsList).collect(Collectors.toList()); List<FileStatus> returns = new ArrayList<>(); for (HoodieBaseFile filteredFile : filteredFiles) { LOG.debug("Processing incremental hoodie file - " + filteredFile.getPath()); filteredFile = refreshFileStatus(job.getConfiguration(), filteredFile); returns.add(getFileStatus(filteredFile)); } LOG.info("Total paths to process after hoodie incremental filter " + filteredFiles.size()); return returns; }
3.68
pulsar_ProducerConfiguration_setCryptoKeyReader
/** * Sets a {@link CryptoKeyReader}. * * @param cryptoKeyReader * CryptoKeyReader object */ public ProducerConfiguration setCryptoKeyReader(CryptoKeyReader cryptoKeyReader) { Objects.requireNonNull(cryptoKeyReader); conf.setCryptoKeyReader(cryptoKeyReader); return this; }
3.68
hadoop_MoveStep_setTolerancePercent
/** * Sets the tolerance percentage. * @param tolerancePercent - long */ @Override public void setTolerancePercent(long tolerancePercent) { this.tolerancePercent = tolerancePercent; }
3.68
morf_SqlDialect_addCastsToSelect
/** * For some dialects, this casting is required, as the type may not be inferred for every field in the select statement. * @param table the table to add the casts. * @param selectStatement select statements which the casts need adding to. * @return SelectStatement with casts. */ protected SelectStatement addCastsToSelect(Table table, SelectStatement selectStatement) { SelectStatement statementWithCasts = selectStatement.deepCopy(); for (int i = 0; i < table.columns().size(); i++) { AliasedField field = statementWithCasts.getFields().get(i); Column column = table.columns().get(i); if (fieldRequiresCast(field, column)) { AliasedField fieldWithCast = field.cast().asType(column.getType(), column.getWidth(), column.getScale()).build().as(column.getName()); statementWithCasts.getFields().set(i, fieldWithCast); } } return statementWithCasts; }
3.68
flink_Transformation_declareManagedMemoryUseCaseAtOperatorScope
/** * Declares that this transformation contains certain operator scope managed memory use case. * * @param managedMemoryUseCase The use case that this transformation declares needing managed * memory for. * @param weight Use-case-specific weights for this transformation. Used for sharing managed * memory across transformations for OPERATOR scope use cases. Check the individual {@link * ManagedMemoryUseCase} for the specific weight definition. * @return The previous weight, if exist. */ public Optional<Integer> declareManagedMemoryUseCaseAtOperatorScope( ManagedMemoryUseCase managedMemoryUseCase, int weight) { Preconditions.checkNotNull(managedMemoryUseCase); Preconditions.checkArgument( managedMemoryUseCase.scope == ManagedMemoryUseCase.Scope.OPERATOR, "Use case is not operator scope."); Preconditions.checkArgument( weight > 0, "Weights for operator scope use cases must be greater than 0."); return Optional.ofNullable( managedMemoryOperatorScopeUseCaseWeights.put(managedMemoryUseCase, weight)); }
3.68
flink_AsyncDataStream_orderedWait
/** * Adds an AsyncWaitOperator. The order to process input records is guaranteed to be the same as * input ones. * * @param in Input {@link DataStream} * @param func {@link AsyncFunction} * @param timeout for the asynchronous operation to complete * @param timeUnit of the given timeout * @param <IN> Type of input record * @param <OUT> Type of output record * @return A new {@link SingleOutputStreamOperator}. */ public static <IN, OUT> SingleOutputStreamOperator<OUT> orderedWait( DataStream<IN> in, AsyncFunction<IN, OUT> func, long timeout, TimeUnit timeUnit) { return addOperator( in, func, timeUnit.toMillis(timeout), DEFAULT_QUEUE_CAPACITY, OutputMode.ORDERED, NO_RETRY_STRATEGY); }
3.68
framework_FilesystemContainer_getFileCounts
/** * Internal method to recursively calculate the number of files under a root * directory. * * @param directory * the root to start counting from. */ private int getFileCounts(File directory) { File[] l; if (filter != null) { l = directory.listFiles(filter); } else { l = directory.listFiles(); } if (l == null) { return 0; } int ret = l.length; for (File f : l) { if (f.isDirectory()) { ret += getFileCounts(f); } } return ret; }
3.68
morf_SelectStatementBuilder_getSetOperators
/** * @return the list of set operators to be applied on this select statement. */ List<SetOperator> getSetOperators() { return setOperators; }
3.68
flink_TableChange_getNewPosition
/** * Returns the position of the modified {@link Column} instance. When the return value is * null, it means modify the column at the original position. When the return value is * FIRST, it means move the modified column to the first. When the return value is AFTER, it * means move the column after the referred column. */ public @Nullable ColumnPosition getNewPosition() { return newPosition; }
3.68
framework_MenuItem_setSubMenu
/** * Sets the sub-menu associated with this item. * * @param subMenu * this item's new sub-menu */ public void setSubMenu(MenuBar subMenu) { this.subMenu = subMenu; }
3.68
framework_FileTypeResolver_getIcon
/** * Gets the descriptive icon representing a file. First the mime-type for * the given file name is resolved, and then the corresponding icon is * fetched from the internal icon storage. If it is not found the default * icon is returned. * * @param file * the file whose icon is requested. * @return the icon corresponding to the given file */ public static Resource getIcon(File file) { return getIconByMimeType(getMIMEType(file)); }
3.68
hbase_DynamicMetricsRegistry_setContext
/** * Set the metrics context tag * @param name of the context * @return the registry itself as a convenience */ public DynamicMetricsRegistry setContext(String name) { return tag(MsInfo.Context, name, true); }
3.68
querydsl_NumberExpression_abs
/** * Create a {@code abs(this)} expression * * <p>Returns the absolute value of this expression</p> * * @return abs(this) */ public NumberExpression<T> abs() { if (abs == null) { abs = Expressions.numberOperation(getType(), MathOps.ABS, mixin); } return abs; }
3.68
framework_DataCommunicator_reset
/** * Method for internal reset from a change in the component, requiring a * full data update. */ public void reset() { // Only needed if a full reset is not pending. if (!reset) { if (getParent() instanceof ComboBox) { beforeClientResponse(true); } // Soft reset through client-side re-request. getClientRpc().reset(getDataProviderSize()); } }
3.68
hadoop_AbstractDTService_requireServiceStarted
/** * Require the service to be started. * @throws IllegalStateException if it is not. */ protected void requireServiceStarted() throws IllegalStateException { requireServiceState(STATE.STARTED); }
3.68
flink_ExecutionConfig_getDefaultInputDependencyConstraint
/** * This method is deprecated. It was used to return the {@link InputDependencyConstraint} * utilized by the old scheduler implementations. These implementations were removed as part of * FLINK-20589. * * @return The previous default constraint {@link InputDependencyConstraint#ANY}. * @deprecated due to the deprecation of {@code InputDependencyConstraint}. */ @PublicEvolving @Deprecated public InputDependencyConstraint getDefaultInputDependencyConstraint() { return InputDependencyConstraint.ANY; }
3.68
flink_SqlWindowTableFunction_checkTimeColumnDescriptorOperand
/** * Checks whether the type that the operand of time col descriptor refers to is valid. * * @param callBinding The call binding * @param pos The position of the descriptor at the operands of the call * @return true if validation passes, false otherwise */ Optional<RuntimeException> checkTimeColumnDescriptorOperand( SqlCallBinding callBinding, int pos) { SqlValidator validator = callBinding.getValidator(); SqlNode operand0 = callBinding.operand(0); RelDataType type = validator.getValidatedNodeType(operand0); List<SqlNode> operands = ((SqlCall) callBinding.operand(pos)).getOperandList(); SqlIdentifier identifier = (SqlIdentifier) operands.get(0); String columnName = identifier.getSimple(); SqlNameMatcher matcher = validator.getCatalogReader().nameMatcher(); for (RelDataTypeField field : type.getFieldList()) { if (matcher.matches(field.getName(), columnName)) { RelDataType fieldType = field.getType(); if (FlinkTypeFactory.isTimeIndicatorType(fieldType)) { return Optional.empty(); } else { LogicalType timeAttributeType = FlinkTypeFactory.toLogicalType(fieldType); if (!canBeTimeAttributeType(timeAttributeType)) { ValidationException exception = new ValidationException( String.format( "The window function %s requires the timecol to be TIMESTAMP or TIMESTAMP_LTZ, but is %s.\n" + "Besides, the timecol must be a time attribute type in streaming mode.", callBinding .getOperator() .getAllowedSignatures(), field.getType())); return Optional.of(exception); } else { return Optional.empty(); } } } } IllegalArgumentException error = new IllegalArgumentException( String.format( "Can't find the time attribute field '%s' in the input schema %s.", columnName, type.getFullTypeString())); return Optional.of(error); }
3.68
zxing_PDF417Writer_rotateArray
/** * Takes and rotates the it 90 degrees */ private static byte[][] rotateArray(byte[][] bitarray) { byte[][] temp = new byte[bitarray[0].length][bitarray.length]; for (int ii = 0; ii < bitarray.length; ii++) { // This makes the direction consistent on screen when rotating the // screen; int inverseii = bitarray.length - ii - 1; for (int jj = 0; jj < bitarray[0].length; jj++) { temp[jj][inverseii] = bitarray[ii][jj]; } } return temp; }
3.68
hudi_HoodieRowDataFileWriterFactory_getRowDataFileWriter
/** * Factory method to assist in instantiating an instance of {@link HoodieRowDataFileWriter}. * * @param path path of the RowFileWriter. * @param hoodieTable instance of {@link HoodieTable} in use. * @param config instance of {@link HoodieWriteConfig} to use. * @param schema schema of the dataset in use. * @return the instantiated {@link HoodieRowDataFileWriter}. * @throws IOException if format is not supported or if any exception during instantiating the RowFileWriter. */ public static HoodieRowDataFileWriter getRowDataFileWriter( Path path, HoodieTable hoodieTable, HoodieWriteConfig config, RowType schema) throws IOException { final String extension = FSUtils.getFileExtension(path.getName()); if (PARQUET.getFileExtension().equals(extension)) { return newParquetInternalRowFileWriter(path, config, schema, hoodieTable); } throw new UnsupportedOperationException(extension + " format not supported yet."); }
3.68
hadoop_QueueACLsManager_getQueueACLsManager
/** * Get queue acl manager corresponding to the scheduler. * @param scheduler the scheduler for which the queue acl manager is required * @param conf Configuration. * @return {@link QueueACLsManager} */ public static QueueACLsManager getQueueACLsManager( ResourceScheduler scheduler, Configuration conf) { if (scheduler instanceof CapacityScheduler) { return new CapacityQueueACLsManager(scheduler, conf); } else if (scheduler instanceof FairScheduler) { return new FairQueueACLsManager(scheduler, conf); } else { return new GenericQueueACLsManager(scheduler, conf); } }
3.68
hibernate-validator_ConfigurationSource_max
/** * Returns that configuration source from the given two sources, which has * the higher priority. * * @param a * A configuration source. * @param b * Another configuration source. * * @return The source with the higher priority. Will be source {@code a} if * both have the same priority. */ public static ConfigurationSource max(ConfigurationSource a, ConfigurationSource b) { return a.getPriority() >= b.getPriority() ? a : b; }
3.68
flink_SqlShowPartitions_getPartitionKVs
/** Get partition spec as key-value strings. */ public LinkedHashMap<String, String> getPartitionKVs() { return SqlPartitionUtils.getPartitionKVs(getPartitionSpec()); }
3.68
dubbo_ReferenceConfig_configInitialized
/** * Return if ReferenceConfig has been initialized * Note: Cannot use `isInitilized` as it may be treated as a Java Bean property * * @return initialized */ @Transient public boolean configInitialized() { return initialized; }
3.68
hudi_HoodieSparkQuickstart_queryData
/** * Load the data files into a DataFrame. */ public static void queryData(SparkSession spark, JavaSparkContext jsc, String tablePath, String tableName, HoodieExampleDataGenerator<HoodieAvroPayload> dataGen) { Dataset<Row> roViewDF = spark .read() .format("hudi") .load(tablePath + "/*/*/*/*"); roViewDF.createOrReplaceTempView("hudi_ro_table"); spark.sql("select fare, begin_lon, begin_lat, ts from hudi_ro_table where fare > 20.0").show(); // +-----------------+-------------------+-------------------+---+ // | fare| begin_lon| begin_lat| ts| // +-----------------+-------------------+-------------------+---+ // |98.88075495133515|0.39556048623031603|0.17851135255091155|0.0| // ... spark.sql( "select _hoodie_commit_time, _hoodie_record_key, _hoodie_partition_path, rider, driver, fare from hudi_ro_table") .show(); // +-------------------+--------------------+----------------------+-------------------+--------------------+------------------+ // |_hoodie_commit_time| _hoodie_record_key|_hoodie_partition_path| rider| driver| fare| // +-------------------+--------------------+----------------------+-------------------+--------------------+------------------+ // | 20191231181501|31cafb9f-0196-4b1...| 2020/01/02|rider-1577787297889|driver-1577787297889| 98.88075495133515| // ... }
3.68
hibernate-validator_ConstraintViolationAssert_assertPathEquals
/** * Asserts that the path matches the expected path. * * @param path The path under test * @param expectedPath The expected path */ public static void assertPathEquals(Path path, PathExpectation expectedPath) { assertEquals( new PathExpectation( path ), expectedPath, "Path does not match" ); }
3.68
hbase_TableSplit_getTable
/** * Returns the table name. * @return The table name. */ public TableName getTable() { // It is ugly that usually to get a TableName, the method is called getTableName. We can't do // that in here though because there was an existing getTableName in place already since // deprecated. return tableName; }
3.68
flink_SharedBuffer_removeEntry
/** * Removes a ShareBufferNode from cache and state. * * @param nodeId id of the event */ void removeEntry(NodeId nodeId) throws Exception { this.entryCache.invalidate(nodeId); this.entries.remove(nodeId); }
3.68
AreaShop_GeneralRegion_isOwner
/** * Check if the players is owner of this region. * @param player Player to check ownership for * @return true if the player currently rents or buys this region */ public boolean isOwner(UUID player) { return (this instanceof RentRegion && ((RentRegion)this).isRenter(player)) || (this instanceof BuyRegion && ((BuyRegion)this).isBuyer(player)); }
3.68
hudi_AvroSchemaUtils_isSchemaCompatible
/** * Establishes whether {@code newSchema} is compatible w/ {@code prevSchema}, as * defined by Avro's {@link AvroSchemaCompatibility}. * From avro's compatability standpoint, prevSchema is writer schema and new schema is reader schema. * {@code newSchema} is considered compatible to {@code prevSchema}, iff data written using {@code prevSchema} * could be read by {@code newSchema} * * @param prevSchema previous instance of the schema * @param newSchema new instance of the schema * @param checkNaming controls whether schemas fully-qualified names should be checked */ public static boolean isSchemaCompatible(Schema prevSchema, Schema newSchema, boolean checkNaming, boolean allowProjection) { // NOTE: We're establishing compatibility of the {@code prevSchema} and {@code newSchema} // as following: {@code newSchema} is considered compatible to {@code prevSchema}, // iff data written using {@code prevSchema} could be read by {@code newSchema} // In case schema projection is not allowed, new schema has to have all the same fields as the // old schema if (!allowProjection) { if (!canProject(prevSchema, newSchema)) { return false; } } AvroSchemaCompatibility.SchemaPairCompatibility result = AvroSchemaCompatibility.checkReaderWriterCompatibility(newSchema, prevSchema, checkNaming); return result.getType() == AvroSchemaCompatibility.SchemaCompatibilityType.COMPATIBLE; }
3.68
open-banking-gateway_Xs2aRedirectExecutor_redirect
/** * Redirects PSU to some page (or emits FinTech redirection required) by performing interpolation of the * string returned by {@code uiScreenUriSpel} * @param execution Execution context of the current process * @param context Current XS2A context * @param uiScreenUriSpel UI screen SpEL expression to interpolate * @param destinationUri URL where UI screen should redirect user to if he clicks OK (i.e. to ASPSP redirection * where user must click OK button in order to be redirected to ASPSP) * @param eventFactory Allows to construct custom event with redirection parameters. */ public void redirect( DelegateExecution execution, Xs2aContext context, String uiScreenUriSpel, String destinationUri, Function<Redirect.RedirectBuilder, ? extends Redirect> eventFactory ) { setDestinationUriInContext(execution, destinationUri); URI screenUri = ContextUtil.buildAndExpandQueryParameters(uiScreenUriSpel, context); Redirect.RedirectBuilder redirect = Redirect.builder(); redirect.processId(execution.getRootProcessInstanceId()); redirect.executionId(execution.getId()); redirect.redirectUri(screenUri); setUiUriInContext(execution, screenUri); applicationEventPublisher.publishEvent(eventFactory.apply(redirect)); }
3.68
flink_Buckets_initializeState
/** * Initializes the state after recovery from a failure. * * <p>During this process: * * <ol> * <li>we set the initial value for part counter to the maximum value used before across all * tasks and buckets. This guarantees that we do not overwrite valid data, * <li>we commit any pending files for previous checkpoints (previous to the last successful * one from which we restore), * <li>we resume writing to the previous in-progress file of each bucket, and * <li>if we receive multiple states for the same bucket, we merge them. * </ol> * * @param bucketStates the state holding recovered state about active buckets. * @param partCounterState the state holding the max previously used part counters. * @throws Exception if anything goes wrong during retrieving the state or restoring/committing * of any in-progress/pending part files */ public void initializeState( final ListState<byte[]> bucketStates, final ListState<Long> partCounterState) throws Exception { initializePartCounter(partCounterState); LOG.info( "Subtask {} initializing its state (max part counter={}).", subtaskIndex, maxPartCounter); initializeActiveBuckets(bucketStates); }
3.68
hadoop_BufferData_getChecksum
/** * Computes CRC32 checksum of the given buffer's contents. * * @param buffer the buffer whose content's checksum is to be computed. * @return the computed checksum. */ public static long getChecksum(ByteBuffer buffer) { ByteBuffer tempBuffer = buffer.duplicate(); tempBuffer.rewind(); CRC32 crc32 = new CRC32(); crc32.update(tempBuffer); return crc32.getValue(); }
3.68
framework_ImmediateUpload_getTicketNumber
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber() */ @Override protected Integer getTicketNumber() { return Integer.valueOf(14238); }
3.68
framework_VTwinColSelect_setTabIndex
/** * Sets the tab index. * * @param tabIndex * the tab index to set */ public void setTabIndex(int tabIndex) { optionsListBox.setTabIndex(tabIndex); selectionsListBox.setTabIndex(tabIndex); addItemsLeftToRightButton.setTabIndex(tabIndex); removeItemsRightToLeftButton.setTabIndex(tabIndex); }
3.68
flink_HiveASTParseDriver_parse
/** * Parses a command, optionally assigning the parser's token stream to the given context. * * @param command command to parse * @param ctx context with which to associate this parser's token stream, or null if either no * context is available or the context already has an existing stream * @return parsed AST */ public HiveParserASTNode parse( String command, HiveParserContext ctx, String viewFullyQualifiedName) throws HiveASTParseException { if (LOG.isDebugEnabled()) { LOG.debug("Parsing command: " + command); } HiveLexerX lexer = new HiveLexerX(new ANTLRNoCaseStringStream(command)); TokenRewriteStream tokens = new TokenRewriteStream(lexer); if (ctx != null) { if (viewFullyQualifiedName == null) { // Top level query ctx.setTokenRewriteStream(tokens); } else { // It is a view ctx.addViewTokenRewriteStream(viewFullyQualifiedName, tokens); } lexer.setHiveConf(ctx.getConf()); } HiveASTParser parser = new HiveASTParser(tokens); if (ctx != null) { parser.setHiveConf(ctx.getConf()); } parser.setTreeAdaptor(ADAPTOR); HiveASTParser.statement_return r = null; try { r = parser.statement(); } catch (RecognitionException e) { throw new HiveASTParseException(parser.errors); } if (lexer.getErrors().size() == 0 && parser.errors.size() == 0) { LOG.debug("Parse Completed"); } else if (lexer.getErrors().size() != 0) { throw new HiveASTParseException(lexer.getErrors()); } else { throw new HiveASTParseException(parser.errors); } HiveParserASTNode tree = r.getTree(); tree.setUnknownTokenBoundaries(); return tree; }
3.68
morf_SpreadsheetDataSetProducer_records
/** * {@inheritDoc} * * @see org.alfasoftware.morf.dataset.DataSetProducer#records(java.lang.String) */ @Override public Iterable<Record> records(String tableName) { return tables.get(tableName); }
3.68
framework_VTree_getFirstRootNode
/** * Returns the first root node of the tree or null if there are no root * nodes. * * @return The first root {@link TreeNode} */ protected TreeNode getFirstRootNode() { if (body.getWidgetCount() == 0) { return null; } return (TreeNode) body.getWidget(0); }
3.68
hadoop_LocalityMulticastAMRMProxyPolicy_getTotNumLocalizedContainers
/** * Return the total number of container coming from localized requests * matching an allocation Id. */ private long getTotNumLocalizedContainers(long allocationId) { AtomicLong c = totNumLocalizedContainers.get(allocationId); return c == null ? 0 : c.get(); }
3.68
hadoop_RouterStateIdContext_setResponseHeaderState
/** * Adds the {@link #namespaceIdMap} to the response header that will be sent to a client. */ public void setResponseHeaderState(RpcResponseHeaderProto.Builder headerBuilder) { if (namespaceIdMap.isEmpty()) { return; } RouterFederatedStateProto.Builder builder = RouterFederatedStateProto.newBuilder(); namespaceIdMap.forEach((k, v) -> builder.putNamespaceStateIds(k, v.get())); headerBuilder.setRouterFederatedState(builder.build().toByteString()); }
3.68
graphhopper_DijkstraOneToMany_clear
/** * Call clear if you have a different start node and need to clear the cache. */ public DijkstraOneToMany clear() { doClear = true; return this; }
3.68
hmily_ApolloClient_pull
/** * Pull input stream. * * @param config the config * @return the input stream */ public InputStream pull(final ApolloConfig config) { setApolloConfig(config); ConfigFile configFile = ConfigService.getConfigFile(config.getNamespace(), ConfigFileFormat.fromString(config.getFileExtension())); String content = configFile.getContent(); if (LOGGER.isDebugEnabled()) { LOGGER.debug("apollo content {}", content); } if (StringUtils.isBlank(content)) { return null; } return new ByteArrayInputStream(content.getBytes()); }
3.68
framework_XhrConnectionError_getPayload
/** * Returns the payload which was sent to the server. * * @return the payload which was sent, never null */ public JsonObject getPayload() { return payload; }
3.68
hbase_ColumnSchemaModel___getTTL
/** Returns the value of the TTL attribute or its default if it is unset */ public int __getTTL() { Object o = attrs.get(TTL); return o != null ? Integer.parseInt(o.toString()) : ColumnFamilyDescriptorBuilder.DEFAULT_TTL; }
3.68
framework_BeanFieldGroup_bindFieldsBuffered
/** * Convenience method to bind Fields from a given "field container" to a * given bean with buffering enabled. * <p> * The returned {@link BeanFieldGroup} can be used for further * configuration. * * @see #bindFieldsUnbuffered(Object, Object) * @see #bindMemberFields(Object) * @since 7.2 * @param bean * the bean to be bound * @param objectWithMemberFields * the class that contains {@link Field}s for bean properties * @return the bean field group used to make binding */ public static <T> BeanFieldGroup<T> bindFieldsBuffered(T bean, Object objectWithMemberFields) { return createAndBindFields(bean, objectWithMemberFields, true); }
3.68
hbase_MetaRegionLocationCache_getMetaRegionLocation
/** * Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for future * updates. * @param replicaId ReplicaID of the region. * @return HRegionLocation for the meta replica. * @throws KeeperException if there is any issue fetching/parsing the serialized data. */ private HRegionLocation getMetaRegionLocation(int replicaId) throws KeeperException { RegionState metaRegionState; try { byte[] data = ZKUtil.getDataAndWatch(watcher, watcher.getZNodePaths().getZNodeForReplica(replicaId)); metaRegionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId); } catch (DeserializationException e) { throw ZKUtil.convert(e); } return new HRegionLocation(metaRegionState.getRegion(), metaRegionState.getServerName()); }
3.68
hadoop_DynoInfraUtils_fetchHadoopTarball
/** * If a file matching {@value HADOOP_TAR_FILENAME_FORMAT} and {@code version} * is found in {@code destinationDir}, return its path. Otherwise, first * download the tarball from an Apache mirror. If the * {@value APACHE_DOWNLOAD_MIRROR_KEY} configuration or system property * (checked in that order) is set, use that as the mirror; else use * {@value APACHE_DOWNLOAD_MIRROR_DEFAULT}. * * @param destinationDir destination directory to save a tarball * @param version The version of Hadoop to download, like "2.7.4" * or "3.0.0-beta1" * @param conf configuration * @param log logger instance * @return The path to the tarball. * @throws IOException on failure */ public static File fetchHadoopTarball(File destinationDir, String version, Configuration conf, Logger log) throws IOException { log.info("Looking for Hadoop tarball for version: " + version); File destinationFile = new File(destinationDir, String.format(HADOOP_TAR_FILENAME_FORMAT, version)); if (destinationFile.exists()) { log.info("Found tarball at: " + destinationFile.getAbsolutePath()); return destinationFile; } String apacheMirror = conf.get(APACHE_DOWNLOAD_MIRROR_KEY); if (apacheMirror == null) { apacheMirror = System.getProperty(APACHE_DOWNLOAD_MIRROR_KEY, APACHE_DOWNLOAD_MIRROR_DEFAULT); } if (!destinationDir.exists()) { if (!destinationDir.mkdirs()) { throw new IOException("Unable to create local dir: " + destinationDir); } } URL downloadURL = new URL(apacheMirror + String .format(APACHE_DOWNLOAD_MIRROR_SUFFIX_FORMAT, version, version)); log.info("Downloading tarball from: <{}> to <{}>", downloadURL, destinationFile.getAbsolutePath()); FileUtils.copyURLToFile(downloadURL, destinationFile, 10000, 60000); log.info("Completed downloading of Hadoop tarball"); return destinationFile; }
3.68
hudi_OptionsResolver_isCowTable
/** * Returns whether it is a COPY_ON_WRITE table. */ public static boolean isCowTable(Configuration conf) { return conf.getString(FlinkOptions.TABLE_TYPE) .toUpperCase(Locale.ROOT) .equals(FlinkOptions.TABLE_TYPE_COPY_ON_WRITE); }
3.68
pulsar_ManagedLedgerConfig_setAddEntryTimeoutSeconds
/** * Add-entry timeout after which add-entry callback will be failed if add-entry is not succeeded. * * @param addEntryTimeoutSeconds */ public ManagedLedgerConfig setAddEntryTimeoutSeconds(long addEntryTimeoutSeconds) { this.addEntryTimeoutSeconds = addEntryTimeoutSeconds; return this; }
3.68
flink_TimestampUtil_createVectorFromConstant
// creates a Hive ColumnVector of constant timestamp value public static ColumnVector createVectorFromConstant(int batchSize, Object value) { if (hiveTSColVectorClz != null) { return OrcTimestampColumnVector.createFromConstant(batchSize, value); } else { return OrcLegacyTimestampColumnVector.createFromConstant(batchSize, value); } }
3.68
streampipes_AdapterResourceManager_encryptAndUpdate
/** * Takes an {@link AdapterDescription}, encrypts the password properties and updates the corresponding database entry * * @param adapterDescription input adapter description */ public void encryptAndUpdate(AdapterDescription adapterDescription) { db.updateAdapter(cloneAndEncrypt(adapterDescription)); }
3.68
hbase_RegionServerSpaceQuotaManager_isStarted
/** Returns if the {@code Chore} has been started. */ public boolean isStarted() { return started; }
3.68
pulsar_ManagedCursor_asyncReadEntriesWithSkipOrWait
/** * Asynchronously read entries from the ManagedLedger, up to the specified number and size. * * <p/>If no entries are available, the callback will not be triggered. Instead it will be registered to wait until * a new message will be persisted into the managed ledger * * @see #readEntriesOrWait(int, long) * @param maxEntries * maximum number of entries to return * @param maxSizeBytes * max size in bytes of the entries to return * @param callback * callback object * @param ctx * opaque context * @param maxPosition * max position can read * @param skipCondition * predicate of read filter out */ default void asyncReadEntriesWithSkipOrWait(int maxEntries, long maxSizeBytes, ReadEntriesCallback callback, Object ctx, PositionImpl maxPosition, Predicate<PositionImpl> skipCondition) { asyncReadEntriesOrWait(maxEntries, maxSizeBytes, callback, ctx, maxPosition); }
3.68
hbase_ColumnFamilyDescriptorBuilder_removeConfiguration
/** * Remove a configuration setting represented by the key from the {@link #configuration} map. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor removeConfiguration(final String key) { return setConfiguration(key, null); }
3.68
flink_PlanGenerator_registerGenericTypeInfoIfConfigured
/** * Check plan for GenericTypeInfo's and register the types at the serializers. * * @param plan the generated plan. */ private void registerGenericTypeInfoIfConfigured(Plan plan) { if (!config.isAutoTypeRegistrationDisabled()) { plan.accept( new Visitor<Operator<?>>() { private final Set<Class<?>> registeredTypes = new HashSet<>(); private final Set<org.apache.flink.api.common.operators.Operator<?>> visitedOperators = new HashSet<>(); @Override public boolean preVisit( org.apache.flink.api.common.operators.Operator<?> visitable) { if (!visitedOperators.add(visitable)) { return false; } OperatorInformation<?> opInfo = visitable.getOperatorInfo(); Serializers.recursivelyRegisterType( opInfo.getOutputType(), config, registeredTypes); return true; } @Override public void postVisit( org.apache.flink.api.common.operators.Operator<?> visitable) {} }); } }
3.68
hbase_AbstractStateMachineTableProcedure_checkTableModifiable
/** * Check whether a table is modifiable - exists and either offline or online with config set * @param env MasterProcedureEnv */ protected void checkTableModifiable(final MasterProcedureEnv env) throws IOException { // Checks whether the table exists if (!env.getMasterServices().getTableDescriptors().exists(getTableName())) { throw new TableNotFoundException(getTableName()); } }
3.68
framework_VaadinSession_getLockInstance
/** * Gets the {@link Lock} instance that is used for protecting the data of * this session from concurrent access. * <p> * The <code>Lock</code> can be used to gain more control than what is * available only using {@link #lock()} and {@link #unlock()}. The returned * instance is not guaranteed to support any other features of the * <code>Lock</code> interface than {@link Lock#lock()} and * {@link Lock#unlock()}. * * @return the <code>Lock</code> that is used for synchronization, never * <code>null</code> * * @see #lock() * @see Lock */ public Lock getLockInstance() { return lock; }
3.68
flink_BroadcastConnectedStream_getType2
/** * Gets the type of the second input. * * @return The type of the second input */ public TypeInformation<IN2> getType2() { return broadcastStream.getType(); }
3.68
hbase_QuotaFilter_getRegionServerFilter
/** Returns the RegionServer filter regex */ public String getRegionServerFilter() { return regionServerRegex; }
3.68
morf_Criterion_getValue
/** * Get the value associate with the criterion. This is the right hand side * of an expression. * * @return the value */ public Object getValue() { return value; }
3.68
framework_AbstractComponentConnector_getIconUri
/** * Gets the URI of the icon set for this component. * * @return the URI of the icon, or <code>null</code> if no icon has been * defined. */ protected String getIconUri() { return getResourceUrl(ComponentConstants.ICON_RESOURCE); }
3.68
hadoop_GenericEventTypeMetricsManager_create
// Construct a GenericEventTypeMetrics for dispatcher public static <T extends Enum<T>> GenericEventTypeMetrics create(String dispatcherName, Class<T> eventTypeClass) { MetricsInfo metricsInfo = info("GenericEventTypeMetrics for " + eventTypeClass.getName(), "Metrics for " + dispatcherName); return new GenericEventTypeMetrics.EventTypeMetricsBuilder<T>() .setMs(DefaultMetricsSystem.instance()) .setInfo(metricsInfo) .setEnumClass(eventTypeClass) .setEnums(eventTypeClass.getEnumConstants()) .build().registerMetrics(); }
3.68
hadoop_RouterQuotaManager_put
/** * Put new entity into cache. * @param path Mount table path. * @param quotaUsage Corresponding cache value. */ public void put(String path, RouterQuotaUsage quotaUsage) { writeLock.lock(); try { this.cache.put(path, quotaUsage); } finally { writeLock.unlock(); } }
3.68
rocketmq-connect_DbStructure_createOrAmendIfNecessary
/** * Create or amend table. * * @param config the connector configuration // * @param connection the database connection handle * @param tableId the table ID * @param fieldsMetadata the fields metadata * @return whether a DDL operation was performed * @throws SQLException if a DDL operation was deemed necessary but failed */ public boolean createOrAmendIfNecessary( final DorisSinkConfig config, final TableId tableId, final FieldsMetadata fieldsMetadata ) throws SQLException { // It seems that doris don't support create or amend table via stream load, so do nothing return false; }
3.68
hudi_HoodieParquetRealtimeInputFormat_getRecordReader
// To make Hive on Spark queries work with RT tables. Our theory is that due to // {@link org.apache.hadoop.hive.ql.io.parquet.ProjectionPusher} // not handling empty list correctly, the ParquetRecordReaderWrapper ends up adding the same column ids multiple // times which ultimately breaks the query. @Override public RecordReader<NullWritable, ArrayWritable> getRecordReader(final InputSplit split, final JobConf jobConf, final Reporter reporter) throws IOException { // sanity check ValidationUtils.checkArgument(split instanceof RealtimeSplit, "HoodieRealtimeRecordReader can only work on RealtimeSplit and not with " + split); RealtimeSplit realtimeSplit = (RealtimeSplit) split; // add preCombineKey HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(jobConf).setBasePath(realtimeSplit.getBasePath()).build(); HoodieTableConfig tableConfig = metaClient.getTableConfig(); addProjectionToJobConf(realtimeSplit, jobConf, tableConfig); LOG.info("Creating record reader with readCols :" + jobConf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR) + ", Ids :" + jobConf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR)); // for log only split, set the parquet reader as empty. if (FSUtils.isLogFile(realtimeSplit.getPath())) { return new HoodieRealtimeRecordReader(realtimeSplit, jobConf, new HoodieEmptyRecordReader(realtimeSplit, jobConf)); } return new HoodieRealtimeRecordReader(realtimeSplit, jobConf, super.getRecordReader(split, jobConf, reporter)); }
3.68
hbase_ScannerContext_checkSizeLimit
/** * @param checkerScope The scope that the limit is being checked from * @return true when the limit is enforceable from the checker's scope and it has been reached */ boolean checkSizeLimit(LimitScope checkerScope) { return !skippingRow && hasSizeLimit(checkerScope) && (progress.getDataSize() >= limits.getDataSize() || progress.getHeapSize() >= limits.getHeapSize() || progress.getBlockSize() >= limits.getBlockSize()); }
3.68
hadoop_CommitUtilsWithMR_getMagicJobAttemptsPath
/** * Get the location of magic job attempts. * @param out the base output directory. * @param jobUUID unique Job ID. * @return the location of magic job attempts. */ public static Path getMagicJobAttemptsPath(Path out, String jobUUID) { Preconditions.checkArgument(jobUUID != null && !(jobUUID.isEmpty()), "Invalid job ID: %s", jobUUID); return new Path(out, MAGIC_PATH_PREFIX + jobUUID); }
3.68
hudi_AbstractTableFileSystemView_getLatestFileSlice
/** * Get Latest File Slice for a given fileId in a given partition. */ @Override public final Option<FileSlice> getLatestFileSlice(String partitionStr, String fileId) { try { readLock.lock(); String partitionPath = formatPartitionKey(partitionStr); ensurePartitionLoadedCorrectly(partitionPath); if (isFileGroupReplaced(partitionPath, fileId)) { return Option.empty(); } else { Option<FileSlice> fs = fetchLatestFileSlice(partitionPath, fileId); if (!fs.isPresent()) { return Option.empty(); } return Option.ofNullable(filterUncommittedFiles(fs.get(), true).map(this::addBootstrapBaseFileIfPresent).findFirst().orElse(null)); } } finally { readLock.unlock(); } }
3.68
zxing_Version_buildVersions
/** * See ISO 16022:2006 5.5.1 Table 7 */ private static Version[] buildVersions() { return new Version[]{ new Version(1, 10, 10, 8, 8, new ECBlocks(5, new ECB(1, 3))), new Version(2, 12, 12, 10, 10, new ECBlocks(7, new ECB(1, 5))), new Version(3, 14, 14, 12, 12, new ECBlocks(10, new ECB(1, 8))), new Version(4, 16, 16, 14, 14, new ECBlocks(12, new ECB(1, 12))), new Version(5, 18, 18, 16, 16, new ECBlocks(14, new ECB(1, 18))), new Version(6, 20, 20, 18, 18, new ECBlocks(18, new ECB(1, 22))), new Version(7, 22, 22, 20, 20, new ECBlocks(20, new ECB(1, 30))), new Version(8, 24, 24, 22, 22, new ECBlocks(24, new ECB(1, 36))), new Version(9, 26, 26, 24, 24, new ECBlocks(28, new ECB(1, 44))), new Version(10, 32, 32, 14, 14, new ECBlocks(36, new ECB(1, 62))), new Version(11, 36, 36, 16, 16, new ECBlocks(42, new ECB(1, 86))), new Version(12, 40, 40, 18, 18, new ECBlocks(48, new ECB(1, 114))), new Version(13, 44, 44, 20, 20, new ECBlocks(56, new ECB(1, 144))), new Version(14, 48, 48, 22, 22, new ECBlocks(68, new ECB(1, 174))), new Version(15, 52, 52, 24, 24, new ECBlocks(42, new ECB(2, 102))), new Version(16, 64, 64, 14, 14, new ECBlocks(56, new ECB(2, 140))), new Version(17, 72, 72, 16, 16, new ECBlocks(36, new ECB(4, 92))), new Version(18, 80, 80, 18, 18, new ECBlocks(48, new ECB(4, 114))), new Version(19, 88, 88, 20, 20, new ECBlocks(56, new ECB(4, 144))), new Version(20, 96, 96, 22, 22, new ECBlocks(68, new ECB(4, 174))), new Version(21, 104, 104, 24, 24, new ECBlocks(56, new ECB(6, 136))), new Version(22, 120, 120, 18, 18, new ECBlocks(68, new ECB(6, 175))), new Version(23, 132, 132, 20, 20, new ECBlocks(62, new ECB(8, 163))), new Version(24, 144, 144, 22, 22, new ECBlocks(62, new ECB(8, 156), new ECB(2, 155))), new Version(25, 8, 18, 6, 16, new ECBlocks(7, new ECB(1, 5))), new Version(26, 8, 32, 6, 14, new ECBlocks(11, new ECB(1, 10))), new Version(27, 12, 26, 10, 24, new ECBlocks(14, new ECB(1, 16))), new Version(28, 12, 36, 10, 16, new ECBlocks(18, new ECB(1, 22))), new Version(29, 16, 36, 14, 16, new ECBlocks(24, new ECB(1, 32))), new Version(30, 16, 48, 14, 22, new ECBlocks(28, new ECB(1, 49))), // extended forms as specified in // ISO 21471:2020 (DMRE) 5.5.1 Table 7 new Version(31, 8, 48, 6, 22, new ECBlocks(15, new ECB(1, 18))), new Version(32, 8, 64, 6, 14, new ECBlocks(18, new ECB(1, 24))), new Version(33, 8, 80, 6, 18, new ECBlocks(22, new ECB(1, 32))), new Version(34, 8, 96, 6, 22, new ECBlocks(28, new ECB(1, 38))), new Version(35, 8, 120, 6, 18, new ECBlocks(32, new ECB(1, 49))), new Version(36, 8, 144, 6, 22, new ECBlocks(36, new ECB(1, 63))), new Version(37, 12, 64, 10, 14, new ECBlocks(27, new ECB(1, 43))), new Version(38, 12, 88, 10, 20, new ECBlocks(36, new ECB(1, 64))), new Version(39, 16, 64, 14, 14, new ECBlocks(36, new ECB(1, 62))), new Version(40, 20, 36, 18, 16, new ECBlocks(28, new ECB(1, 44))), new Version(41, 20, 44, 18, 20, new ECBlocks(34, new ECB(1, 56))), new Version(42, 20, 64, 18, 14, new ECBlocks(42, new ECB(1, 84))), new Version(43, 22, 48, 20, 22, new ECBlocks(38, new ECB(1, 72))), new Version(44, 24, 48, 22, 22, new ECBlocks(41, new ECB(1, 80))), new Version(45, 24, 64, 22, 14, new ECBlocks(46, new ECB(1, 108))), new Version(46, 26, 40, 24, 18, new ECBlocks(38, new ECB(1, 70))), new Version(47, 26, 48, 24, 22, new ECBlocks(42, new ECB(1, 90))), new Version(48, 26, 64, 24, 14, new ECBlocks(50, new ECB(1, 118))) }; }
3.68
hbase_NamedQueueRecorder_getNamedQueueRecords
/** * Retrieve in memory queue records from ringbuffer * @param request namedQueue request with event type * @return queue records from ringbuffer after filter (if applied) */ public NamedQueueGetResponse getNamedQueueRecords(NamedQueueGetRequest request) { return this.logEventHandler.getNamedQueueRecords(request); }
3.68
hbase_BulkLoadHFilesTool_populateLoadQueue
/** * Populate the Queue with given HFiles */ private static void populateLoadQueue(Deque<LoadQueueItem> ret, Map<byte[], List<Path>> map) { map.forEach((k, v) -> v.stream().map(p -> new LoadQueueItem(k, p)).forEachOrdered(ret::add)); }
3.68
flink_BlobKey_createKey
/** * Returns the right {@link BlobKey} subclass for the given parameters. * * @param type whether the referenced BLOB is permanent or transient * @param key the actual key data * @param random the random component of the key * @return BlobKey subclass */ static BlobKey createKey(BlobType type, byte[] key, byte[] random) { if (type == PERMANENT_BLOB) { return new PermanentBlobKey(key, random); } else { return new TransientBlobKey(key, random); } }
3.68
hudi_HoodieSparkKeyGeneratorFactory_convertToSparkKeyGenerator
/** * Convert hoodie-common KeyGenerator to SparkKeyGeneratorInterface implement. */ public static String convertToSparkKeyGenerator(String keyGeneratorClassName) { return COMMON_TO_SPARK_KEYGENERATOR.getOrDefault(keyGeneratorClassName, keyGeneratorClassName); }
3.68
hadoop_ResourceCalculatorProcessTree_getResourceCalculatorProcessTree
/** * Create the ResourceCalculatorProcessTree rooted to specified process * from the class name and configure it. If class name is null, this method * will try and return a process tree plugin available for this system. * * @param pid process pid of the root of the process tree * @param clazz class-name * @param conf configure the plugin with this. * * @return ResourceCalculatorProcessTree or null if ResourceCalculatorPluginTree * is not available for this system. */ public static ResourceCalculatorProcessTree getResourceCalculatorProcessTree( String pid, Class<? extends ResourceCalculatorProcessTree> clazz, Configuration conf) { if (clazz != null) { try { Constructor <? extends ResourceCalculatorProcessTree> c = clazz.getConstructor(String.class); ResourceCalculatorProcessTree rctree = c.newInstance(pid); rctree.setConf(conf); rctree.initialize(); return rctree; } catch(Exception e) { throw new RuntimeException(e); } } // No class given, try a os specific class if (ProcfsBasedProcessTree.isAvailable()) { return new ProcfsBasedProcessTree(pid); } if (WindowsBasedProcessTree.isAvailable()) { return new WindowsBasedProcessTree(pid); } // Not supported on this system. return null; }
3.68
morf_SqlDialect_convertStatementToHash
/** * Converts a structured {@code SELECT} statement to a hash representation. * * @param statement the statement to convert * @return A hash representation of {@code statement}. */ public String convertStatementToHash(SelectFirstStatement statement) { return md5HashHexEncoded(convertStatementToSQL(statement)); }
3.68
dubbo_AdaptiveClassCodeGenerator_generate
/** * generate and return class code * @param sort - whether sort methods */ public String generate(boolean sort) { // no need to generate adaptive class since there's no adaptive method found. if (!hasAdaptiveMethod()) { throw new IllegalStateException("No adaptive method exist on extension " + type.getName() + ", refuse to create the adaptive class!"); } StringBuilder code = new StringBuilder(); code.append(generatePackageInfo()); code.append(generateImports()); code.append(generateClassDeclaration()); Method[] methods = type.getMethods(); if (sort) { Arrays.sort(methods, Comparator.comparing(Method::toString)); } for (Method method : methods) { code.append(generateMethod(method)); } code.append('}'); if (logger.isDebugEnabled()) { logger.debug(code.toString()); }
3.68
hadoop_RetriableCommand_execute
/** * The execute() method invokes doExecute() until either: * 1. doExecute() succeeds, or * 2. the command may no longer be retried (e.g. runs out of retry-attempts). * @param arguments The list of arguments for the command. * @return Generic "Object" from doExecute(), on success. * @throws Exception */ public Object execute(Object... arguments) throws Exception { Exception latestException; int counter = 0; while (true) { try { return doExecute(arguments); } catch(Exception exception) { LOG.error("Failure in Retriable command: " + description, exception); latestException = exception; } counter++; RetryAction action = retryPolicy.shouldRetry(latestException, counter, 0, true); if (action.action == RetryPolicy.RetryAction.RetryDecision.RETRY) { ThreadUtil.sleepAtLeastIgnoreInterrupts(action.delayMillis); } else { break; } } throw new IOException("Couldn't run retriable-command: " + description, latestException); }
3.68
hadoop_BlockBlobAppendStream_maybeSetFirstError
/** * Set {@link #firstError} to the exception if it is not already set. * @param exception exception to save */ private void maybeSetFirstError(IOException exception) { firstError.compareAndSet(null, exception); }
3.68