Columns: name (string, length 12–178) · code_snippet (string, length 8–36.5k) · score (float64, 3.26–3.68)
flink_FlinkContainersSettings_isZookeeperHA
/** Returns whether ZooKeeper high availability (HA) is enabled. */ public Boolean isZookeeperHA() { return zookeeperHA; }
3.68
framework_Profiler_reset
/** * Resets the collected profiler data. Calls to this method will be removed * by the compiler unless profiling is enabled. */ public static void reset() { if (isEnabled()) { /* * Old implementations might call reset for initialization, so * ensure it is initialized here as well. Initialization has no side * effects if already done. */ initialize(); clearEventsList(); } }
3.68
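The reset pattern above works because the enabled check can be a compile-time constant, letting the compiler strip every guarded call site. A minimal self-contained sketch of the same guard idea; the class name, system-property flag, and event list are hypothetical (the real Profiler uses a compiler-visible constant rather than a runtime property):

```java
import java.util.ArrayList;
import java.util.List;

public final class MiniProfiler {
    // In the real pattern this is a compile-time constant, so an optimizing
    // compiler can eliminate every guarded call site; a system property is
    // used here only so the sketch is runnable.
    private static final boolean ENABLED = Boolean.getBoolean("profiler.enabled");
    private static final List<String> EVENTS = new ArrayList<>();

    public static boolean isEnabled() {
        return ENABLED;
    }

    public static void reset() {
        if (isEnabled()) {
            EVENTS.clear(); // clearing twice is a no-op, so reset is safe to call for initialization
        }
    }

    public static void log(String event) {
        if (isEnabled()) {
            EVENTS.add(event);
        }
    }
}
```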
hbase_Segment_getScanner
/** * Creates the scanner for the given read point * @return a scanner for the given read point */ protected KeyValueScanner getScanner(long readPoint) { return new SegmentScanner(this, readPoint); }
3.68
graphhopper_OSMReaderConfig_setElevationMaxWayPointDistance
/** * Sets the max elevation discrepancy between way points and the simplified polyline in meters */ public OSMReaderConfig setElevationMaxWayPointDistance(double elevationMaxWayPointDistance) { this.elevationMaxWayPointDistance = elevationMaxWayPointDistance; return this; }
3.68
hbase_SplitWALManager_archive
/** * Archive processed WAL */ public void archive(String wal) throws IOException { WALSplitUtil.moveWAL(this.fs, new Path(wal), this.walArchiveDir); }
3.68
dubbo_MockClusterInvoker_selectMockInvoker
/** * Return mock invokers. * Contract: * directory.list() will return a list of normal invokers if Constants.INVOCATION_NEED_MOCK is absent or not true in invocation; otherwise, a list of mock invokers will be returned. * if directory.list() returns more than one mock invoker, only one of them will be used. * * @param invocation the invocation * @return the mock invokers, or null if they could not be obtained */ private List<Invoker<T>> selectMockInvoker(Invocation invocation) { List<Invoker<T>> invokers = null; // TODO generic invoker? if (invocation instanceof RpcInvocation) { // Note the implicit contract: although the description is added to the interface declaration, // extensibility is a problem. The practice of placing the flag in the attachment needs to be improved. invocation.setAttachment(INVOCATION_NEED_MOCK, Boolean.TRUE.toString()); // directory will return a list of normal invokers if Constants.INVOCATION_NEED_MOCK is absent or not true // in invocation; otherwise, a list of mock invokers will be returned. try { RpcContext.getServiceContext().setConsumerUrl(getUrl()); invokers = directory.list(invocation); } catch (RpcException e) { if (logger.isInfoEnabled()) { logger.info( "Exception when trying to invoke mock. Getting mock invokers failed for service:" + getUrl().getServiceInterface() + ", method:" + RpcUtils.getMethodName(invocation) + ", will construct a new mock with 'new MockInvoker()'.", e); } } } return invokers; }
3.68
framework_HasValue_isEmpty
/** * Returns whether this {@code HasValue} is considered to be empty. * <p> * By default this is an equality check between current value and empty * value. * * @return {@code true} if considered empty; {@code false} if not */ public default boolean isEmpty() { return Objects.equals(getValue(), getEmptyValue()); }
3.68
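The default above is just an Objects.equals check between the current value and the declared empty value. A self-contained sketch of the same contract; the SimpleHasValue and TextHolder names are invented for illustration:

```java
import java.util.Objects;

// Hypothetical minimal analogue of the HasValue contract above:
// emptiness defaults to an equality check against the empty value.
interface SimpleHasValue<V> {
    V getValue();
    V getEmptyValue();

    default boolean isEmpty() {
        return Objects.equals(getValue(), getEmptyValue());
    }
}

class TextHolder implements SimpleHasValue<String> {
    private final String value;

    TextHolder(String value) {
        this.value = value;
    }

    @Override
    public String getValue() {
        return value;
    }

    @Override
    public String getEmptyValue() {
        return ""; // the empty value of a text holder is the empty string
    }

    public static void main(String[] args) {
        System.out.println(new TextHolder("").isEmpty());   // true
        System.out.println(new TextHolder("hi").isEmpty()); // false
    }
}
```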
hbase_LruBlockCache_asReferencedHeapBlock
/** * The block cached in LRUBlockCache will always be a heap block: on the one side, heap access is * faster than off-heap access, so the small index or meta blocks cached in CombinedBlockCache * benefit a lot; on the other side, the LRUBlockCache size is always calculated based on the total * heap size, so caching an off-heap block in LRUBlockCache would mess up the heap size accounting. * Here we clone the block into a heap block if it's an off-heap block, otherwise we just use the * original block. The key point is to maintain the refCnt of the block (HBASE-22127): <br> * 1. if we cache the cloned heap block, its refCnt is a totally new one, which is easy to handle; <br> * 2. if we cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's * reservoir; if both RPC and LRUBlockCache release the block, it can then be garbage collected by * the JVM, so we need a retain here. * @param buf the original block * @return a block with a heap memory backend. */ private Cacheable asReferencedHeapBlock(Cacheable buf) { if (buf instanceof HFileBlock) { HFileBlock blk = ((HFileBlock) buf); if (blk.isSharedMem()) { return HFileBlock.deepCloneOnHeap(blk); } } // The block will be referenced by this LRUBlockCache, so should increase its refCnt here. return buf.retain(); }
3.68
hudi_HoodieTableConfig_clearMetadataPartitions
/** * Clears {@link HoodieTableConfig#TABLE_METADATA_PARTITIONS} and * {@link HoodieTableConfig#TABLE_METADATA_PARTITIONS_INFLIGHT}. */ public void clearMetadataPartitions(HoodieTableMetaClient metaClient) { setMetadataPartitionState(metaClient, MetadataPartitionType.FILES, false); }
3.68
hadoop_HttpReferrerAuditHeader_builder
/** * Get a builder. * @return a new builder. */ public static Builder builder() { return new Builder(); }
3.68
zxing_MinimalEncoder_getEndMode
/** Returns Mode.ASCII if: * - Mode is EDIFACT and characterLength is less than 4 or the remaining characters can be encoded in at most 2 * ASCII bytes. * - Mode is C40, TEXT or X12 and the remaining characters can be encoded in at most 1 ASCII byte. * Returns mode in all other cases. * */ Mode getEndMode() { if (mode == Mode.EDF) { if (characterLength < 4) { return Mode.ASCII; } int lastASCII = getLastASCII(); // see 5.2.8.2 EDIFACT encodation Rules if (lastASCII > 0 && getCodewordsRemaining(cachedTotalSize + lastASCII) <= 2 - lastASCII) { return Mode.ASCII; } } if (mode == Mode.C40 || mode == Mode.TEXT || mode == Mode.X12) { // see 5.2.5.2 C40 encodation rules and 5.2.7.2 ANSI X12 encodation rules if (fromPosition + characterLength >= input.length() && getCodewordsRemaining(cachedTotalSize) == 0) { return Mode.ASCII; } int lastASCII = getLastASCII(); if (lastASCII == 1 && getCodewordsRemaining(cachedTotalSize + 1) == 0) { return Mode.ASCII; } } return mode; }
3.68
flink_DataSet_combineGroup
/** * Applies a GroupCombineFunction on a non-grouped {@link DataSet}. A CombineFunction is similar * to a GroupReduceFunction but does not perform a full data exchange. Instead, the * CombineFunction calls the combine method once per partition for combining a group of results. * This operator is suitable for combining values into an intermediate format before doing a * proper groupReduce where the data is shuffled across nodes for further reduction. The * GroupReduce operator can also be supplied with a combiner by implementing the RichGroupReduce * function. The combine method of the RichGroupReduce function requires the input and output types * to be the same. The CombineFunction, on the other hand, can have an arbitrary output type. * * @param combiner The GroupCombineFunction that is applied on the DataSet. * @return A GroupCombineOperator which represents the combined DataSet. */ public <R> GroupCombineOperator<T, R> combineGroup(GroupCombineFunction<T, R> combiner) { if (combiner == null) { throw new NullPointerException("GroupCombine function must not be null."); } String callLocation = Utils.getCallLocationName(); TypeInformation<R> resultType = TypeExtractor.getGroupCombineReturnTypes(combiner, getType(), callLocation, true); return new GroupCombineOperator<>(this, resultType, clean(combiner), callLocation); }
3.68
morf_TableOutputter_unsupportedOperationExceptionMessageSuffix
/** * Creates an {@link UnsupportedOperationException} message suffix for a given * {@link Column} and {@link WritableSheet}. * * @param column the {@link Column} * @param writableSheet the {@link WritableSheet} * @return the {@link UnsupportedOperationException} message suffix */ private String unsupportedOperationExceptionMessageSuffix(Column column, WritableSheet writableSheet) { return " in column [" + column.getName() + "] of table [" + writableSheet.getName() + "]"; }
3.68
hudi_BaseHoodieTableServiceClient_preCommit
/** * Any pre-commit actions like conflict resolution go here. * * @param metadata commit metadata for which pre commit is being invoked. */ protected void preCommit(HoodieCommitMetadata metadata) { // Create a Hoodie table after startTxn, which encapsulates the commits and files visible. // Important to create this after the lock to ensure the latest commits show up in the timeline without needing a reload HoodieTable table = createTable(config, hadoopConf); resolveWriteConflict(table, metadata, this.pendingInflightAndRequestedInstants); }
3.68
pulsar_AwsCredentialProviderPlugin_getV2CredentialsProvider
/** * Returns a V2 credential provider for use with the v2 SDK. * * Defaults to an implementation that pulls credentials from a v1 provider */ default software.amazon.awssdk.auth.credentials.AwsCredentialsProvider getV2CredentialsProvider() { // make a small wrapper to forward requests to v1, this allows // for this interface to not "break" for implementers AWSCredentialsProvider v1Provider = getCredentialProvider(); return () -> { AWSCredentials creds = v1Provider.getCredentials(); if (creds instanceof AWSSessionCredentials) { return software.amazon.awssdk.auth.credentials.AwsSessionCredentials.create( creds.getAWSAccessKeyId(), creds.getAWSSecretKey(), ((AWSSessionCredentials) creds).getSessionToken()); } else { return software.amazon.awssdk.auth.credentials.AwsBasicCredentials.create( creds.getAWSAccessKeyId(), creds.getAWSSecretKey()); } }; }
3.68
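The snippet above bridges the v1 and v2 AWS credential APIs with a lambda that forwards each request to the legacy provider. A self-contained sketch of that adapter idea, with hypothetical LegacyProvider/ModernProvider interfaces standing in for the two SDK types:

```java
// Hypothetical stand-ins for the v1 and v2 provider interfaces.
interface LegacyProvider {
    String getCredentials();
}

@FunctionalInterface
interface ModernProvider {
    String resolveCredentials();
}

class ProviderBridge {
    // Wrap the old interface so callers coded against the new one keep working.
    // Each resolveCredentials() call is forwarded to the v1 provider, so
    // rotating credentials on the v1 side stay visible through the adapter.
    static ModernProvider adapt(LegacyProvider v1) {
        return () -> v1.getCredentials();
    }

    public static void main(String[] args) {
        LegacyProvider v1 = () -> "legacy-creds";
        ModernProvider v2 = adapt(v1);
        System.out.println(v2.resolveCredentials()); // prints legacy-creds
    }
}
```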
framework_VAbstractSplitPanel_setPositionReversed
/** * For internal use only. May be removed or replaced in the future. * * @param reversed * {@code true} if split position should be measured from the * second region, {@code false} (default) if from the first * region */ public void setPositionReversed(boolean reversed) { if (positionReversed != reversed) { if (orientation == Orientation.HORIZONTAL) { splitter.getStyle().clearRight(); splitter.getStyle().clearLeft(); } else if (orientation == Orientation.VERTICAL) { splitter.getStyle().clearTop(); splitter.getStyle().clearBottom(); } positionReversed = reversed; } }
3.68
hadoop_WordMedian_readAndFindMedian
/** * This is a standard program to read and find a median value based on a file * of word counts such as: 1 456, 2 132, 3 56... where the first values are * the word lengths and the following values are the number of times that * words of that length appear. * * @param path * The path to read the HDFS file from (part-r-00000...00001...etc). * @param medianIndex1 * The first length value to look for. * @param medianIndex2 * The second length value to look for (will be the same as the first * if there are an even number of words total). * @param conf * The configuration used to access the file system. * @return The median value, or -1 if no median was found. * @throws IOException * If the file cannot be found. * */ private double readAndFindMedian(String path, int medianIndex1, int medianIndex2, Configuration conf) throws IOException { FileSystem fs = FileSystem.get(conf); Path file = new Path(path, "part-r-00000"); if (!fs.exists(file)) throw new IOException("Output not found!"); BufferedReader br = null; try { br = new BufferedReader(new InputStreamReader(fs.open(file), StandardCharsets.UTF_8)); int num = 0; String line; while ((line = br.readLine()) != null) { StringTokenizer st = new StringTokenizer(line); // grab length String currLen = st.nextToken(); // grab count String lengthFreq = st.nextToken(); int prevNum = num; num += Integer.parseInt(lengthFreq); if (medianIndex2 >= prevNum && medianIndex1 <= num) { System.out.println("The median is: " + currLen); br.close(); return Double.parseDouble(currLen); } else if (medianIndex2 >= prevNum && medianIndex1 < num) { String nextCurrLen = st.nextToken(); double theMedian = (Integer.parseInt(currLen) + Integer .parseInt(nextCurrLen)) / 2.0; System.out.println("The median is: " + theMedian); br.close(); return theMedian; } } } finally { if (br != null) { br.close(); } } // error, no median found return -1; }
3.68
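The core of the method above is a running cumulative count over (length, frequency) pairs. A standalone sketch of that logic without HDFS, using an in-memory sorted map; names are illustrative:

```java
import java.util.TreeMap;

class MedianFromCounts {
    // counts maps word length -> number of words of that length (sorted by length).
    static double findMedian(TreeMap<Integer, Long> counts) {
        long total = counts.values().stream().mapToLong(Long::longValue).sum();
        long idx1 = (total + 1) / 2; // lower middle rank (1-based)
        long idx2 = (total + 2) / 2; // upper middle rank; equals idx1 when total is odd
        long seen = 0;
        int v1 = -1, v2 = -1;
        for (var e : counts.entrySet()) {
            seen += e.getValue();
            if (v1 < 0 && seen >= idx1) {
                v1 = e.getKey(); // first bucket whose cumulative count reaches idx1
            }
            if (v2 < 0 && seen >= idx2) {
                v2 = e.getKey();
                break; // both middle ranks located
            }
        }
        return (v1 + v2) / 2.0;
    }

    public static void main(String[] args) {
        TreeMap<Integer, Long> counts = new TreeMap<>();
        counts.put(1, 2L);
        counts.put(2, 1L);
        counts.put(3, 1L); // multiset {1, 1, 2, 3}
        System.out.println(findMedian(counts)); // 1.5
    }
}
```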
streampipes_BoilerpipeHTMLContentHandler_startElement
// @Override public void startElement(String uri, String localName, String qName, Attributes atts) throws SAXException { labelStacks.add(null); TagAction ta = tagActions.get(localName); if (ta != null) { if (ta.changesTagLevel()) { tagLevel++; } flush = ta.start(this, localName, qName, atts) | flush; } else { tagLevel++; flush = true; } lastEvent = Event.START_TAG; lastStartTag = localName; }
3.68
hudi_HoodieRepairTool_checkBackupPathForRepair
/** * Verifies the backup path for repair. * If there is no backup path configured, creates a new one in temp folder. * If the backup path already has files, throws an error to the user. * If the backup path is within the table base path, throws an error too. * * @return {@code 0} if successful; {@code -1} otherwise. * @throws IOException upon errors. */ int checkBackupPathForRepair() throws IOException { if (cfg.backupPath == null) { SecureRandom random = new SecureRandom(); long randomLong = random.nextLong(); cfg.backupPath = "/tmp/" + BACKUP_DIR_PREFIX + randomLong; } Path backupPath = new Path(cfg.backupPath); if (metaClient.getFs().exists(backupPath) && metaClient.getFs().listStatus(backupPath).length > 0) { LOG.error(String.format("Cannot use backup path %s: it is not empty", cfg.backupPath)); return -1; } return checkBackupPathAgainstBasePath(); }
3.68
querydsl_MetaDataExporter_setColumnAnnotations
/** * Set whether column annotations should be created * * @param columnAnnotations true to add column annotations to the generated classes */ public void setColumnAnnotations(boolean columnAnnotations) { this.columnAnnotations = columnAnnotations; }
3.68
streampipes_InfluxStore_onEvent
/** * Saves an event to the connected InfluxDB database * * @param event The event which should be saved * @throws SpRuntimeException If the column name (key-value of the event map) is not allowed */ public void onEvent(Event event) throws SpRuntimeException { var missingFields = new ArrayList<String>(); var nullFields = new ArrayList<String>(); if (event == null) { throw new SpRuntimeException("event is null"); } // sanitize event for (var key : event.getRaw().keySet()) { if (InfluxDbReservedKeywords.KEYWORD_LIST.stream().anyMatch(k -> k.equalsIgnoreCase(key))) { event.renameFieldByRuntimeName(key, key + "_"); } } var timestampValue = event.getFieldBySelector(measure.getTimestampField()).getAsPrimitive().getAsLong(); var point = Point.measurement(measure.getMeasureName()).time((long) timestampValue, TimeUnit.MILLISECONDS); for (var ep : measure.getEventSchema().getEventProperties()) { var runtimeName = ep.getRuntimeName(); // timestamp should not be added as a field if (!measure.getTimestampField().endsWith(runtimeName)) { var sanitizedRuntimeName = sanitizedRuntimeNames.get(runtimeName); var field = event.getOptionalFieldByRuntimeName(runtimeName); try { if (ep instanceof EventPropertyPrimitive) { if (field.isPresent()) { var eventPropertyPrimitiveField = field.get().getAsPrimitive(); if (eventPropertyPrimitiveField.getRawValue() == null) { nullFields.add(sanitizedRuntimeName); } else { // store property as tag when the field is a dimension property if (PropertyScope.DIMENSION_PROPERTY.name().equals(ep.getPropertyScope())) { point.tag(sanitizedRuntimeName, eventPropertyPrimitiveField.getAsString()); } else { handleMeasurementProperty( point, (EventPropertyPrimitive) ep, sanitizedRuntimeName, eventPropertyPrimitiveField); } } } else { missingFields.add(runtimeName); } } else { // Since InfluxDB can't store non-primitive types, store them as string // and deserialize later in downstream processes if (field.isPresent()) { handleNonPrimitiveMeasurementProperty(point, event, sanitizedRuntimeName); } else { missingFields.add(runtimeName); } } } catch (SpRuntimeException iae) { LOG.warn("Runtime exception while extracting field value of field {} - this field will be ignored", runtimeName, iae); } } } if (missingFields.size() > 0) { LOG.debug("Ignored {} fields which were present in the schema, but not in the provided event: {}", missingFields.size(), String.join(", ", missingFields)); } if (nullFields.size() > 0) { LOG.warn("Ignored {} fields which had a value 'null': {}", nullFields.size(), String.join(", ", nullFields)); } influxDb.write(point.build()); }
3.68
pulsar_GenericRecord_getNativeObject
/** * Return the internal native representation of the Record, * such as an Avro GenericRecord. * * @return the internal representation of the record * @throws UnsupportedOperationException if the operation is not supported */ @Override default Object getNativeObject() { throw new UnsupportedOperationException(); }
3.68
flink_FileRegionBuffer_getNioBufferReadable
/** * This method is only called by tests and by event-deserialization, like checkpoint barriers. * Because such events are not used for bounded intermediate results, this method currently * executes only in tests. */ @Override public ByteBuffer getNioBufferReadable() { try { final ByteBuffer buffer = ByteBuffer.allocateDirect(bufferSize()); BufferReaderWriterUtil.readByteBufferFully(fileChannel, buffer, position()); buffer.flip(); return buffer; } catch (IOException e) { // this is not very pretty, but given that this code runs only in tests // the exception wrapping here is simpler than updating the method signature // to declare IOExceptions, as would be necessary for a proper "lazy buffer". throw new FlinkRuntimeException(e.getMessage(), e); } }
3.68
hbase_Connection_getBufferedMutator
/** * <p> * Retrieve a {@link BufferedMutator} for performing client-side buffering of writes. The * {@link BufferedMutator} returned by this method is thread-safe. This BufferedMutator will use * the Connection's ExecutorService. This object can be used for long lived operations. * </p> * <p> * The caller is responsible for calling {@link BufferedMutator#close()} on the returned * {@link BufferedMutator} instance. * </p> * <p> * This accessor will use the connection's ExecutorService and will throw an exception in the main * thread when an asynchronous exception occurs. * @param tableName the name of the table * @return a {@link BufferedMutator} for the supplied tableName. */ default BufferedMutator getBufferedMutator(TableName tableName) throws IOException { return getBufferedMutator(new BufferedMutatorParams(tableName)); }
3.68
flink_SegmentsUtil_inFirstSegment
/** Returns whether the byte range lies entirely within the first MemorySegment, in which case a fast path can be used. */ private static boolean inFirstSegment(MemorySegment[] segments, int offset, int numBytes) { return numBytes + offset <= segments[0].size(); }
3.68
hudi_HoodieTableMetadataUtil_processRollbackMetadata
/** * Extracts information about the deleted and appended files from the {@code HoodieRollbackMetadata}. * <p> * During a rollback, files may be deleted (COW, MOR) or rollback blocks may be appended (MOR only) to files. This * function extracts these file changes for each partition. * * @param rollbackMetadata {@code HoodieRollbackMetadata} * @param partitionToAppendedFiles The {@code Map} to fill with files appended per partition and their sizes. */ private static void processRollbackMetadata(HoodieRollbackMetadata rollbackMetadata, Map<String, Map<String, Long>> partitionToAppendedFiles) { rollbackMetadata.getPartitionMetadata().values().forEach(pm -> { // Has this rollback produced new files? boolean hasRollbackLogFiles = pm.getRollbackLogFiles() != null && !pm.getRollbackLogFiles().isEmpty(); final String partition = pm.getPartitionPath(); final String partitionId = getPartitionIdentifier(partition); BiFunction<Long, Long, Long> fileMergeFn = (oldSize, newSizeCopy) -> { // if a file exists in both written log files and rollback log files, we want to pick the one that is higher // as rollback file could have been updated after written log files are computed. return oldSize > newSizeCopy ? oldSize : newSizeCopy; }; if (hasRollbackLogFiles) { if (!partitionToAppendedFiles.containsKey(partitionId)) { partitionToAppendedFiles.put(partitionId, new HashMap<>()); } // Extract appended file name from the absolute paths saved in getAppendFiles() pm.getRollbackLogFiles().forEach((path, size) -> { String fileName = new Path(path).getName(); partitionToAppendedFiles.get(partitionId).merge(fileName, size, fileMergeFn); }); } }); }
3.68
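The fileMergeFn above is just a max-merge, which java.util.Map#merge expresses directly. A minimal standalone illustration with invented keys:

```java
import java.util.HashMap;
import java.util.Map;

class MergeBySize {
    public static void main(String[] args) {
        Map<String, Long> fileSizes = new HashMap<>();
        fileSizes.put("log.1", 100L);
        // Keep the larger of the existing and incoming sizes, mirroring
        // the fileMergeFn in the snippet above.
        fileSizes.merge("log.1", 250L, Math::max);
        fileSizes.merge("log.2", 50L, Math::max); // absent key: simply inserts
        System.out.println(fileSizes); // {log.1=250, log.2=50}
    }
}
```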
pulsar_BacklogQuotaManager_disconnectProducers
/** * Disconnect producers on given topic. * * @param persistentTopic * The topic on which all producers should be disconnected */ private void disconnectProducers(PersistentTopic persistentTopic) { List<CompletableFuture<Void>> futures = new ArrayList<>(); Map<String, Producer> producers = persistentTopic.getProducers(); producers.values().forEach(producer -> { log.info("Producer [{}] has exceeded backlog quota on topic [{}]. Disconnecting producer", producer.getProducerName(), persistentTopic.getName()); futures.add(producer.disconnect()); }); FutureUtil.waitForAll(futures).thenRun(() -> { log.info("All producers on topic [{}] are disconnected", persistentTopic.getName()); }).exceptionally(exception -> { log.error("Error in disconnecting producers on topic [{}] [{}]", persistentTopic.getName(), exception); return null; }); }
3.68
hbase_Get_addFamily
/** * Get all columns from the specified family. * <p> * Overrides previous calls to addColumn for this family. * @param family family name * @return the Get object */ public Get addFamily(byte[] family) { familyMap.remove(family); familyMap.put(family, null); return this; }
3.68
hbase_AbstractFSWALProvider_extractFileNumFromWAL
/** * Returns the file create timestamp (the 'FileNum') from the file name. For the name format see * {@link #validateWALFilename(String)}. Kept public until the remaining tests move to o.a.h.h.wal. * @param wal must not be null * @return the file number that is part of the WAL file name */ public static long extractFileNumFromWAL(final WAL wal) { final Path walPath = ((AbstractFSWAL<?>) wal).getCurrentFileName(); if (walPath == null) { throw new IllegalArgumentException("The WAL path must not be null"); } String name = walPath.getName(); long timestamp = getTimestamp(name); if (timestamp == NO_TIMESTAMP) { throw new IllegalArgumentException(name + " is not a valid wal file name"); } return timestamp; }
3.68
hudi_TimelineUtils_getExtraMetadataFromLatestIncludeClustering
/** * Get extra metadata for specified key from latest commit/deltacommit/replacecommit instant including internal commits * such as clustering. */ public static Option<String> getExtraMetadataFromLatestIncludeClustering(HoodieTableMetaClient metaClient, String extraMetadataKey) { return metaClient.getCommitsTimeline().filterCompletedInstants().getReverseOrderedInstants() .findFirst().map(instant -> getMetadataValue(metaClient, extraMetadataKey, instant)).orElse(Option.empty()); }
3.68
hbase_HRegion_getStore
/** * Return HStore instance. Does not do any copy: as the number of stores is limited, we iterate on * the list. */ private HStore getStore(Cell cell) { return stores.entrySet().stream().filter(e -> CellUtil.matchingFamily(cell, e.getKey())) .map(e -> e.getValue()).findFirst().orElse(null); }
3.68
flink_ValueDataTypeConverter_extractDataType
/** * Returns the clearly identifiable data type if possible. For example, {@code 12L} can be * expressed as {@code DataTypes.BIGINT().notNull()}. However, for example, {@code null} could * be any type and is not supported. * * <p>All types of the {@link LogicalTypeFamily#PREDEFINED} family, symbols, and arrays are * supported. */ public static Optional<DataType> extractDataType(Object value) { if (value == null) { return Optional.empty(); } DataType convertedDataType = null; if (value instanceof String) { convertedDataType = convertToCharType((String) value); } // byte arrays have higher priority than regular arrays else if (value instanceof byte[]) { convertedDataType = convertToBinaryType((byte[]) value); } else if (value instanceof BigDecimal) { convertedDataType = convertToDecimalType((BigDecimal) value); } else if (value instanceof java.time.LocalTime) { convertedDataType = convertToTimeType((java.time.LocalTime) value); } else if (value instanceof java.time.LocalDateTime) { convertedDataType = convertToTimestampType(((java.time.LocalDateTime) value).getNano()); } else if (value instanceof java.sql.Timestamp) { convertedDataType = convertToTimestampType(((java.sql.Timestamp) value).getNanos()); } else if (value instanceof java.time.ZonedDateTime) { convertedDataType = convertToZonedTimestampType(((java.time.ZonedDateTime) value).getNano()); } else if (value instanceof java.time.OffsetDateTime) { convertedDataType = convertToZonedTimestampType(((java.time.OffsetDateTime) value).getNano()); } else if (value instanceof java.time.Instant) { convertedDataType = convertToLocalZonedTimestampType(((java.time.Instant) value).getNano()); } else if (value instanceof java.time.Period) { convertedDataType = convertToYearMonthIntervalType(((java.time.Period) value).getYears()); } else if (value instanceof java.time.Duration) { final java.time.Duration duration = (java.time.Duration) value; convertedDataType = convertToDayTimeIntervalType(duration.toDays(), duration.getNano()); } else if (value instanceof Object[]) { // don't let the class-based extraction kick in if array elements differ return convertToArrayType((Object[]) value) .map(dt -> dt.notNull().bridgedTo(value.getClass())); } final Optional<DataType> resultType; if (convertedDataType != null) { resultType = Optional.of(convertedDataType); } else { // class-based extraction is possible for BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, // DOUBLE, // DATE, TIME with java.sql.Time, and arrays of primitive types resultType = ClassDataTypeConverter.extractDataType(value.getClass()); } return resultType.map(dt -> dt.notNull().bridgedTo(value.getClass())); }
3.68
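The extractDataType method above follows a common shape: a chain of value-based instanceof checks, a class-based fallback, and an Optional-wrapped result. A stripped-down sketch of that shape; the type names and string "data types" are invented for illustration:

```java
import java.math.BigDecimal;
import java.util.Optional;

class TypeNameExtractor {
    // Mirrors the structure of extractDataType: value-based checks first,
    // then a class-based fallback, then Optional wrapping.
    static Optional<String> extract(Object value) {
        if (value == null) {
            return Optional.empty(); // null carries no type information
        }
        String name = null;
        if (value instanceof String) {
            name = "CHAR(" + ((String) value).length() + ")";
        } else if (value instanceof byte[]) {
            // byte arrays get higher priority than regular arrays
            name = "BINARY(" + ((byte[]) value).length + ")";
        } else if (value instanceof BigDecimal) {
            BigDecimal d = (BigDecimal) value;
            name = "DECIMAL(" + d.precision() + ", " + d.scale() + ")";
        }
        if (name != null) {
            return Optional.of(name);
        }
        return fallbackByClass(value.getClass());
    }

    static Optional<String> fallbackByClass(Class<?> clazz) {
        if (clazz == Integer.class) return Optional.of("INT");
        if (clazz == Boolean.class) return Optional.of("BOOLEAN");
        return Optional.empty(); // unidentifiable type
    }

    public static void main(String[] args) {
        System.out.println(extract("abc"));           // Optional[CHAR(3)]
        System.out.println(extract(42));              // Optional[INT]
        System.out.println(extract(new Object()));    // Optional.empty
    }
}
```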
querydsl_ClassPathUtils_safeClassForName
/** * Get the class for the given className via the given classLoader * * @param classLoader classloader to be used * @param className fully qualified class name * @return {@code Class} instance matching the class name or null if not found */ public static Class<?> safeClassForName(ClassLoader classLoader, String className) { try { if (className.startsWith("com.sun.") || className.startsWith("com.apple.")) { return null; } else { return Class.forName(className, true, classLoader); } } catch (ClassNotFoundException | NoClassDefFoundError e) { return null; } }
3.68
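A usage sketch of the defensive-loading idea above: probe a list of optional classes and keep only the ones that are actually loadable, swallowing both ClassNotFoundException and linkage errors. The caller class and probed names are hypothetical:

```java
import java.util.ArrayList;
import java.util.List;

class OptionalClassProbe {
    static Class<?> safeClassForName(ClassLoader cl, String className) {
        try {
            return Class.forName(className, true, cl);
        } catch (ClassNotFoundException | NoClassDefFoundError e) {
            return null; // absent or broken on this classpath
        }
    }

    public static void main(String[] args) {
        ClassLoader cl = OptionalClassProbe.class.getClassLoader();
        List<Class<?>> found = new ArrayList<>();
        for (String name : new String[] {"java.util.Map", "com.example.NotThere"}) {
            Class<?> c = safeClassForName(cl, name);
            if (c != null) {
                found.add(c);
            }
        }
        System.out.println(found); // [interface java.util.Map]
    }
}
```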
morf_SchemaChangeSequence_removeColumns
/** * @see org.alfasoftware.morf.upgrade.SchemaEditor#removeColumns(java.lang.String, org.alfasoftware.morf.metadata.Column[]) */ @Override public void removeColumns(String tableName, Column... definitions) { // simple redirect for now, but a future optimisation could re-implement this to be more efficient for (Column definition : definitions) { removeColumn(tableName, definition); } }
3.68
hadoop_OBSFileSystem_mkdirs
/** * Make the given path and all non-existent parents into directories. Has the * semantics of Unix {@code 'mkdir -p'}. Existence of the directory hierarchy * is not an error. * * @param path path to create * @param permission to apply to f * @return true if a directory was created * @throws FileAlreadyExistsException there is a file at the path specified * @throws IOException other IO problems */ @Override public boolean mkdirs(final Path path, final FsPermission permission) throws IOException, FileAlreadyExistsException { try { return OBSCommonUtils.innerMkdirs(this, path); } catch (ObsException e) { throw OBSCommonUtils.translateException("mkdirs", path, e); } }
3.68
rocketmq-connect_DorisSinkTask_start
/** * Start the component * * @param keyValue */ @Override public void start(KeyValue keyValue) { originalConfig = keyValue; config = new DorisSinkConfig(keyValue); remainingRetries = config.getMaxRetries(); log.info("Initializing doris writer"); this.updater = new Updater(config); }
3.68
flink_NFA_open
/** * Initialization method for the NFA. It is called before any element is passed and thus * suitable for one time setup work. * * @param cepRuntimeContext runtime context of the enclosing operator * @param conf The configuration containing the parameters attached to the contract. */ public void open(RuntimeContext cepRuntimeContext, Configuration conf) throws Exception { for (State<T> state : getStates()) { for (StateTransition<T> transition : state.getStateTransitions()) { IterativeCondition condition = transition.getCondition(); FunctionUtils.setFunctionRuntimeContext(condition, cepRuntimeContext); FunctionUtils.openFunction(condition, DefaultOpenContext.INSTANCE); } } }
3.68
hadoop_ManifestPrinter_field
/** * Print a field, if non-null. * @param name field name. * @param value value. */ private void field(String name, Object value) { if (value != null) { println("%s: %s", name, value); } }
3.68
hadoop_NativeAzureFileSystemHelper_isBlobAlreadyExistsConflict
/* * Determines if a conditional request failed because the blob already * exists. * * @param e - the storage exception thrown by the failed operation. * * @return true if a conditional request failed because the blob already * exists; otherwise, returns false. */ static boolean isBlobAlreadyExistsConflict(StorageException e) { return e.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT && StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(e.getErrorCode()); }
3.68
hbase_IOEngine_usesSharedMemory
/** * If the IOEngine uses shared memory, a Cacheable read from it refers to the same memory area * used by the engine for caching it. * @return true when the IOEngine uses shared memory. */ default boolean usesSharedMemory() { return false; }
3.68
flink_DynamicTableFactory_getEnrichmentOptions
/** * Returns a map of options that can enrich the options of the original {@link * #getCatalogTable()} during a plan restore. * * <p>If and only if {@code table.plan.restore.catalog-objects} is set to {@code ALL}, this * method may return a non-empty {@link Map} of options retrieved from the {@link Catalog}. * * <p>Because only the {@link DynamicTableFactory} is able to decide which options are safe * to be forwarded without affecting the original topology, enrichment options are exposed * through this method. In general, it's highly recommended using the {@link * FactoryUtil#createTableFactoryHelper(DynamicTableFactory, Context)} to merge the options * and then get the result with {@link TableFactoryHelper#getOptions()}. The helper * considers both {@link #forwardOptions()} and {@link FormatFactory#forwardOptions()}. * * <p>Since a restored topology is static, an implementer has to ensure that the declared * options don't affect fundamental abilities. The planner might not react to changed * abilities anymore. * * @see TableFactoryHelper */ default Map<String, String> getEnrichmentOptions() { return Collections.emptyMap(); }
3.68
framework_SessionDestroyEvent_getService
/** * Gets the Vaadin service from which the event originates. * * @return the Vaadin service */ public VaadinService getService() { return getSource(); }
3.68
framework_VFlash_setSource
/** * Set the resource representing the Flash content that should be displayed. * * @param source * the resource URL */ public void setSource(String source) { // compare by value: reference comparison (!=) would treat equal but distinct String instances as a change if (!java.util.Objects.equals(this.source, source)) { this.source = source; needsRebuild = true; } }
3.68
hbase_Result_getTotalSizeOfCells
/** * Get total size of raw cells * @return Total size. */ public static long getTotalSizeOfCells(Result result) { long size = 0; if (result.isEmpty()) { return size; } for (Cell c : result.rawCells()) { size += c.heapSize(); } return size; }
3.68
flink_RemoteInputChannel_getUnannouncedCredit
/** * Gets the currently unannounced credit. * * @return Credit which was not announced to the sender yet. */ public int getUnannouncedCredit() { return unannouncedCredit.get(); }
3.68
hbase_QualifierFilter_toByteArray
/** Returns The filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.QualifierFilter.Builder builder = FilterProtos.QualifierFilter.newBuilder(); builder.setCompareFilter(super.convert()); return builder.build().toByteArray(); }
3.68
flink_DualInputOperator_getSecondInput
/** * Returns the second input, or null, if none is set. * * @return The contract's second input. */ public Operator<IN2> getSecondInput() { return this.input2; }
3.68
hadoop_XAttrStorage_updateINodeXAttrs
/** * Update xattrs of inode. * <p> * Must be called while holding the FSDirectory write lock. * * @param inode INode to update * @param xAttrs to update xAttrs. * @param snapshotId id of the latest snapshot of the inode */ public static void updateINodeXAttrs(INode inode, List<XAttr> xAttrs, int snapshotId) throws QuotaExceededException { if (inode.getXAttrFeature() != null) { inode.removeXAttrFeature(snapshotId); } if (xAttrs == null || xAttrs.isEmpty()) { return; } inode.addXAttrFeature(new XAttrFeature(xAttrs), snapshotId); }
3.68
hadoop_FederationStateStoreUtils_logAndThrowStoreException
/** * Throws an <code>FederationStateStoreException</code> due to an error in * <code>FederationStateStore</code>. * * @param t the throwable raised in the called class. * @param log the logger interface. * @param errMsgFormat the error message format string. * @param args referenced by the format specifiers in the format string. * @throws YarnException on failure */ public static void logAndThrowStoreException( Throwable t, Logger log, String errMsgFormat, Object... args) throws YarnException { String errMsg = String.format(errMsgFormat, args); if (t != null) { log.error(errMsg, t); throw new FederationStateStoreException(errMsg, t); } else { log.error(errMsg); throw new FederationStateStoreException(errMsg); } }
3.68
flink_ChannelWriterOutputView_getBytesMemoryUsed
/** * Gets the number of bytes used by this output view, including written bytes and header bytes. * * @return The number of bytes that have been written to this output view. */ public long getBytesMemoryUsed() { // widen to long before multiplying to avoid int overflow for large block counts return ((long) this.blockCount - 1) * getSegmentSize() + getCurrentPositionInSegment(); }
3.68
flink_BinaryStringDataUtil_toDecimal
/** * Parses a {@link BinaryStringData} to {@link DecimalData}. * * @return DecimalData value if the parsing was successful. */ public static DecimalData toDecimal(BinaryStringData str, int precision, int scale) throws NumberFormatException { str.ensureMaterialized(); DecimalData data; if (DecimalDataUtils.isByteArrayDecimal(precision) || DecimalDataUtils.isByteArrayDecimal(str.getSizeInBytes())) { data = toBigPrecisionDecimal(str, precision, scale); } else { int sizeInBytes = str.getSizeInBytes(); data = toDecimalFromBytes( precision, scale, getTmpBytes(str, sizeInBytes), 0, sizeInBytes); } if (data == null) { throw numberFormatExceptionFor(str, "Overflow."); } return data; }
3.68
hadoop_JobBase_addLongValue
/** * Increment the given counter by the given incremental value. If the counter * does not exist, it is created with the incremental value as its initial value. * * @param name * the counter name * @param inc * the incremental value * @return the updated value. */ protected Long addLongValue(Object name, long inc) { Long val = this.longCounters.get(name); Long retv = null; if (val == null) { retv = Long.valueOf(inc); } else { retv = Long.valueOf(val.longValue() + inc); } this.longCounters.put(name, retv); return retv; }
3.68
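On modern Java the whole get/branch/put dance above collapses into Map#merge. A minimal equivalent; the field and method names are illustrative:

```java
import java.util.HashMap;
import java.util.Map;

class Counters {
    private final Map<Object, Long> longCounters = new HashMap<>();

    // Equivalent to addLongValue above: starts at inc when the key is absent,
    // otherwise adds inc to the current value, and returns the updated value.
    Long addLongValue(Object name, long inc) {
        return longCounters.merge(name, inc, Long::sum);
    }

    public static void main(String[] args) {
        Counters c = new Counters();
        c.addLongValue("words", 3);
        System.out.println(c.addLongValue("words", 2)); // 5
    }
}
```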
pulsar_DefaultMetadataResolver_resolve
/** * Resolves the authorization metadata. * @return metadata * @throws IOException if the metadata could not be resolved. */ public Metadata resolve() throws IOException { try { URLConnection c = this.metadataUrl.openConnection(); if (connectTimeout != null) { c.setConnectTimeout((int) connectTimeout.toMillis()); } if (readTimeout != null) { c.setReadTimeout((int) readTimeout.toMillis()); } c.setRequestProperty("Accept", "application/json"); Metadata metadata; try (InputStream inputStream = c.getInputStream()) { metadata = this.objectReader.readValue(inputStream); } return metadata; } catch (IOException e) { throw new IOException("Cannot obtain authorization metadata from " + metadataUrl.toString(), e); } }
3.68
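A self-contained sketch of the same fetch pattern with plain java.net: set both timeouts, send an Accept header, read the body, and rewrap failures so the failing endpoint shows up in the message. JSON parsing is omitted and the class name is invented:

```java
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;

class MetadataFetch {
    static String fetchJson(URL url, int connectMillis, int readMillis) throws IOException {
        try {
            URLConnection c = url.openConnection();
            c.setConnectTimeout(connectMillis); // bound the TCP connect phase
            c.setReadTimeout(readMillis);       // bound each blocking read
            c.setRequestProperty("Accept", "application/json");
            try (InputStream in = c.getInputStream()) {
                return new String(in.readAllBytes(), StandardCharsets.UTF_8);
            }
        } catch (IOException e) {
            // Rewrap so the failing endpoint appears in the error message.
            throw new IOException("Cannot obtain metadata from " + url, e);
        }
    }
}
```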
flink_LeaderInformationRegister_clear
/** * Creates a new {@code LeaderInformationRegister} that matches the passed {@code * LeaderInformationRegister} except for the entry of {@code componentId} which is removed if it * existed. */ public static LeaderInformationRegister clear( @Nullable LeaderInformationRegister leaderInformationRegister, String componentId) { if (leaderInformationRegister == null || !leaderInformationRegister.getRegisteredComponentIds().iterator().hasNext()) { return LeaderInformationRegister.empty(); } return merge(leaderInformationRegister, componentId, LeaderInformation.empty()); }
3.68
framework_GridLayoutExtraSpacing_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { // TODO Auto-generated method stub return null; }
3.68
morf_SqlScriptExecutorProvider_defaultVisitor
/** * Defaults the {@code visitor} to be a NullVisitor if the visitor is null. * * @param visitor the visitor to potentially default. * @return a not-null visitor. */ protected SqlScriptVisitor defaultVisitor(SqlScriptVisitor visitor) { if (visitor != null) { return visitor; } return new NullVisitor(); }
3.68
framework_MenuItem_getCommand
/** * Gets the command associated with this item. * * @return this item's command, or <code>null</code> if none exists */ public Command getCommand() { return command; }
3.68
hadoop_WordList_getSize
/** * Returns the size of the list. */ public int getSize() { return list.size(); }
3.68
hudi_InternalFilter_add
/** * Adds an array of keys to <i>this</i> filter. * * @param keys The array of keys. */ public void add(Key[] keys) { if (keys == null) { throw new IllegalArgumentException("Key[] may not be null"); } for (Key key : keys) { add(key); } }
3.68
flink_ParameterTool_createPropertiesFile
/** * Create a properties file with all the known parameters (call after the last get*() call). * Writes the default values; if {@code overwrite} is true, an existing file is replaced. * * @param pathToFile Location of the default properties file. * @param overwrite Boolean flag indicating whether or not to overwrite the file * @throws IOException If overwrite is not allowed and the file exists */ public void createPropertiesFile(String pathToFile, boolean overwrite) throws IOException { final File file = new File(pathToFile); if (file.exists()) { if (overwrite) { file.delete(); } else { // match the declared @throws contract rather than an undeclared RuntimeException throw new IOException( "File " + pathToFile + " exists and overwriting is not allowed"); } } final Properties defaultProps = new Properties(); defaultProps.putAll(this.defaultData); try (final OutputStream out = new FileOutputStream(file)) { defaultProps.store( out, "Default file created by Flink's ParameterUtil.createPropertiesFile()"); } }
3.68
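A small round-trip sketch of the java.util.Properties persistence the method relies on; the keys and temp file are arbitrary:

```java
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Properties;

class PropertiesRoundTrip {
    public static void main(String[] args) throws IOException {
        File file = File.createTempFile("defaults", ".properties");
        Properties defaults = new Properties();
        defaults.setProperty("parallelism", "4");
        defaults.setProperty("input", "/data/in");
        // store() writes the same key=value format createPropertiesFile() produces
        try (OutputStream out = new FileOutputStream(file)) {
            defaults.store(out, "defaults written like createPropertiesFile() above");
        }
        Properties reloaded = new Properties();
        try (InputStream in = new FileInputStream(file)) {
            reloaded.load(in);
        }
        System.out.println(reloaded.getProperty("parallelism")); // 4
    }
}
```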
hbase_BucketCache_evictBlock
/** * Try to evict the block from {@link BlockCache} by force. We'll call this in a few cases:<br> * 1. Close an HFile, and clear all cached blocks. <br> * 2. Call {@link Admin#clearBlockCache(TableName)} to clear all blocks for a given table.<br> * <p> * Firstly, we'll try to remove the block from RAMCache, and then try to evict it from backingMap. * Here we evict the block from backingMap immediately, but only free the reference from bucket * cache by calling {@link BucketEntry#markedAsEvicted}. If there are still some RPCs referring to * this block, the block can only be de-allocated when all of them release it. * <p> * NOTICE: we need to grab the write offset lock first before releasing the reference from * bucket cache. If we don't, we may read a {@link BucketEntry} with refCnt = 0 in * {@link BucketCache#getBlock(BlockCacheKey, boolean, boolean, boolean)}, which is a memory leak. * @param cacheKey Block to evict * @return true to indicate whether we've evicted successfully or not. */ @Override public boolean evictBlock(BlockCacheKey cacheKey) { return doEvictBlock(cacheKey, null, false); }
3.68
hmily_XaLoadBalancerAutoConfiguration_xaTransactionEventListener
/** * Register {@link SpringCloudXaLoadBalancer.TransactionEventListener} Bean. * * @return {@link SpringCloudXaLoadBalancer.TransactionEventListener} Bean */ @Bean public SpringCloudXaLoadBalancer.TransactionEventListener xaTransactionEventListener() { return new SpringCloudXaLoadBalancer.TransactionEventListener(); }
3.68
streampipes_TextBlock_hasLabel
/** * Checks whether this TextBlock has the given label. * * @param label The label * @return <code>true</code> if this block is marked by the given label. */ public boolean hasLabel(final String label) { return labels != null && labels.contains(label); }
3.68
querydsl_AbstractSQLQuery_getResults
/** * Get the results as a JDBC ResultSet * * @return results as ResultSet */ public ResultSet getResults() { final SQLListenerContextImpl context = startContext(connection(), queryMixin.getMetadata()); String queryString = null; List<Object> constants = Collections.emptyList(); try { listeners.preRender(context); SQLSerializer serializer = serialize(false); queryString = serializer.toString(); logQuery(queryString, serializer.getConstants()); context.addSQL(getSQL(serializer)); listeners.rendered(context); listeners.notifyQuery(queryMixin.getMetadata()); constants = serializer.getConstants(); listeners.prePrepare(context); final PreparedStatement stmt = getPreparedStatement(queryString); setParameters(stmt, constants, serializer.getConstantPaths(), getMetadata().getParams()); context.addPreparedStatement(stmt); listeners.prepared(context); listeners.preExecute(context); final ResultSet rs = stmt.executeQuery(); listeners.executed(context); return new ResultSetAdapter(rs) { @Override public void close() throws SQLException { try { super.close(); } finally { stmt.close(); reset(); endContext(context); } } }; } catch (SQLException e) { onException(context, e); reset(); endContext(context); throw configuration.translate(queryString, constants, e); } }
3.68
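The key trick in getResults above is returning a ResultSet whose close() also tears down the owning statement and ends the listener context. A minimal java.sql sketch of that wrapper idea using a dynamic proxy, so every other ResultSet method is delegated unchanged; the class name is invented:

```java
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
import java.sql.ResultSet;
import java.sql.Statement;

class ClosingResultSets {
    // Wraps rs so that close() also closes the owning statement,
    // delegating every other call to the underlying ResultSet.
    static ResultSet closeStatementOnClose(ResultSet rs, Statement stmt) {
        InvocationHandler handler = (proxy, method, args) -> {
            if ("close".equals(method.getName())) {
                try {
                    rs.close();
                } finally {
                    stmt.close(); // runs even if rs.close() throws
                }
                return null;
            }
            return method.invoke(rs, args);
        };
        return (ResultSet) Proxy.newProxyInstance(
                ResultSet.class.getClassLoader(), new Class<?>[] {ResultSet.class}, handler);
    }
}
```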
hadoop_BlockStorageMovementTracker_stopTracking
/** * Sets running flag to false. */ public void stopTracking() { running = false; }
3.68
hbase_MasterProcedureScheduler_waitGlobalExclusiveLock
// ============================================================================ // Global Locking Helpers // ============================================================================ /** * Try to acquire the exclusive lock on global. * @see #wakeGlobalExclusiveLock(Procedure, String) * @param procedure the procedure trying to acquire the lock * @return true if the procedure has to wait for global to be available */ public boolean waitGlobalExclusiveLock(Procedure<?> procedure, String globalId) { schedLock(); try { final LockAndQueue lock = locking.getGlobalLock(globalId); if (lock.tryExclusiveLock(procedure)) { removeFromRunQueue(globalRunQueue, getGlobalQueue(globalId), () -> procedure + " held the exclusive lock"); return false; } waitProcedure(lock, procedure); logLockedResource(LockedResourceType.GLOBAL, HConstants.EMPTY_STRING); return true; } finally { schedUnlock(); } }
3.68
framework_VRichTextArea_addBlurHandler
/** * Adds a blur handler to the component. * * @param blurHandler * the blur handler to add */ public void addBlurHandler(BlurHandler blurHandler) { blurHandlers.put(blurHandler, rta.addBlurHandler(blurHandler)); }
3.68
hudi_ClusteringUtil_rollbackClustering
/** * Force rolls back the inflight clustering instant, for handling failure case. * * @param table The hoodie table * @param writeClient The write client * @param instantTime The instant time */ public static void rollbackClustering(HoodieFlinkTable<?> table, HoodieFlinkWriteClient<?> writeClient, String instantTime) { HoodieInstant inflightInstant = HoodieTimeline.getReplaceCommitInflightInstant(instantTime); if (table.getMetaClient().reloadActiveTimeline().filterPendingReplaceTimeline().containsInstant(inflightInstant)) { LOG.warn("Rollback failed clustering instant: [" + instantTime + "]"); table.rollbackInflightClustering(inflightInstant, commitToRollback -> writeClient.getTableServiceClient().getPendingRollbackInfo(table.getMetaClient(), commitToRollback, false)); } }
3.68
dubbo_ReferenceAnnotationBeanPostProcessor_processReferenceAnnotatedBeanDefinition
/** * process @DubboReference at java-config @bean method * <pre class="code"> * &#064;Configuration * public class ConsumerConfig { * * &#064;Bean * &#064;DubboReference(group="demo", version="1.2.3") * public ReferenceBean&lt;DemoService&gt; demoService() { * return new ReferenceBean(); * } * * } * </pre> * * @param beanName * @param beanDefinition */ private void processReferenceAnnotatedBeanDefinition(String beanName, AnnotatedBeanDefinition beanDefinition) { MethodMetadata factoryMethodMetadata = SpringCompatUtils.getFactoryMethodMetadata(beanDefinition); // Extract beanClass from generic return type of java-config bean method: ReferenceBean<DemoService> // see // org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.getTypeForFactoryBeanFromMethod Class beanClass = getBeanFactory().getType(beanName); if (beanClass == Object.class) { beanClass = SpringCompatUtils.getGenericTypeOfReturnType(factoryMethodMetadata); } if (beanClass == Object.class) { // bean class is invalid, ignore it return; } if (beanClass == null) { String beanMethodSignature = factoryMethodMetadata.getDeclaringClassName() + "#" + factoryMethodMetadata.getMethodName() + "()"; throw new BeanCreationException( "The ReferenceBean is missing necessary generic type, which returned by the @Bean method of Java-config class. " + "The generic type of the returned ReferenceBean must be specified as the referenced interface type, " + "such as ReferenceBean<DemoService>. Please check bean method: " + beanMethodSignature); } // get dubbo reference annotation attributes Map<String, Object> annotationAttributes = null; // try all dubbo reference annotation types for (Class<? extends Annotation> annotationType : getAnnotationTypes()) { if (factoryMethodMetadata.isAnnotated(annotationType.getName())) { // Since Spring 5.2 // return factoryMethodMetadata.getAnnotations().get(annotationType).filterDefaultValues().asMap(); // Compatible with Spring 4.x annotationAttributes = factoryMethodMetadata.getAnnotationAttributes(annotationType.getName()); annotationAttributes = filterDefaultValues(annotationType, annotationAttributes); break; } } if (annotationAttributes != null) { // @DubboReference on @Bean method LinkedHashMap<String, Object> attributes = new LinkedHashMap<>(annotationAttributes); // reset id attribute attributes.put(ReferenceAttributes.ID, beanName); // convert annotation props ReferenceBeanSupport.convertReferenceProps(attributes, beanClass); // get interface String interfaceName = (String) attributes.get(ReferenceAttributes.INTERFACE); // check beanClass and reference interface class if (!StringUtils.isEquals(interfaceName, beanClass.getName()) && beanClass != GenericService.class) { String beanMethodSignature = factoryMethodMetadata.getDeclaringClassName() + "#" + factoryMethodMetadata.getMethodName() + "()"; throw new BeanCreationException( "The 'interfaceClass' or 'interfaceName' attribute value of @DubboReference annotation " + "is inconsistent with the generic type of the ReferenceBean returned by the bean method. " + "The interface class of @DubboReference is: " + interfaceName + ", but return ReferenceBean<" + beanClass.getName() + ">. " + "Please remove the 'interfaceClass' and 'interfaceName' attributes from @DubboReference annotation. " + "Please check bean method: " + beanMethodSignature); } Class interfaceClass = beanClass; // set attribute instead of property values beanDefinition.setAttribute(Constants.REFERENCE_PROPS, attributes); beanDefinition.setAttribute(ReferenceAttributes.INTERFACE_CLASS, interfaceClass); beanDefinition.setAttribute(ReferenceAttributes.INTERFACE_NAME, interfaceName); } else { // raw reference bean // the ReferenceBean is not yet initialized beanDefinition.setAttribute(ReferenceAttributes.INTERFACE_CLASS, beanClass); if (beanClass != GenericService.class) { beanDefinition.setAttribute(ReferenceAttributes.INTERFACE_NAME, beanClass.getName()); } } // set id beanDefinition.getPropertyValues().add(ReferenceAttributes.ID, beanName); }
3.68
flink_CheckpointConfig_setCheckpointingMode
/** * Sets the checkpointing mode (exactly-once vs. at-least-once). * * @param checkpointingMode The checkpointing mode. */ public void setCheckpointingMode(CheckpointingMode checkpointingMode) { configuration.set(ExecutionCheckpointingOptions.CHECKPOINTING_MODE, checkpointingMode); }
3.68
flink_JoinOperator_projectTuple16
/** * Projects a pair of joined elements to a {@link Tuple} with the previously selected * fields. Requires the classes of the fields of the resulting tuples. * * @return The projected data set. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> ProjectJoin< I1, I2, Tuple16< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> projectTuple16() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo< Tuple16< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>> tType = new TupleTypeInfo< Tuple16< T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>(fTypes); return new ProjectJoin< I1, I2, Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15>>( this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this); }
3.68
framework_StaticSection_setHtml
/** * Sets the HTML content displayed in this cell. * * @param html * the html to set, not null */ public void setHtml(String html) { Objects.requireNonNull(html, "html cannot be null"); removeComponentIfPresent(); cellState.html = html; cellState.type = GridStaticCellType.HTML; row.section.markAsDirty(); }
3.68
flink_SpillChannelManager_unregisterChannelToBeRemovedAtShutdown
/** * Removes a channel from the list of channels that are to be removed at shutdown. * * @param channel The channel id. */ synchronized void unregisterChannelToBeRemovedAtShutdown(FileIOChannel.ID channel) { channelsToDeleteAtShutdown.remove(channel); }
3.68
flink_OptimizerNode_getBroadcastConnectionNames
/** Return the list of names associated with broadcast inputs for this node. */ public List<String> getBroadcastConnectionNames() { return this.broadcastConnectionNames; }
3.68
morf_InsertStatement_toString
/** * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder result = new StringBuilder("SQL INSERT INTO [" + table + "]"); if (!hints.isEmpty()) result.append(" HINTS ").append(hints); if (!fields.isEmpty()) result.append(" FIELDS ").append(fields); if (!values.isEmpty()) result.append(" VALUES ").append(values); if (selectStatement != null) result.append(" FROM SELECT [").append(selectStatement).append("]"); if (fromTable != null) result.append(" FROM [").append(fromTable).append("]"); if (!fieldDefaults.isEmpty()) result.append(" WITH DEFAULTS ").append(fieldDefaults.values()); return result.toString(); }
3.68
hbase_ThrottleSettings_getProto
/** * Returns a copy of the internal state of <code>this</code> */ QuotaProtos.ThrottleRequest getProto() { return proto.toBuilder().build(); }
3.68
morf_H2_formatJdbcUrl
/** * @see org.alfasoftware.morf.jdbc.DatabaseType#formatJdbcUrl(JdbcUrlElements) */ @Override public String formatJdbcUrl(JdbcUrlElements jdbcUrlElements) { // http://www.h2database.com/html/features.html#database_url StringBuilder builder = new StringBuilder() .append("jdbc:h2:"); if (StringUtils.isNotBlank(jdbcUrlElements.getHostName()) && !"localhost".equals(jdbcUrlElements.getHostName()) || jdbcUrlElements.getPort() > 0) { builder .append("tcp://") .append(jdbcUrlElements.getHostName()) .append(jdbcUrlElements.getPort() == 0 ? "" : ":" + jdbcUrlElements.getPort()) .append("/mem:") // this means we're going to use a remote in-memory DB which isn't ideal .append(jdbcUrlElements.getDatabaseName()); } else { // no host, try the instanceName if (StringUtils.isBlank(jdbcUrlElements.getInstanceName())) { builder .append("mem:") .append(jdbcUrlElements.getDatabaseName()); } else { // Allow the instanceName to have a trailing slash, or not. builder .append("file:") .append(jdbcUrlElements.getInstanceName()) .append(jdbcUrlElements.getInstanceName().endsWith(File.separator) ? "" : File.separator) .append(jdbcUrlElements.getDatabaseName()); } } // The DB_CLOSE_DELAY=-1 prevents the database being lost when the last connection is closed. // The DEFAULT_LOCK_TIMEOUT=150000 sets the default lock timeout to 150 // seconds. When the value is not set, it takes default // org.h2.engine.Constants.INITIAL_LOCK_TIMEOUT=2000 value // The LOB_TIMEOUT defines how long a lob returned from a ResultSet is available post-commit, defaulting to 5 minutes (300000 ms) // Set this to 2 seconds to allow certain tests using lob fields to work // The MV_STORE is a flag that governs whether to use the new storage engine (defaulting to true as of H2 version 1.4, false in prior versions) // Note that implementations of H2 prior to version 1.4.199 had an MVCC parameter used to allow higher concurrency. // This configuration has been removed and the old "PageStore" implementation (MV_STORE=FALSE) is no longer supported. builder.append(";DB_CLOSE_DELAY=-1;DEFAULT_LOCK_TIMEOUT=150000;LOB_TIMEOUT=2000;MV_STORE=TRUE"); return builder.toString(); }
3.68
hadoop_FederationCache_buildSubClusterPolicyConfigurationResponse
/** * Build SubClusterPolicyConfiguration Response. * * @return SubClusterPolicyConfiguration Response. * @throws YarnException exceptions from yarn servers. */ private CacheResponse<SubClusterPolicyConfiguration> buildSubClusterPolicyConfigurationResponse() throws YarnException { GetSubClusterPoliciesConfigurationsRequest request = GetSubClusterPoliciesConfigurationsRequest.newInstance(); GetSubClusterPoliciesConfigurationsResponse response = stateStore.getPoliciesConfigurations(request); List<SubClusterPolicyConfiguration> policyConfigs = response.getPoliciesConfigs(); CacheResponse<SubClusterPolicyConfiguration> cacheResponse = new SubClusterPolicyConfigurationCacheResponse(); cacheResponse.setList(policyConfigs); return cacheResponse; }
3.68
flink_TaskStatsRequestCoordinator_collectTaskStats
/** * Collects result from one of the tasks. * * @param executionId ID of the Task. * @param taskStatsResult Result of the stats sample from the Task. */ protected void collectTaskStats( ImmutableSet<ExecutionAttemptID> executionId, T taskStatsResult) { checkDiscarded(); if (pendingTasks.remove(executionId)) { statsResultByTaskGroup.put(executionId, taskStatsResult); } else if (isComplete()) { throw new IllegalStateException("Completed"); } else { throw new IllegalArgumentException("Unknown task " + executionId); } }
3.68
hbase_HBaseSaslRpcServer_unwrap
/** * Unwrap InvalidToken exception, otherwise return the one passed in. */ public static Throwable unwrap(Throwable e) { Throwable cause = e; while (cause != null) { if (cause instanceof InvalidToken) { return cause; } cause = cause.getCause(); } return e; }
3.68
framework_DefaultFieldGroupFieldFactory_populateWithEnumData
/** * Populates the given select with all the enums in the given {@link Enum} * class. Uses {@link Enum}.toString() for caption. * * @param select * The select to populate * @param enumClass * The Enum class to use */ @SuppressWarnings({ "rawtypes", "unchecked" }) protected void populateWithEnumData(AbstractSelect select, Class<? extends Enum> enumClass) { select.removeAllItems(); for (Object p : select.getContainerPropertyIds()) { select.removeContainerProperty(p); } select.addContainerProperty(CAPTION_PROPERTY_ID, String.class, ""); select.setItemCaptionPropertyId(CAPTION_PROPERTY_ID); EnumSet<?> enumSet = EnumSet.allOf(enumClass); for (Object r : enumSet) { Item newItem = select.addItem(r); newItem.getItemProperty(CAPTION_PROPERTY_ID).setValue(r.toString()); } }
3.68
framework_UIConnector_getSubWindows
/** * Return a list of current sub-windows. This method is meant for testing * purposes only. * * @return a list of sub-windows */ public List<WindowConnector> getSubWindows() { List<WindowConnector> windows = new ArrayList<>(); for (ComponentConnector child : getChildComponents()) { if (child instanceof WindowConnector) { windows.add((WindowConnector) child); } } return windows; }
3.68
flink_AbstractPythonFunctionOperator_advanceWatermark
/** * Advances the watermark of all managed timer services, potentially firing event time timers. * It also ensures that the fired timers are processed in the Python user-defined functions. */ private void advanceWatermark(Watermark watermark) throws Exception { if (getTimeServiceManager().isPresent()) { InternalTimeServiceManager<?> timeServiceManager = getTimeServiceManager().get(); timeServiceManager.advanceWatermark(watermark); while (!isBundleFinished()) { invokeFinishBundle(); timeServiceManager.advanceWatermark(watermark); } } }
3.68
flink_MultipleParameterTool_toMultiMap
/**
 * Return MultiMap of all the parameters processed by {@link MultipleParameterTool}.
 *
 * @return MultiMap of the {@link MultipleParameterTool}. Key is String and Value is a
 *     Collection of String.
 */
public Map<String, Collection<String>> toMultiMap() {
    return data;
}
3.68
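A hedged usage sketch for the accessor above, built with Flink's `MultipleParameterTool.fromArgs` factory; the argument values are made up for illustration:

// Repeated keys are collected rather than overwritten.
String[] args = {"--input", "file1", "--input", "file2", "--parallelism", "4"};
MultipleParameterTool params = MultipleParameterTool.fromArgs(args);
Map<String, Collection<String>> multiMap = params.toMultiMap();
// multiMap.get("input") now contains ["file1", "file2"].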
hbase_ProcedureExecutor_isProcedureOwner
/**
 * Check if the user is this procedure's owner
 * @param procId the target procedure
 * @param user the user
 * @return true if the user is the owner of the procedure, false otherwise or if the owner is
 *         unknown.
 */
public boolean isProcedureOwner(long procId, User user) {
  if (user == null) {
    return false;
  }
  final Procedure<TEnvironment> runningProc = procedures.get(procId);
  if (runningProc != null) {
    return runningProc.getOwner().equals(user.getShortName());
  }

  final CompletedProcedureRetainer<TEnvironment> retainer = completed.get(procId);
  if (retainer != null) {
    return retainer.getProcedure().getOwner().equals(user.getShortName());
  }

  // Procedure either does not exist or has already completed and got cleaned up.
  // At this time, we cannot check the owner of the procedure
  return false;
}
3.68
pulsar_Authentication_getAuthData
/**
 * Get/Create an authentication data provider which provides the data that this client will send
 * to the broker. Some authentication methods need to authenticate on a per-channel basis, so the
 * provider needs to know which broker it will talk to.
 *
 * @param brokerHostName
 *            target broker host name
 *
 * @return The authentication data provider
 */
default AuthenticationDataProvider getAuthData(String brokerHostName) throws PulsarClientException {
    return this.getAuthData();
}
3.68
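A brief usage sketch, assuming a token-based client plugin; the token value and broker host are placeholders:

// Obtain per-broker authentication data from a client-side auth plugin.
Authentication auth = AuthenticationFactory.token("eyJhbGciOi...");
AuthenticationDataProvider data = auth.getAuthData("broker-1.example.com");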
hbase_CatalogFamilyFormat_getServerColumn
/**
 * Returns the column qualifier for server column for replicaId
 * @param replicaId the replicaId of the region
 * @return a byte[] for server column qualifier
 */
public static byte[] getServerColumn(int replicaId) {
  return replicaId == 0
    ? HConstants.SERVER_QUALIFIER
    : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
      + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
}
3.68
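To illustrate the two branches (a sketch; the exact padding of the non-primary qualifier depends on `RegionInfo.REPLICA_ID_FORMAT`, which zero-pads the replica id in current HBase versions):

byte[] primary = CatalogFamilyFormat.getServerColumn(0); // plain "server" qualifier
byte[] replica = CatalogFamilyFormat.getServerColumn(1); // e.g. "server_0001" for replica 1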
flink_Catalog_listProcedures
/**
 * List the names of all procedures in the given database. An empty list is returned if no
 * procedure exists.
 *
 * @param dbName name of the database.
 * @return a list of the names of the procedures in this database
 * @throws DatabaseNotExistException if the database does not exist
 * @throws CatalogException in case of any runtime exception
 */
default List<String> listProcedures(String dbName)
        throws DatabaseNotExistException, CatalogException {
    throw new UnsupportedOperationException(
            String.format("listProcedures is not implemented for %s.", this.getClass()));
}
3.68
morf_ResultSetComparer_compareColumnValue
/**
 * Given data values from right and left data sets, compare and record mismatch.
 *
 * @return An optional mismatch.
 */
@SuppressWarnings({ "rawtypes" })
private Optional<ResultSetMismatch> compareColumnValue(Comparable leftValue, Comparable rightValue,
    String[] keys, int columnIndex, int columnType, MismatchType mismatchTypeToRaise) {

  if (leftValue == null && rightValue == null) {
    return Optional.empty();
  }

  if (rightValue == null && leftValue != null) {
    return Optional.of(new ResultSetMismatch(mismatchTypeToRaise, columnIndex,
        valueToString(leftValue, columnType), null, keys));
  }

  if (rightValue != null && leftValue == null) {
    return Optional.of(new ResultSetMismatch(mismatchTypeToRaise, columnIndex,
        null, valueToString(rightValue, columnType), keys));
  }

  if (databaseEquivalentStringComparator.get().compare(leftValue, rightValue) != 0) {
    return Optional.of(new ResultSetMismatch(
      mismatchTypeToRaise, columnIndex,
      valueToString(leftValue, columnType),
      valueToString(rightValue, columnType),
      keys
    ));
  }

  return Optional.empty();
}
3.68
hibernate-validator_DefaultValidationOrder_assertDefaultGroupSequenceIsExpandable
/**
 * Asserts that the default group sequence of the validated bean can be expanded into the
 * sequences which need to be validated.
 *
 * @param defaultGroupSequence the default group sequence of the bean currently validated
 *
 * @throws jakarta.validation.GroupDefinitionException in case {@code defaultGroupSequence} cannot be
 * expanded into one of the group sequences which need to be validated
 */
@Override
public void assertDefaultGroupSequenceIsExpandable(List<Class<?>> defaultGroupSequence) throws GroupDefinitionException {
	if ( sequenceMap == null ) {
		return;
	}

	for ( Map.Entry<Class<?>, Sequence> entry : sequenceMap.entrySet() ) {
		List<Group> sequenceGroups = entry.getValue().getComposingGroups();
		int defaultGroupIndex = sequenceGroups.indexOf( Group.DEFAULT_GROUP );

		if ( defaultGroupIndex != -1 ) {
			List<Group> defaultGroupList = buildTempGroupList( defaultGroupSequence );
			ensureDefaultGroupSequenceIsExpandable( sequenceGroups, defaultGroupList, defaultGroupIndex );
		}
	}
}
3.68
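For context, a hedged sketch of the kind of redefined default group sequence this assertion operates on; `Basic` and `Order` are hypothetical:

// Hypothetical marker group used to stage validation.
interface Basic {
}

// Redefines Order's default group sequence: Basic constraints are validated
// first, then the constraints sitting in Order's own (Default) position.
@GroupSequence({ Basic.class, Order.class })
public class Order {

    @NotNull(groups = Basic.class)
    private String id;

    @Size(min = 1) // validated in the redefined Default (Order) position
    private String description;
}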
flink_TableFunctionCollector_outputResult
/** Output final result of this UDTF to downstream operators. */
@SuppressWarnings("unchecked")
public void outputResult(Object result) {
    this.collected = true;
    this.collector.collect(result);
}
3.68
hmily_HmilyRepositoryFacade_updateHmilyTransactionStatus
/**
 * Update hmily transaction status.
 *
 * @param transId the trans id
 * @param status the status
 */
public void updateHmilyTransactionStatus(final Long transId, final Integer status) {
    checkRows(hmilyRepository.updateHmilyTransactionStatus(transId, status));
}
3.68
dubbo_ServiceInstancesChangedEvent_getServiceName
/**
 * @return The name of the service that was changed
 */
public String getServiceName() {
    return serviceName;
}
3.68
hbase_ConnectionOverAsyncConnection_getBatchPool
// only used for executing coprocessor calls, as users may reference the methods in the
// BlockingInterface of the protobuf stub so we have to execute the call in a separate thread...
// Will be removed in 4.0.0 along with the deprecated coprocessor methods in Table and Admin
// interface.
private ExecutorService getBatchPool() throws IOException {
  if (batchPool == null) {
    synchronized (this) {
      if (isClosed()) {
        throw new DoNotRetryIOException("Connection is closed");
      }
      if (batchPool == null) {
        this.batchPool = createThreadPool();
      }
    }
  }
  return this.batchPool;
}
3.68
flink_BlobCacheSizeTracker_track
/** Register the BLOB to the tracker. */
public void track(JobID jobId, BlobKey blobKey, long size) {
    checkNotNull(jobId);
    checkNotNull(blobKey);
    checkArgument(size >= 0);

    synchronized (lock) {
        if (caches.putIfAbsent(Tuple2.of(jobId, blobKey), size) == null) {
            blobKeyByJob.computeIfAbsent(jobId, ignore -> new HashSet<>()).add(blobKey);

            total += size;
            if (total > sizeLimit) {
                LOG.warn(
                        "The overall size of BLOBs in the cache exceeds "
                                + "the limit. Limit = [{}], Current: [{}], "
                                + "The size of next BLOB: [{}].",
                        sizeLimit,
                        total,
                        size);
            }
        } else {
            LOG.warn(
                    "Attempt to track a duplicated BLOB. This may indicate a duplicate upload "
                            + "or a hash collision. Ignoring newest upload. "
                            + "JobID = [{}], BlobKey = [{}]",
                    jobId,
                    blobKey);
        }
    }
}
3.68
rocketmq-connect_IdentifierRules_trailingQuoteString
/**
 * Get the string used as a trailing quote.
 *
 * @return the trailing quote string; never null
 */
public String trailingQuoteString() {
    return trailingQuoteString;
}
3.68
hadoop_ConnectionContext_isIdle
/**
 * Check if the connection is idle. It checks if the connection is not used
 * by another thread.
 * @return True if the connection is not used by another thread.
 */
public synchronized boolean isIdle() {
  return !isActive() && !isClosed();
}
3.68
flink_OverWindowPartitionedOrdered_as
/**
 * Assigns an alias for this window that the following {@code select()} clause can refer to.
 *
 * @param alias alias for this over window
 * @return the fully defined over window
 */
public OverWindow as(Expression alias) {
    return new OverWindow(
            alias,
            partitionBy,
            orderBy,
            valueLiteral(OverWindowRange.UNBOUNDED_RANGE),
            Optional.empty());
}
3.68
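A hedged Table API sketch showing where this builder step fits; the `orders` table and its column names are assumptions:

Table result = orders
        .window(
                Over.partitionBy($("user"))
                        .orderBy($("rowtime"))
                        .as("w")) // defaults to an unbounded range, as built above
        .select($("user"), $("amount").sum().over($("w")));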
framework_VOverlay_getOverlayContainer
/**
 * Gets the 'overlay container' element pertaining to the given
 * {@link ApplicationConnection}. Each overlay should be created in an
 * overlay container element, so that the correct theme and styles can be
 * applied.
 *
 * @param ac
 *            A reference to {@link ApplicationConnection}
 * @return The overlay container
 */
public static com.google.gwt.user.client.Element getOverlayContainer(
        ApplicationConnection ac) {
    String id = ac.getConfiguration().getRootPanelId();
    id += "-overlays";
    Element container = DOM.getElementById(id);
    if (container == null) {
        container = DOM.createDiv();
        container.setId(id);
        String styles = ac.getUIConnector().getWidget().getParent()
                .getStyleName();
        if (styles != null && !styles.isEmpty()) {
            container.addClassName(styles);
        }
        container.addClassName(CLASSNAME_CONTAINER);
        RootPanel.get().getElement().appendChild(container);
    }
    return DOM.asOld(container);
}
3.68
hbase_HRegion_getSpecificStores
/**
 * get stores that match the specified families
 * @return the stores that need to be flushed.
 */
private Collection<HStore> getSpecificStores(List<byte[]> families) {
  Collection<HStore> specificStoresToFlush = new ArrayList<>();
  for (byte[] family : families) {
    specificStoresToFlush.add(stores.get(family));
  }
  return specificStoresToFlush;
}
3.68