Dataset columns:
name: string (lengths 12 to 178)
code_snippet: string (lengths 8 to 36.5k)
score: float64 (range 3.26 to 3.68)
Each record below lists name, code_snippet, then score.
flink_QueryOperationConverter_getAggCall
/** Get the {@link AggCall} corresponding to the aggregate or table aggregate expression. */ private AggCall getAggCall(Expression aggregateExpression) { if (isFunctionOfKind(aggregateExpression, TABLE_AGGREGATE)) { return aggregateExpression.accept(tableAggregateVisitor); } else { return aggregateExpression.accept(aggregateVisitor); } }
3.68
framework_MultiSelectionRenderer_getClientTop
/** Get the "top" of an element in relation to "client" coordinates. */ private int getClientTop(final Element e) { return e.getAbsoluteTop(); }
3.68
flink_TypeSerializerSnapshotSerializationUtil_writeSerializerSnapshot
/** * Writes a {@link TypeSerializerSnapshot} to the provided data output view. * * <p>It is written with a format that can be later read again using {@link * #readSerializerSnapshot(DataInputView, ClassLoader)}. * * @param out the data output view * @param serializerSnapshot the serializer configuration snapshot to write */ public static <T> void writeSerializerSnapshot( DataOutputView out, TypeSerializerSnapshot<T> serializerSnapshot) throws IOException { new TypeSerializerSnapshotSerializationProxy<>(serializerSnapshot).write(out); }
3.68
framework_VAbstractCalendarPanel_adjustDateToFitInsideRange
/** * Adjusts a date to fit inside the range, but only if it falls outside of it. * * @param date the date to adjust * @return the adjusted date */ private Date adjustDateToFitInsideRange(Date date) { if (!isAcceptedByRangeStart(date, resolution)) { date = parseRangeString(rangeStart); } else if (!isAcceptedByRangeEnd(date, resolution)) { date = parseRangeString(rangeEnd); } return date; }
3.68
flink_TableConfig_getOptional
/** * {@inheritDoc} * * <p>This method gives read-only access to the full configuration. However, * application-specific configuration has precedence. Configuration of outer layers is used for * defaults and fallbacks. See the docs of {@link TableConfig} for more information. * * @param option metadata of the option to read * @param <T> type of the value to read * @return read value or {@link Optional#empty()} if not found */ @Override public <T> Optional<T> getOptional(ConfigOption<T> option) { final Optional<T> tableValue = configuration.getOptional(option); if (tableValue.isPresent()) { return tableValue; } return rootConfiguration.getOptional(option); }
3.68
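The getOptional snippet above layers an application-scoped configuration over a root configuration. Below is a minimal standalone sketch of that precedence rule using plain java.util.Optional over two map-backed layers; LayeredConfig, appLayer, and rootLayer are illustrative names, not Flink API.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

// Illustrative model of the TableConfig#getOptional precedence rule:
// the application-specific layer wins; the outer layer supplies fallbacks.
public class LayeredConfig {
    private final Map<String, String> appLayer = new HashMap<>();
    private final Map<String, String> rootLayer = new HashMap<>();

    public Optional<String> getOptional(String key) {
        Optional<String> appValue = Optional.ofNullable(appLayer.get(key));
        if (appValue.isPresent()) {
            return appValue; // application-specific configuration has precedence
        }
        return Optional.ofNullable(rootLayer.get(key)); // outer layer as fallback
    }

    public static void main(String[] args) {
        LayeredConfig config = new LayeredConfig();
        config.rootLayer.put("parallelism", "4");
        config.appLayer.put("parallelism", "8");
        System.out.println(config.getOptional("parallelism").get());  // prints 8
        System.out.println(config.getOptional("missing").isPresent()); // prints false
    }
}
```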
morf_InsertStatement_deepCopy
/** * @see org.alfasoftware.morf.util.DeepCopyableWithTransformation#deepCopy(org.alfasoftware.morf.util.DeepCopyTransformation) */ @Override public InsertStatementBuilder deepCopy(DeepCopyTransformation transformer) { return new InsertStatementBuilder(this, transformer); }
3.68
flink_CustomHeadersDecorator_addCustomHeader
/** * Adds a custom header to the message. Initializes the custom headers collection if it hasn't * been initialized yet. * * @param httpHeader The header to add. */ public void addCustomHeader(HttpHeader httpHeader) { if (customHeaders == null) { customHeaders = new ArrayList<>(); } customHeaders.add(httpHeader); }
3.68
framework_ContainerOrderedWrapper_getItem
/* * Gets the specified Item from the container. Don't add a JavaDoc comment * here, we use the default documentation from implemented interface. */ @Override public Item getItem(Object itemId) { return container.getItem(itemId); }
3.68
flink_PojoSerializerSnapshot_newPojoHasNewOrRemovedFields
/** * Checks whether the new {@link PojoSerializer} has new or removed fields compared to the * previous one. */ private static boolean newPojoHasNewOrRemovedFields( LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots, PojoSerializer<?> newPojoSerializer) { int numRemovedFields = fieldSerializerSnapshots.absentKeysOrValues().size(); int numPreexistingFields = fieldSerializerSnapshots.size() - numRemovedFields; boolean hasRemovedFields = numRemovedFields > 0; boolean hasNewFields = newPojoSerializer.getFields().length - numPreexistingFields > 0; return hasRemovedFields || hasNewFields; }
3.68
dubbo_DynamicConfiguration_close
/** * Close the configuration * * @throws Exception * @since 2.7.5 */ @Override default void close() throws Exception { throw new UnsupportedOperationException(); }
3.68
hadoop_FileIoProvider_flush
/** * See {@link Flushable#flush()}. * * @param volume target volume. null if unavailable. * @throws IOException */ public void flush( @Nullable FsVolumeSpi volume, Flushable f) throws IOException { final long begin = profilingEventHook.beforeFileIo(volume, FLUSH, 0); try { faultInjectorEventHook.beforeFileIo(volume, FLUSH, 0); f.flush(); profilingEventHook.afterFileIo(volume, FLUSH, begin, 0); } catch (Exception e) { onFailure(volume, begin); throw e; } }
3.68
hbase_Result_getColumnLatestCell
/** * The Cell for the most recent timestamp for a given column. * @param family family name * @param foffset family offset * @param flength family length * @param qualifier column qualifier * @param qoffset qualifier offset * @param qlength qualifier length * @return the Cell for the column, or null if no value exists in the row or none have been * selected in the query (Get/Scan) */ public Cell getColumnLatestCell(byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, int qlength) { Cell[] kvs = rawCells(); // side effect possibly. if (kvs == null || kvs.length == 0) { return null; } int pos = binarySearch(kvs, family, foffset, flength, qualifier, qoffset, qlength); if (pos == -1) { return null; } if ( PrivateCellUtil.matchingColumn(kvs[pos], family, foffset, flength, qualifier, qoffset, qlength) ) { return kvs[pos]; } return null; }
3.68
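The getColumnLatestCell snippet above binary-searches the cell array and then re-checks that the hit really matches the requested column. Below is a simplified sketch of that search-then-verify idiom over a sorted String[]; findExact is a hypothetical helper, not HBase API, and the re-check is redundant for an exact Arrays.binarySearch but essential in the original, where the custom search compares by column over cells sorted by full key.

```java
import java.util.Arrays;

// Sketch of the "binary search, then verify the match" idiom from
// Result#getColumnLatestCell, using a sorted String[] instead of Cell[].
public class SearchThenVerify {
    static String findExact(String[] sorted, String wanted) {
        if (sorted == null || sorted.length == 0) {
            return null; // no data in the row
        }
        int pos = Arrays.binarySearch(sorted, wanted);
        if (pos < 0) {
            return null; // negative return encodes "not found"
        }
        // Defensive verification, mirroring the matchingColumn(...) check above
        return wanted.equals(sorted[pos]) ? sorted[pos] : null;
    }

    public static void main(String[] args) {
        String[] cells = {"fam:a", "fam:b", "fam:q"};
        System.out.println(findExact(cells, "fam:q")); // fam:q
        System.out.println(findExact(cells, "fam:x")); // null
    }
}
```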
framework_Upload_getUploadSize
/** * Returns the size of the file currently being uploaded. The value is only valid while an * upload is in progress. * * @return size in bytes */ public long getUploadSize() { return contentLength; }
3.68
flink_RocksDBStateBackend_setWriteBatchSize
/** * Sets the max batch size to be used in {@link RocksDBWriteBatchWrapper}; a non-positive value * disables the memory size controller and falls back to the item count controller. * * @param writeBatchSize The batch size to be used in {@link RocksDBWriteBatchWrapper}. */ public void setWriteBatchSize(long writeBatchSize) { rocksDBStateBackend.setWriteBatchSize(writeBatchSize); }
3.68
hbase_RegionServerFlushTableProcedureManager_buildSubprocedure
/** * If in a running state, creates the specified subprocedure to flush table regions. Because this * gets the local list of regions to flush and not the set the master had, there is a possibility * of a race where regions may be missed. * @return Subprocedure to submit to the ProcedureMember. */ public Subprocedure buildSubprocedure(String table, List<String> families) { // don't run the subprocedure if the parent is stop(ping) if (rss.isStopping() || rss.isStopped()) { throw new IllegalStateException("Can't start flush region subprocedure on RS: " + rss.getServerName() + ", because stopping/stopped!"); } // check to see if this server is hosting any regions for the table List<HRegion> involvedRegions; try { involvedRegions = getRegionsToFlush(table); } catch (IOException e1) { throw new IllegalStateException("Failed to figure out if there is region to flush.", e1); } // We need to run the subprocedure even if we have no relevant regions. The coordinator // expects participation in the procedure and without sending message the master procedure // will hang and fail. LOG.debug("Launching subprocedure to flush regions for " + table); ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher(table); Configuration conf = rss.getConfiguration(); long timeoutMillis = conf.getLong(FLUSH_TIMEOUT_MILLIS_KEY, FLUSH_TIMEOUT_MILLIS_DEFAULT); long wakeMillis = conf.getLong(FLUSH_REQUEST_WAKE_MILLIS_KEY, FLUSH_REQUEST_WAKE_MILLIS_DEFAULT); FlushTableSubprocedurePool taskManager = new FlushTableSubprocedurePool(rss.getServerName().toString(), conf, rss); return new FlushTableSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, involvedRegions, table, families, taskManager); }
3.68
flink_WebMonitorUtils_find
/** * Finds the Flink log directory using log.file Java property that is set during startup. */ public static LogFileLocation find(Configuration config) { final String logEnv = "log.file"; String logFilePath = System.getProperty(logEnv); if (logFilePath == null) { LOG.warn("Log file environment variable '{}' is not set.", logEnv); logFilePath = config.getString(WebOptions.LOG_PATH); } // not configured, cannot serve log files if (logFilePath == null || logFilePath.length() < 4) { LOG.warn( "JobManager log files are unavailable in the web dashboard. " + "Log file location not found in environment variable '{}' or configuration key '{}'.", logEnv, WebOptions.LOG_PATH.key()); return new LogFileLocation(null, null, null); } String outFilePath = logFilePath.substring(0, logFilePath.length() - 3).concat("out"); File logFile = resolveFileLocation(logFilePath); File logDir = null; if (logFile != null) { logDir = resolveFileLocation(logFile.getParent()); } LOG.info("Determined location of main cluster component log file: {}", logFilePath); LOG.info("Determined location of main cluster component stdout file: {}", outFilePath); return new LogFileLocation(logFile, resolveFileLocation(outFilePath), logDir); }
3.68
open-banking-gateway_EncryptionKeySerde_readKey
/** * Read public-private key pair from InputStream * @param is InputStream to read key from * @return Read key pair */ @SneakyThrows public PubAndPrivKey readKey(InputStream is) { PubAndPrivKeyContainer container = mapper.readValue(is, PubAndPrivKeyContainer.class); if (!PKCS_8.equals(container.getPrivFormat())) { throw new IllegalArgumentException("Bad key format"); } if (!X509.equals(container.getPubFormat())) { throw new IllegalArgumentException("Bad key format"); } KeyFactory factory = KeyFactory.getInstance(container.getAlgo()); var privKey = factory.generatePrivate(new PKCS8EncodedKeySpec(container.getEncoded())); var pubKey = factory.generatePublic(new X509EncodedKeySpec(container.getPubEncoded())); return new PubAndPrivKey(pubKey, privKey); }
3.68
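The readKey snippet above rebuilds a key pair from a PKCS#8-encoded private key and an X.509-encoded public key via KeyFactory. Here is a self-contained, JDK-only round trip showing the same decode step; RSA is chosen arbitrarily for the sketch.

```java
import java.security.KeyFactory;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.security.spec.PKCS8EncodedKeySpec;
import java.security.spec.X509EncodedKeySpec;

// JDK sketch of the decode step used by readKey: private keys travel in
// PKCS#8 encoding, public keys in X.509 encoding.
public class KeyRoundTrip {
    public static void main(String[] args) throws Exception {
        KeyPair pair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
        byte[] privEncoded = pair.getPrivate().getEncoded(); // format "PKCS#8"
        byte[] pubEncoded = pair.getPublic().getEncoded();   // format "X.509"

        KeyFactory factory = KeyFactory.getInstance("RSA");
        PrivateKey priv = factory.generatePrivate(new PKCS8EncodedKeySpec(privEncoded));
        PublicKey pub = factory.generatePublic(new X509EncodedKeySpec(pubEncoded));

        System.out.println(priv.getFormat()); // PKCS#8
        System.out.println(pub.getFormat());  // X.509
    }
}
```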
morf_AliasedField_toString
/** * @see java.lang.Object#toString() */ @Override public String toString() { return StringUtils.isEmpty(alias) ? "" : " AS " + alias; }
3.68
framework_VCalendarPanel_getNextKey
/** * The key that selects the next week in the calendar. By default this is * the down arrow key but by overriding this method it can be changed to * whatever you like. * * @return the key code that selects the next week */ protected int getNextKey() { return KeyCodes.KEY_DOWN; }
3.68
querydsl_QueryResults_getResults
/** * Get the results in List form * * An empty list is returned for no results. * * @return results */ public List<T> getResults() { return results; }
3.68
flink_ResourceManagerId_generate
/** Generates a new random ResourceManagerId. */ public static ResourceManagerId generate() { return new ResourceManagerId(); }
3.68
flink_ProcessorArchitecture_getProcessorArchitecture
/** Gets the ProcessorArchitecture of the system running this process. */ public static ProcessorArchitecture getProcessorArchitecture() { return CURRENT; }
3.68
hadoop_MappingRuleResult_updateNormalizedQueue
/** * External interface for setting the normalized version of the queue. This * class cannot normalize on its own, but provides a way to store the * normalized name of the target queue. * @param normalizedQueueName The normalized name of the queue */ public void updateNormalizedQueue(String normalizedQueueName) { this.normalizedQueue = normalizedQueueName; }
3.68
framework_AbstractSelect_getItemCaptionPropertyId
/** * Gets the item caption property. * * @return the Id of the property used as item caption source. */ public Object getItemCaptionPropertyId() { return itemCaptionPropertyId; }
3.68
framework_VAbstractCalendarPanel_setDate
/** * Sets the date of the panel. * * @param currentDate * The date to set */ public void setDate(Date currentDate) { doSetDate(currentDate, false, () -> { }); }
3.68
framework_TabSheet_setSelected
/** * Sets the selected tab in the TabSheet. Ensures that the selected tab is * repainted if needed. * * @param component * The new selection or null for no selection */ private void setSelected(Component component) { Tab tab = tabs.get(selected); selected = component; // Repaint of the selected component is needed as only the selected // component is communicated to the client. Otherwise this will be a // "cached" update even though the client knows nothing about the // connector if (selected != null) { tab = getTab(component); if (tab != null && tab.getDefaultFocusComponent() != null) { tab.getDefaultFocusComponent().focus(); } getState().selected = keyMapper.key(selected); selected.markAsDirtyRecursive(); } else { getState().selected = null; } }
3.68
hadoop_OBSObjectBucketUtils_newObjectMetadata
/** * Create a new object metadata instance. Any standard metadata headers are * added here, for example: encryption. * * @param length length of data to set in header. * @return a new metadata instance */ static ObjectMetadata newObjectMetadata(final long length) { final ObjectMetadata om = new ObjectMetadata(); if (length >= 0) { om.setContentLength(length); } return om; }
3.68
open-banking-gateway_FintechSecureStorage_validatePassword
/** * Validates FinTechs' Datasafe/KeyStore password * @param fintech Target FinTech to check password for * @param password Password to validate */ public void validatePassword(Fintech fintech, Supplier<char[]> password) { if (fintech.getFintechOnlyPrvKeys().isEmpty()) { throw new IllegalStateException("FinTech has no private keys"); } var keys = fintech.getFintechOnlyPrvKeys().stream() .map(it -> this.fintechOnlyPrvKeyFromPrivate(it, fintech, password)) .collect(Collectors.toList()); if (keys.isEmpty()) { throw new IllegalStateException("Failed to extract FinTech keys"); } }
3.68
flink_MetricConfig_getDouble
/** * Searches for the property with the specified key in this property list. If the key is not * found in this property list, the default property list, and its defaults, recursively, are * then checked. The method returns the default value argument if the property is not found. * * @param key the hashtable key. * @param defaultValue a default value. * @return the value in this property list with the specified key value parsed as a double. */ public double getDouble(String key, double defaultValue) { String argument = getProperty(key, null); return argument == null ? defaultValue : Double.parseDouble(argument); }
3.68
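getDouble above is a thin parse-or-default wrapper around Properties#getProperty. A standalone model of that pattern follows; note that, as in the original, a present but malformed value surfaces as a NumberFormatException rather than the default.

```java
import java.util.Properties;

// Minimal model of MetricConfig#getDouble: delegate to Properties#getProperty
// and fall back to the default only when the key is absent.
public class DoubleProperty {
    static double getDouble(Properties props, String key, double defaultValue) {
        String argument = props.getProperty(key, null);
        return argument == null ? defaultValue : Double.parseDouble(argument);
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("interval", "2.5");
        System.out.println(getDouble(props, "interval", 1.0)); // 2.5
        System.out.println(getDouble(props, "missing", 1.0));  // 1.0
    }
}
```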
flink_GlobalConfiguration_loadConfiguration
/** * Loads the configuration files from the specified directory. If the dynamic properties * configuration is not null, then it is added to the loaded configuration. * * @param configDir directory to load the configuration from * @param dynamicProperties configuration file containing the dynamic properties. Null if none. * @return The configuration loaded from the given configuration directory */ public static Configuration loadConfiguration( final String configDir, @Nullable final Configuration dynamicProperties) { if (configDir == null) { throw new IllegalArgumentException( "Given configuration directory is null, cannot load configuration"); } final File confDirFile = new File(configDir); if (!(confDirFile.exists())) { throw new IllegalConfigurationException( "The given configuration directory name '" + configDir + "' (" + confDirFile.getAbsolutePath() + ") does not describe an existing directory."); } // get Flink yaml configuration file final File yamlConfigFile = new File(confDirFile, FLINK_CONF_FILENAME); if (!yamlConfigFile.exists()) { throw new IllegalConfigurationException( "The Flink config file '" + yamlConfigFile + "' (" + yamlConfigFile.getAbsolutePath() + ") does not exist."); } Configuration configuration = loadYAMLResource(yamlConfigFile); logConfiguration("Loading", configuration); if (dynamicProperties != null) { logConfiguration("Loading dynamic", dynamicProperties); configuration.addAll(dynamicProperties); } return configuration; }
3.68
flink_DeltaIteration_setResources
/** * Sets the resources for the iteration; the minimum and preferred resources are the same by * default. The lower and upper resource limits will be taken into account by the dynamic * resource resizing feature planned for the future. * * @param resources The resources for the iteration. * @return The iteration with set minimum and preferred resources. */ private DeltaIteration<ST, WT> setResources(ResourceSpec resources) { OperatorValidationUtils.validateResources(resources); this.minResources = resources; this.preferredResources = resources; return this; }
3.68
framework_DragSourceExtension_onDragEnd
/** * Method invoked when a <code>dragend</code> has been sent from client * side. Fires the {@link DragEndEvent}. * * @param dropEffect * the drop effect on the dragend */ protected void onDragEnd(DropEffect dropEffect) { DragEndEvent<T> event = new DragEndEvent<>(getParent(), dropEffect); fireEvent(event); }
3.68
flink_TypeInferenceUtil_runTypeInference
/** * Runs the entire type inference process. * * @param typeInference type inference of the current call * @param callContext call context of the current call * @param surroundingInfo information about the outer wrapping call of a current function call * for performing input type inference */ public static Result runTypeInference( TypeInference typeInference, CallContext callContext, @Nullable SurroundingInfo surroundingInfo) { try { return runTypeInferenceInternal(typeInference, callContext, surroundingInfo); } catch (ValidationException e) { throw createInvalidCallException(callContext, e); } catch (Throwable t) { throw createUnexpectedException(callContext, t); } }
3.68
flink_PlanNode_setRelativeMemoryPerSubtask
/** * Sets the memory dedicated to each task for this node. * * @param relativeMemoryPerSubtask The relative memory per sub-task */ public void setRelativeMemoryPerSubtask(double relativeMemoryPerSubtask) { this.relativeMemoryPerSubTask = relativeMemoryPerSubtask; }
3.68
hbase_MonitoredTaskImpl_expireNow
/** * Force the completion timestamp backwards so that it expires now. */ @Override public void expireNow() { stateTime -= 180 * 1000; }
3.68
hbase_CatalogReplicaLoadBalanceSimpleSelector_getRandomReplicaId
/** * Selects a random replica id (including the primary replica id). In case there is no replica * region configured, returns the primary replica id. * @return Replica id */ private int getRandomReplicaId() { int cachedNumOfReplicas = this.numOfReplicas; if (cachedNumOfReplicas == CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS) { cachedNumOfReplicas = refreshCatalogReplicaCount(); this.numOfReplicas = cachedNumOfReplicas; } // In case of no replica configured, return the primary region id. if (cachedNumOfReplicas <= 1) { return RegionInfo.DEFAULT_REPLICA_ID; } return ThreadLocalRandom.current().nextInt(cachedNumOfReplicas); }
3.68
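A standalone sketch of the selection rule in getRandomReplicaId above: one replica (or none configured) always maps to the primary id, otherwise pick uniformly at random. The constant 0 stands in for RegionInfo.DEFAULT_REPLICA_ID, and the caching/refresh logic is omitted.

```java
import java.util.concurrent.ThreadLocalRandom;

// Illustrative replica picker: with <= 1 replica answer the primary id,
// otherwise choose uniformly among [0, numReplicas).
public class ReplicaPicker {
    static final int DEFAULT_REPLICA_ID = 0; // stand-in for RegionInfo.DEFAULT_REPLICA_ID

    static int pick(int numReplicas) {
        if (numReplicas <= 1) {
            return DEFAULT_REPLICA_ID;
        }
        return ThreadLocalRandom.current().nextInt(numReplicas);
    }

    public static void main(String[] args) {
        System.out.println(pick(1)); // always 0
        System.out.println(pick(3)); // 0, 1 or 2
    }
}
```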
morf_TableReference_getSchemaName
/** * Get the schema which contains this table. * * @return the schema name */ public String getSchemaName() { return schemaName; }
3.68
querydsl_GeneratedAnnotationResolver_resolveDefault
/** * Resolve the java {@code @Generated} annotation (can be of type {@code javax.annotation.Generated} * or {@code javax.annotation.processing.Generated} depending on the java version). * * @return the Generated annotation class from java. Never {@code null}. */ public static Class<? extends Annotation> resolveDefault() { return DEFAULT_GENERATED_ANNOTATION_CLASS; }
3.68
hbase_WALKeyImpl_getWriteTime
/** Returns the write time */ @Override public long getWriteTime() { return this.writeTime; }
3.68
framework_Overlay_setOwner
/** * Set owner (Widget that made this Overlay, not the layout parent) of * Overlay. * * @param owner * Owner (creator) of Overlay */ public void setOwner(Widget owner) { this.owner = owner; }
3.68
hadoop_HsController_tasks
/* * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#tasks() */ @Override public void tasks() { super.tasks(); }
3.68
hadoop_AbfsInputStream_markSupported
/** * gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false. * * @return always {@code false} */ @Override public boolean markSupported() { return false; }
3.68
hadoop_AzureBlobFileSystem_removeAcl
/** * Removes all but the base ACL entries of files and directories. The entries * for user, group, and others are retained for compatibility with permission * bits. * * @param path Path to modify * @throws IOException if an ACL could not be removed */ @Override public void removeAcl(final Path path) throws IOException { LOG.debug("AzureBlobFileSystem.removeAcl path: {}", path); TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.REMOVE_ACL, true, tracingHeaderFormat, listener); if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "removeAcl is only supported by storage accounts with the " + "hierarchical namespace enabled."); } Path qualifiedPath = makeQualified(path); try { abfsStore.removeAcl(qualifiedPath, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } }
3.68
flink_DecimalDataUtils_signum
/** * Returns the signum function of this decimal. (The return value is -1 if this decimal is * negative; 0 if this decimal is zero; and 1 if this decimal is positive.) * * @return the signum function of this decimal. */ public static int signum(DecimalData decimal) { if (decimal.isCompact()) { return Long.signum(decimal.toUnscaledLong()); } else { return decimal.toBigDecimal().signum(); } }
3.68
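The signum snippet above takes a fast path for decimals whose unscaled value fits in a long and falls back to BigDecimal otherwise. A small JDK-only illustration of the two paths; the unscaled long here is an assumed stand-in for DecimalData#toUnscaledLong.

```java
import java.math.BigDecimal;

// Two signum paths: Long.signum on a compact unscaled value, and
// BigDecimal#signum for values that do not fit in a long.
public class DecimalSignum {
    public static void main(String[] args) {
        long compactUnscaled = -12345L; // e.g. -123.45 stored at scale 2
        System.out.println(Long.signum(compactUnscaled)); // -1

        BigDecimal large = new BigDecimal("9.87654321098765432109876543210");
        System.out.println(large.signum());          // 1
        System.out.println(BigDecimal.ZERO.signum()); // 0
    }
}
```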
hbase_OrderedBytes_decodeNumericValue
/** * Decode a {@link BigDecimal} from {@code src}. Assumes {@code src} encodes a value in Numeric * encoding and is within the valid range of {@link BigDecimal} values. {@link BigDecimal} does * not support {@code NaN} or {@code Infinite} values. * @see #decodeNumericAsDouble(PositionedByteRange) */ private static BigDecimal decodeNumericValue(PositionedByteRange src) { final int e; byte header = src.get(); boolean dsc = -1 == Integer.signum(header); header = dsc ? DESCENDING.apply(header) : header; if (header == NULL) return null; if (header == NEG_LARGE) { /* Large negative number: 0x08, ~E, ~M */ e = (int) getVaruint64(src, !dsc); return decodeSignificand(src, e, !dsc).negate(); } if (header >= NEG_MED_MIN && header <= NEG_MED_MAX) { /* Medium negative number: 0x13-E, ~M */ e = NEG_MED_MAX - header; return decodeSignificand(src, e, !dsc).negate(); } if (header == NEG_SMALL) { /* Small negative number: 0x14, -E, ~M */ e = (int) -getVaruint64(src, dsc); return decodeSignificand(src, e, !dsc).negate(); } if (header == ZERO) { return BigDecimal.ZERO; } if (header == POS_SMALL) { /* Small positive number: 0x16, ~-E, M */ e = (int) -getVaruint64(src, !dsc); return decodeSignificand(src, e, dsc); } if (header >= POS_MED_MIN && header <= POS_MED_MAX) { /* Medium positive number: 0x17+E, M */ e = header - POS_MED_MIN; return decodeSignificand(src, e, dsc); } if (header == POS_LARGE) { /* Large positive number: 0x22, E, M */ e = (int) getVaruint64(src, dsc); return decodeSignificand(src, e, dsc); } throw unexpectedHeader(header); }
3.68
hudi_ValidateNode_execute
/** * Method to start the validate operation. An exception is thrown if the node has parent nodes * and WAIT_FOR_PARENTS is set to true (the default), but the parent nodes have not completed yet. * * @param executionContext Context to execute this node * @param curItrCount current iteration count. */ @Override public void execute(ExecutionContext executionContext, int curItrCount) { if (this.getParentNodes().size() > 0 && (Boolean) this.config.getOtherConfigs().getOrDefault("WAIT_FOR_PARENTS", true)) { for (DagNode node : (List<DagNode>) this.getParentNodes()) { if (!node.isCompleted()) { throw new RuntimeException("cannot validate before parent nodes are complete"); } } } this.result = this.function.apply((List<DagNode>) this.getParentNodes()); }
3.68
rocketmq-connect_WrapperStatusListener_onPause
/** * Invoked after the task has been paused. * * @param id The id of the task */ @Override public void onPause(ConnectorTaskId id) { managementService.put(new TaskStatus(id, TaskStatus.State.PAUSED, workerId, generation())); }
3.68
hbase_Threads_shutdown
/** * Shutdown passed thread using isAlive and join. * @param joinwait Pass 0 if we're to wait forever. * @param t Thread to shutdown */ public static void shutdown(final Thread t, final long joinwait) { if (t == null) return; while (t.isAlive()) { try { t.join(joinwait); } catch (InterruptedException e) { LOG.warn(t.getName() + "; joinwait=" + joinwait, e); } } }
3.68
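Here is a runnable, JDK-only demo of the join-in-a-loop idiom from Threads.shutdown above; a joinwait of 0 would make Thread#join wait indefinitely, so the loop keeps re-joining in slices until the thread dies.

```java
// Join-in-a-loop shutdown: keep joining while the thread is alive,
// tolerating interrupts instead of giving up.
public class ShutdownDemo {
    static void shutdown(Thread t, long joinwait) {
        if (t == null) return;
        while (t.isAlive()) {
            try {
                t.join(joinwait);
            } catch (InterruptedException e) {
                System.err.println(t.getName() + "; joinwait=" + joinwait);
            }
        }
    }

    public static void main(String[] args) {
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(200); // simulate some work
            } catch (InterruptedException ignored) {
            }
        }, "worker");
        worker.start();
        shutdown(worker, 50); // joins in 50ms slices until the worker exits
        System.out.println("worker alive: " + worker.isAlive()); // false
    }
}
```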
open-banking-gateway_PsuSecureStorage_getOrCreateKeyFromPrivateForAspsp
/** * Gets or generates the key pair for PSU-to-ASPSP consent protection * @param password Key protection password * @param session Authorization session for current user * @param storePublicKeyIfNeeded If the public key needs to be stored * @return Public and private key pair to protect the PSU and ASPSP consent grant */ @SneakyThrows public PubAndPrivKey getOrCreateKeyFromPrivateForAspsp(Supplier<char[]> password, AuthSession session, BiConsumer<UUID, PublicKey> storePublicKeyIfNeeded) { try (InputStream is = datasafeServices.privateService().read( ReadRequest.forDefaultPrivate( session.getPsu().getUserIdAuth(password), new PairIdPsuAspspTuple(session).toDatasafePathWithoutPsuAndId() ) )) { return serde.readKey(is); } catch (BaseDatasafeDbStorageService.DbStorageEntityNotFoundException ex) { return generateAndSaveAspspSecretKey(password, session, storePublicKeyIfNeeded); } }
3.68
hibernate-validator_SizeValidatorForArraysOfChar_isValid
/** * Checks the number of entries in an array. * * @param array The array to validate. * @param constraintValidatorContext context in which the constraint is evaluated. * * @return Returns {@code true} if the array is {@code null} or the number of entries in * {@code array} is between the specified {@code min} and {@code max} values (inclusive), * {@code false} otherwise. */ @Override public boolean isValid(char[] array, ConstraintValidatorContext constraintValidatorContext) { if ( array == null ) { return true; } return array.length >= min && array.length <= max; }
3.68
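A minimal model of the Bean Validation convention in isValid above: null is reported as valid (null checking belongs to a separate @NotNull constraint), and only the length range is enforced. min and max stand in for the @Size annotation attributes.

```java
// Null-tolerant length-range check mirroring the validator above.
public class SizeCheck {
    static boolean isValid(char[] array, int min, int max) {
        if (array == null) {
            return true; // defer null handling to a separate constraint
        }
        return array.length >= min && array.length <= max;
    }

    public static void main(String[] args) {
        System.out.println(isValid(null, 1, 3));                  // true
        System.out.println(isValid("ab".toCharArray(), 1, 3));    // true
        System.out.println(isValid("abcd".toCharArray(), 1, 3));  // false
    }
}
```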
flink_PurgingTrigger_of
/** * Creates a new purging trigger from the given {@code Trigger}. * * @param nestedTrigger The trigger that is wrapped by this purging trigger */ public static <T, W extends Window> PurgingTrigger<T, W> of(Trigger<T, W> nestedTrigger) { return new PurgingTrigger<>(nestedTrigger); }
3.68
framework_Button_addClickListener
/** * Adds the button click listener. * * @see Registration * * @param listener * the Listener to be added. * @return a registration object for removing the listener * @since 8.0 */ public Registration addClickListener(ClickListener listener) { return addListener(ClickEvent.class, listener, ClickListener.BUTTON_CLICK_METHOD); }
3.68
flink_EmbeddedRocksDBStateBackend_setRocksDBMemoryFactory
/** Set RocksDBMemoryFactory. */ public void setRocksDBMemoryFactory(RocksDBMemoryFactory rocksDBMemoryFactory) { this.rocksDBMemoryFactory = checkNotNull(rocksDBMemoryFactory); }
3.68
flink_RocksDBMemoryConfiguration_getWriteBufferRatio
/** * Gets the fraction of the total memory to be used for write buffers. This only has an effect * if either {@link #setUseManagedMemory(boolean)} or {@link #setFixedMemoryPerSlot(MemorySize)} * is set. * * <p>See {@link RocksDBOptions#WRITE_BUFFER_RATIO} for details. */ public double getWriteBufferRatio() { return writeBufferRatio != null ? writeBufferRatio : RocksDBOptions.WRITE_BUFFER_RATIO.defaultValue(); }
3.68
AreaShop_WorldGuardHandler5_buildDomain
/** * Build a DefaultDomain from a RegionAccessSet. * @param regionAccessSet RegionAccessSet to read * @return DefaultDomain containing the entities from the RegionAccessSet */ private DefaultDomain buildDomain(RegionAccessSet regionAccessSet) { DefaultDomain owners = new DefaultDomain(); for(String playerName : regionAccessSet.getPlayerNames()) { owners.addPlayer(playerName); } // Add by name since UUIDs were not yet supported for(UUID uuid : regionAccessSet.getPlayerUniqueIds()) { OfflinePlayer offlinePlayer = Bukkit.getOfflinePlayer(uuid); if(offlinePlayer != null && offlinePlayer.getName() != null) { owners.addPlayer(offlinePlayer.getName()); } } for(String group : regionAccessSet.getGroupNames()) { owners.addGroup(group); } return owners; }
3.68
morf_ResultSetIterator_close
/** * @see java.lang.AutoCloseable#close() */ @Override public final void close() throws SQLException { this.resultSet.close(); this.statement.close(); }
3.68
hbase_Constraints_setConfiguration
/** * Update the configuration for the {@link Constraint}; does not change the order in which the * constraint is run. * @param builder {@link TableDescriptorBuilder} to update * @param clazz {@link Constraint} to update * @param configuration to update the {@link Constraint} with. * @throws IOException if the Constraint was not stored correctly * @throws IllegalArgumentException if the Constraint was not present on this table. */ public static TableDescriptorBuilder setConfiguration(TableDescriptorBuilder builder, Class<? extends Constraint> clazz, Configuration configuration) throws IOException, IllegalArgumentException { // get the entry for this class Pair<String, String> e = getKeyValueForClass(builder, clazz); if (e == null) { throw new IllegalArgumentException( "Constraint: " + clazz.getName() + " is not associated with this table."); } // clone over the configuration elements Configuration conf = new Configuration(configuration); // read in the previous info about the constraint Configuration internal = readConfiguration(e.getSecond()); // update the fields based on the previous settings conf.setIfUnset(ENABLED_KEY, internal.get(ENABLED_KEY)); conf.setIfUnset(PRIORITY_KEY, internal.get(PRIORITY_KEY)); // update the current value return writeConstraint(builder, e.getFirst(), conf); }
3.68
hbase_HRegionServer_preRegistrationInitialization
/** * All initialization needed before we go register with Master.<br> * Do bare minimum. Do bulk of initializations AFTER we've connected to the Master.<br> * In here we just put up the RpcServer, setup Connection, and ZooKeeper. */ private void preRegistrationInitialization() { final Span span = TraceUtil.createSpan("HRegionServer.preRegistrationInitialization"); try (Scope ignored = span.makeCurrent()) { initializeZooKeeper(); setupClusterConnection(); bootstrapNodeManager = new BootstrapNodeManager(asyncClusterConnection, masterAddressTracker); regionReplicationBufferManager = new RegionReplicationBufferManager(this); // Setup RPC client for master communication this.rpcClient = asyncClusterConnection.getRpcClient(); span.setStatus(StatusCode.OK); } catch (Throwable t) { // Call stop if error or process will stick around for ever since server // puts up non-daemon threads. TraceUtil.setError(span, t); this.rpcServices.stop(); abort("Initialization of RS failed. Hence aborting RS.", t); } finally { span.end(); } }
3.68
hbase_ImmutableMemStoreLAB_getNewExternalChunk
/* * Returns a new chunk without replacing the current chunk, meaning MSLABImpl does not set the * returned chunk as its CurChunk. The space on this chunk will be allocated externally. The * interface is only for external callers. */ @Override public Chunk getNewExternalChunk(int size) { MemStoreLAB mslab = this.mslabs.get(0); return mslab.getNewExternalChunk(size); }
3.68
hbase_ByteBufferKeyValue_equals
/** * Needed for doing 'contains' on a List. Only compares the key portion, not the value. */ @Override public boolean equals(Object other) { if (!(other instanceof Cell)) { return false; } return CellUtil.equals(this, (Cell) other); }
3.68
framework_CheckBoxGroup_getItemDescriptionGenerator
/** * Gets the item description generator. * * @return the item description generator * * @since 8.2 */ public DescriptionGenerator<T> getItemDescriptionGenerator() { return descriptionGenerator; }
3.68
flink_CatalogDatabaseImpl_getProperties
/** Get a map of properties associated with the database. */ public Map<String, String> getProperties() { return properties; }
3.68
flink_CopyOnWriteStateMap_compositeHash
/** Helper function that creates and scrambles a composite hash for key and namespace. */ private static int compositeHash(Object key, Object namespace) { // create composite key through XOR, then apply some bit-mixing for better distribution of // skewed keys. return MathUtils.bitMix(key.hashCode() ^ namespace.hashCode()); }
3.68
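A standalone sketch of the composite-hash idea above: XOR the two hash codes, then bit-mix the result so skewed inputs still spread across buckets. The mixer below is the 32-bit MurmurHash3 finalizer, used here as an assumed stand-in for Flink's internal MathUtils.bitMix.

```java
// Composite hash for (key, namespace): XOR the hashes, then avalanche the
// bits so structured or skewed inputs still distribute well.
public class CompositeHash {
    // 32-bit MurmurHash3 finalizer (fmix32), assumed stand-in for MathUtils.bitMix.
    static int bitMix(int h) {
        h ^= h >>> 16;
        h *= 0x85ebca6b;
        h ^= h >>> 13;
        h *= 0xc2b2ae35;
        h ^= h >>> 16;
        return h;
    }

    static int compositeHash(Object key, Object namespace) {
        return bitMix(key.hashCode() ^ namespace.hashCode());
    }

    public static void main(String[] args) {
        System.out.println(compositeHash("user-42", "window-1"));
        System.out.println(compositeHash("user-42", "window-2")); // differs
    }
}
```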
zxing_QRCode_isValidMaskPattern
// Check if "mask_pattern" is valid. public static boolean isValidMaskPattern(int maskPattern) { return maskPattern >= 0 && maskPattern < NUM_MASK_PATTERNS; }
3.68
flink_CatalogManager_dropDatabase
/** * Drop a database. * * @param catalogName Name of the catalog for database. * @param databaseName Name of the database to be dropped. * @param ignoreIfNotExists Flag to specify behavior when the database does not exist: if set to * false, throw an exception, if set to true, do nothing. * @param cascade Flag to specify behavior when the database contains tables or functions: if set * to true, delete all tables and functions in the database and then delete the database, if * set to false, throw an exception. * @throws DatabaseNotExistException if the given database does not exist * @throws DatabaseNotEmptyException if the given database is not empty and cascade is false * @throws CatalogException in case of any runtime exception */ public void dropDatabase( String catalogName, String databaseName, boolean ignoreIfNotExists, boolean cascade) throws DatabaseNotExistException, DatabaseNotEmptyException, CatalogException { if (Objects.equals(currentCatalogName, catalogName) && Objects.equals(currentDatabaseName, databaseName)) { throw new ValidationException("Cannot drop a database which is currently in use."); } Catalog catalog = getCatalogOrError(catalogName); catalog.dropDatabase(databaseName, ignoreIfNotExists, cascade); catalogModificationListeners.forEach( listener -> listener.onEvent( DropDatabaseEvent.createEvent( CatalogContext.createContext(catalogName, catalog), databaseName, ignoreIfNotExists, cascade))); }
3.68
querydsl_EnumExpression_coalesce
/** * Create a {@code coalesce(this, args...)} expression * * @param args additional arguments * @return coalesce */ @Override @SuppressWarnings({"unchecked"}) public EnumExpression<T> coalesce(T... args) { Coalesce<T> coalesce = new Coalesce<T>(getType(), mixin); for (T arg : args) { coalesce.add(arg); } return (EnumExpression<T>) coalesce.asEnum(); }
3.68
hbase_MetricsREST_incrementSucessfulScanRequests
/** * @param inc How much to add to sucessfulScanCount. */ public synchronized void incrementSucessfulScanRequests(final int inc) { source.incrementSucessfulScanRequests(inc); }
3.68
pulsar_ProducerConfiguration_setInitialSequenceId
/** * Set the baseline for the sequence ids for messages published by the producer. * <p> * First message will be using (initialSequenceId + 1) as its sequence id and subsequent messages will be assigned * incremental sequence ids, if not otherwise specified. * * @param initialSequenceId the baseline sequence id * @return the producer configuration */ public ProducerConfiguration setInitialSequenceId(long initialSequenceId) { conf.setInitialSequenceId(initialSequenceId); return this; }
3.68
pulsar_AuthorizationProvider_allowTenantOperation
/** * @deprecated - will be removed after 2.12. Use async variant. */ @Deprecated default Boolean allowTenantOperation(String tenantName, String role, TenantOperation operation, AuthenticationDataSource authData) { try { return allowTenantOperationAsync(tenantName, role, operation, authData).get(); } catch (InterruptedException e) { throw new RestException(e); } catch (ExecutionException e) { throw new RestException(e.getCause()); } }
3.68
hmily_KryoPoolFactory_get
/** * Get kryo. * * @return the kryo */ public Kryo get() { return pool.borrow(); }
3.68
flink_ArrowSerializer_finishCurrentBatch
/** * Forces to finish the processing of the current batch of elements. It will serialize the batch * of elements into one arrow batch. */ public void finishCurrentBatch() throws Exception { arrowWriter.finish(); arrowStreamWriter.writeBatch(); arrowWriter.reset(); }
3.68
hbase_HRegionFileSystem_createSplitsDir
/** * Creates region split daughter directories under the table dir. If the daughter regions already * exist, for example, in the case of a recovery from a previous failed split procedure, this * method deletes the given region dir recursively, then recreates it again. */ public void createSplitsDir(RegionInfo daughterA, RegionInfo daughterB) throws IOException { Path daughterADir = getSplitsDir(daughterA); if (fs.exists(daughterADir) && !deleteDir(daughterADir)) { throw new IOException("Failed deletion of " + daughterADir + " before creating them again."); } if (!createDir(daughterADir)) { throw new IOException("Failed create of " + daughterADir); } Path daughterBDir = getSplitsDir(daughterB); if (fs.exists(daughterBDir) && !deleteDir(daughterBDir)) { throw new IOException("Failed deletion of " + daughterBDir + " before creating them again."); } if (!createDir(daughterBDir)) { throw new IOException("Failed create of " + daughterBDir); } }
3.68
pulsar_NamespaceBundleFactory_getNamespaceFromPoliciesPath
/* * @param path - path for the namespace policies ex. /admin/policies/prop/cluster/namespace * * @returns namespace with path, ex. prop/cluster/namespace */ public static String getNamespaceFromPoliciesPath(String path) { if (path.isEmpty()) { return path; } // String before / is considered empty string by splitter Iterable<String> splitter = Splitter.on("/").limit(6).split(path); Iterator<String> i = splitter.iterator(); // skip first three - "","admin", "policies" i.next(); i.next(); i.next(); // prop, cluster, namespace return Joiner.on("/").join(i); }
3.68
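A plain-JDK sketch of the path handling in getNamespaceFromPoliciesPath above: the leading '/' yields an empty first token, so skipping three tokens ("", "admin", "policies") and re-joining the remainder leaves prop/cluster/namespace. String.split with a limit of 6 plays the role of Guava's Splitter.on("/").limit(6).

```java
// Reimplementation of the skip-three-then-join logic without Guava.
public class PoliciesPath {
    static String namespaceFromPoliciesPath(String path) {
        if (path.isEmpty()) {
            return path;
        }
        String[] parts = path.split("/", 6); // ["", "admin", "policies", prop, cluster, namespace]
        StringBuilder sb = new StringBuilder();
        for (int i = 3; i < parts.length; i++) { // skip "", "admin", "policies"
            if (sb.length() > 0) {
                sb.append('/');
            }
            sb.append(parts[i]);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(namespaceFromPoliciesPath("/admin/policies/prop/cluster/namespace"));
        // prints: prop/cluster/namespace
    }
}
```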
hbase_AuthManager_authorizeUserTable
/** * Check if user has the given action privilege in table:family:qualifier scope. * @param user user name * @param table table name * @param family family name * @param qualifier qualifier name * @param action one of the actions in [Read, Write, Create, Exec, Admin] * @return true if user has, false otherwise */ public boolean authorizeUserTable(User user, TableName table, byte[] family, byte[] qualifier, Permission.Action action) { if (user == null) { return false; } if (table == null) { table = PermissionStorage.ACL_TABLE_NAME; } if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) { return true; } PermissionCache<TablePermission> tblPermissions = tableCache.getOrDefault(table, TBL_NO_PERMISSION); if (authorizeTable(tblPermissions.get(user.getShortName()), table, family, qualifier, action)) { return true; } for (String group : user.getGroupNames()) { if ( authorizeTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), table, family, qualifier, action) ) { return true; } } return false; }
3.68
hbase_ChaosAgent_createZKConnection
/*** * Creates Connection with ZooKeeper. * @throws IOException if something goes wrong */ private void createZKConnection(Watcher watcher) throws IOException { if (watcher == null) { zk = new ZooKeeper(quorum, ChaosConstants.SESSION_TIMEOUT_ZK, this); } else { zk = new ZooKeeper(quorum, ChaosConstants.SESSION_TIMEOUT_ZK, watcher); } LOG.info("ZooKeeper Connection created for ChaosAgent: " + agentName); }
3.68
graphhopper_OSMFileHeader_create
/** * Factory method used by the XML parser. */ public static OSMFileHeader create(long id, XMLStreamReader parser) throws XMLStreamException { OSMFileHeader header = new OSMFileHeader(); parser.nextTag(); return header; }
3.68
pulsar_InternalConfigurationData_getLedgersRootPath
/** @deprecated */ @Deprecated public String getLedgersRootPath() { return ledgersRootPath; }
3.68
hbase_TableInfoModel_setRegions
/** * @param regions the regions to set */ public void setRegions(List<TableRegionModel> regions) { this.regions = regions; }
3.68
framework_Header_isDefault
/** * Returns whether this row is the default header row. * * @return {@code true} if this row is the default row, {@code false} * otherwise. */ protected boolean isDefault() { return getRowState().defaultHeader; }
3.68
hbase_VisibilityUtils_getScanLabelGenerators
/** * @param conf The configuration to use * @return List of ScanLabelGenerator instances. ScanLabelGenerator classes can be specified in * Configuration as a comma separated list using the key * "hbase.regionserver.scan.visibility.label.generator.class". * @throws IllegalArgumentException when any of the specified ScanLabelGenerator classes cannot * be loaded. */ public static List<ScanLabelGenerator> getScanLabelGenerators(Configuration conf) { // There can be n SLG specified as comma separated in conf String slgClassesCommaSeparated = conf.get(VISIBILITY_LABEL_GENERATOR_CLASS); // We have only System level SLGs now. The order of execution will be same as the order in the // comma separated config value List<ScanLabelGenerator> slgs = new ArrayList<>(); if (StringUtils.isNotEmpty(slgClassesCommaSeparated)) { String[] slgClasses = slgClassesCommaSeparated.split(COMMA); for (String slgClass : slgClasses) { Class<? extends ScanLabelGenerator> slgKlass; try { slgKlass = (Class<? extends ScanLabelGenerator>) conf.getClassByName(slgClass.trim()); slgs.add(ReflectionUtils.newInstance(slgKlass, conf)); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Unable to find " + slgClass, e); } } } // If no SLG is specified in conf, by default we'll add two SLGs // 1. FeedUserAuthScanLabelGenerator // 2. DefinedSetFilterScanLabelGenerator // This stacking will achieve the following default behavior: // 1. If there is no Auths in the scan, we will obtain the global defined set for the user // from the labels table. // 2. If there is Auths in the scan, we will examine the passed in Auths and filter out the // labels that the user is not entitled to. Then use the resulting label set. if (slgs.isEmpty()) { slgs.add(ReflectionUtils.newInstance(FeedUserAuthScanLabelGenerator.class, conf)); slgs.add(ReflectionUtils.newInstance(DefinedSetFilterScanLabelGenerator.class, conf)); } return slgs; }
3.68
hadoop_Validate_checkLessOrEqual
/** * Validates that the first value is less than or equal to the second value. * @param value1 the first value to check. * @param value1Name the name of the first argument. * @param value2 the second value to check. * @param value2Name the name of the second argument. */ public static void checkLessOrEqual( long value1, String value1Name, long value2, String value2Name) { checkArgument( value1 <= value2, "'%s' (%s) must be less than or equal to '%s' (%s).", value1Name, value1, value2Name, value2); }
3.68
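A minimal model of the guard-clause style in checkLessOrEqual above; checkArgument here is a local helper that mimics, but is not, the Hadoop utility.

```java
// Precondition helper: validate the relation and raise IllegalArgumentException
// with a message that names both arguments and their values.
public class RangeGuards {
    static void checkArgument(boolean condition, String format, Object... args) {
        if (!condition) {
            throw new IllegalArgumentException(String.format(format, args));
        }
    }

    static void checkLessOrEqual(long value1, String value1Name,
                                 long value2, String value2Name) {
        checkArgument(value1 <= value2,
            "'%s' (%s) must be less than or equal to '%s' (%s).",
            value1Name, value1, value2Name, value2);
    }

    public static void main(String[] args) {
        checkLessOrEqual(1, "offset", 10, "length"); // passes silently
        try {
            checkLessOrEqual(11, "offset", 10, "length");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```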
flink_AbstractKeyedStateBackend_getNumberOfKeyGroups
/** @see KeyedStateBackend */ public int getNumberOfKeyGroups() { return numberOfKeyGroups; }
3.68
graphhopper_State_getIncomingVirtualEdge
/** * Returns the virtual edge that should be used by incoming paths. * * @throws IllegalStateException if this State is not directed. */ public EdgeIteratorState getIncomingVirtualEdge() { if (!isDirected) { throw new IllegalStateException( "This method may only be called for directed GPXExtensions"); } return incomingVirtualEdge; }
3.68
hadoop_DataNodeFaultInjector_logDelaySendingAckToUpstream
/** * Used as a hook to intercept the latency of sending ack. */ public void logDelaySendingAckToUpstream( final String upstreamAddr, final long delayMs) throws IOException { }
3.68
hbase_ConfigurationManager_containsObserver
/** Returns true if contains the observer, for unit test only */ public boolean containsObserver(ConfigurationObserver observer) { synchronized (configurationObservers) { return configurationObservers.contains(observer); } }
3.68
hbase_StoreUtils_getDeterministicRandomSeed
/** * Creates a deterministic hash code for store file collection. */ public static OptionalInt getDeterministicRandomSeed(Collection<HStoreFile> files) { return files.stream().mapToInt(f -> f.getPath().getName().hashCode()).findFirst(); }
3.68
hbase_Bytes_putInt
/** * Put an int value out to the specified byte array position. * @param bytes the byte array * @param offset position in the array * @param val int to write out * @return incremented offset * @throws IllegalArgumentException if the byte array given doesn't have enough room at the offset * specified. */ public static int putInt(byte[] bytes, int offset, int val) { if (bytes.length - offset < SIZEOF_INT) { throw new IllegalArgumentException("Not enough room to put an int at" + " offset " + offset + " in a " + bytes.length + " byte array"); } return ConverterHolder.BEST_CONVERTER.putInt(bytes, offset, val); }
3.68
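A standalone sketch of the putInt contract above: bounds-check first, then write the int big-endian and return the incremented offset so writes can be chained. The manual shifts stand in for whatever ConverterHolder.BEST_CONVERTER does internally.

```java
// Bounds-checked big-endian int writer returning the next free offset.
public class PutInt {
    static final int SIZEOF_INT = 4;

    static int putInt(byte[] bytes, int offset, int val) {
        if (bytes.length - offset < SIZEOF_INT) {
            throw new IllegalArgumentException("Not enough room to put an int at offset "
                + offset + " in a " + bytes.length + " byte array");
        }
        bytes[offset] = (byte) (val >>> 24);
        bytes[offset + 1] = (byte) (val >>> 16);
        bytes[offset + 2] = (byte) (val >>> 8);
        bytes[offset + 3] = (byte) val;
        return offset + SIZEOF_INT;
    }

    public static void main(String[] args) {
        byte[] buf = new byte[8];
        int next = putInt(buf, 0, 0x01020304);
        next = putInt(buf, next, -1); // chained writes
        System.out.println(next);                                // 8
        System.out.println(buf[0] + " " + buf[3] + " " + buf[4]); // 1 4 -1
    }
}
```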
querydsl_MetaDataExporter_export
/** * Export the tables based on the given database metadata * * @param md database metadata * @throws SQLException */ public void export(DatabaseMetaData md) throws SQLException { if (beanPackageName == null) { beanPackageName = module.getPackageName(); } if (beansTargetFolder == null) { beansTargetFolder = targetFolder; } module.bind(SQLCodegenModule.BEAN_PACKAGE_NAME, beanPackageName); module.loadExtensions(); classes.clear(); typeMappings = module.get(TypeMappings.class); queryTypeFactory = module.get(QueryTypeFactory.class); serializer = module.get(Serializer.class); beanSerializer = module.get(Serializer.class, SQLCodegenModule.BEAN_SERIALIZER); namingStrategy = module.get(NamingStrategy.class); configuration = module.get(Configuration.class); SQLTemplates templates = sqlTemplatesRegistry.getTemplates(md); if (templates != null) { configuration.setTemplates(templates); } else { logger.info("Found no specific dialect for " + md.getDatabaseProductName()); } if (beanSerializer == null) { keyDataFactory = new KeyDataFactory(namingStrategy, module.getPackageName(), module.getPrefix(), module.getSuffix(), schemaToPackage); } else { keyDataFactory = new KeyDataFactory(namingStrategy, beanPackageName, module.getBeanPrefix(), module.getBeanSuffix(), schemaToPackage); } String[] typesArray = null; if (tableTypesToExport != null && !tableTypesToExport.isEmpty()) { List<String> types = new ArrayList<String>(); for (String tableType : tableTypesToExport.split(",")) { types.add(tableType.trim()); } typesArray = types.toArray(new String[0]); } else if (!exportAll) { List<String> types = new ArrayList<String>(2); if (exportTables) { types.add("TABLE"); } if (exportViews) { types.add("VIEW"); } typesArray = types.toArray(new String[0]); } List<String> catalogs = patternAsList(catalogPattern); List<String> schemas = patternAsList(schemaPattern); List<String> tables = patternAsList(tableNamePattern); for (String catalog : catalogs) { catalog = trimIfNonNull(catalog); for (String schema : schemas) { schema = trimIfNonNull(schema); for (String table : tables) { table = trimIfNonNull(table); handleTables(md, catalog, schema, table, typesArray); } } } }
3.68
pulsar_Topics_removeBacklogQuota
/** * @deprecated Use {@link TopicPolicies#removeBacklogQuota(String)} instead. */ @Deprecated default void removeBacklogQuota(String topic) throws PulsarAdminException { removeBacklogQuota(topic, BacklogQuota.BacklogQuotaType.destination_storage); }
3.68
morf_AbstractSqlDialectTest_testLeast
/** * Tests that the LEAST functionality behaves as expected. */ @Test public void testLeast() { SelectStatement testStatement = select(least(new NullFieldLiteral(), field("bob"))).from(tableRef("MyTable")); assertEquals(expectedLeast().toLowerCase(), testDialect.convertStatementToSQL(testStatement).toLowerCase()); }
3.68
graphhopper_Path_getFromNode
/** * @return the first node of this Path. */ private int getFromNode() { if (fromNode < 0) throw new IllegalStateException("fromNode < 0 should not happen"); return fromNode; }
3.68
morf_SqlUtils_isEmpty
/** * Shortcut to "empty or null", where empty means spaces only. * * <p>Note that only <i>spaces</i> are considered empty. Tabs and newlines are not considered empty. * This will therefore give somewhat different results to {@link StringUtils#isBlank(CharSequence)}.</p> * * @param expression the expression to evaluate. * @return an expression wrapping the passed expression with additional * criteria to ensure it is not blank or null. */ public static Criterion isEmpty(AliasedField expression) { return Function.coalesce(Function.length(Function.trim(expression)), literal(0)).eq(0); }
3.68
flink_CopyOnWriteSkipListStateMap_helpGetNextNode
/** Return the next of the given node at the given level. */ long helpGetNextNode(long node, int level) { return SkipListUtils.helpGetNextNode( node, level, this.levelIndexHeader, this.spaceAllocator); }
3.68
framework_VTextArea_getTextAreaElement
/** * Gets the base TextAreaElement of this widget. * * @return the base element */ public TextAreaElement getTextAreaElement() { return super.getElement().cast(); }
3.68
hudi_BaseHoodieTableServiceClient_inlineClustering
/** * Executes a clustering plan on a table, serially before or after an insert/upsert action. * Schedules and executes clustering inline. */ protected Option<String> inlineClustering(Option<Map<String, String>> extraMetadata) { Option<String> clusteringInstantOpt = inlineScheduleClustering(extraMetadata); clusteringInstantOpt.ifPresent(clusteringInstant -> { // inline cluster should auto commit as the user is never given control cluster(clusteringInstant, true); }); return clusteringInstantOpt; }
3.68
framework_VScrollTable_fireColumnResizeEvent
/** * Fires a column resize event which sends the resize information to the * server. * * @param columnId * The columnId of the column which was resized * @param originalWidth * The width in pixels of the column before the resize event * @param newWidth * The width in pixels of the column after the resize event */ private void fireColumnResizeEvent(String columnId, int originalWidth, int newWidth) { client.updateVariable(paintableId, "columnResizeEventColumn", columnId, false); client.updateVariable(paintableId, "columnResizeEventPrev", originalWidth, false); client.updateVariable(paintableId, "columnResizeEventCurr", newWidth, immediate); }
3.68
flink_Tuple7_setFields
/** * Sets new values to all fields of the tuple. * * @param f0 The value for field 0 * @param f1 The value for field 1 * @param f2 The value for field 2 * @param f3 The value for field 3 * @param f4 The value for field 4 * @param f5 The value for field 5 * @param f6 The value for field 6 */ public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6) { this.f0 = f0; this.f1 = f1; this.f2 = f2; this.f3 = f3; this.f4 = f4; this.f5 = f5; this.f6 = f6; }
3.68
hadoop_JsonSerDeser_toJson
/** * Convert an object to a JSON string * @param instance instance to convert * @return a JSON string description * @throws JsonProcessingException parse problems */ public String toJson(T instance) throws JsonProcessingException { mapper.configure(SerializationFeature.INDENT_OUTPUT, true); return mapper.writeValueAsString(instance); }
3.68
flink_DeployParser_parseDeployOutput
/** * Parses the output of a Maven build where {@code deploy:deploy} was used, and returns a set of * deployed modules. */ public static Set<String> parseDeployOutput(File buildResult) throws IOException { try (Stream<String> linesStream = Files.lines(buildResult.toPath())) { return parseDeployOutput(linesStream); } }
3.68
flink_BinarySegmentUtils_getInt
/** * get int from segments. * * @param segments target segments. * @param offset value offset. */ public static int getInt(MemorySegment[] segments, int offset) { if (inFirstSegment(segments, offset, 4)) { return segments[0].getInt(offset); } else { return getIntMultiSegments(segments, offset); } }
3.68