Columns:
  name          string   (lengths 12 - 178)
  code_snippet  string   (lengths 8 - 36.5k)
  score         float64  (range 3.26 - 3.68)
flink_FlinkContainersSettings_numTaskManagers
/**
 * Sets the {@code numTaskManagers} and returns a reference to this Builder enabling method
 * chaining.
 *
 * @param numTaskManagers The {@code numTaskManagers} to set.
 * @return A reference to this Builder.
 */
public Builder numTaskManagers(int numTaskManagers) {
    this.numTaskManagers = numTaskManagers;
    return this;
}
3.68
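A minimal usage sketch for the fluent builder above (the surrounding FlinkContainersSettings class and its build() method are assumed from context, not shown in the row):

    // Hypothetical chained configuration; only numTaskManagers(...) appears in the row above.
    FlinkContainersSettings settings = FlinkContainersSettings.builder()
            .numTaskManagers(2)   // each setter returns the Builder, enabling chaining
            .build();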
framework_AbstractSelect_getType
/**
 * Gets the property type.
 *
 * @param propertyId
 *            the Id identifying the property.
 * @see Container#getType(java.lang.Object)
 */
@Override
public Class<?> getType(Object propertyId) {
    return items.getType(propertyId);
}
3.68
pulsar_AuthorizationService_canLookup
/**
 * Check whether the specified role can perform a lookup for the specified topic.
 *
 * For that the caller needs to have producer or consumer permission.
 *
 * @param topicName the topic to look up
 * @param role the role performing the lookup
 * @param authenticationData authentication data of the client
 * @return true if the role is allowed to perform the lookup
 * @throws Exception if the authorization check fails or times out
 */
public boolean canLookup(TopicName topicName, String role, AuthenticationDataSource authenticationData)
        throws Exception {
    try {
        return canLookupAsync(topicName, role, authenticationData)
                .get(conf.getMetadataStoreOperationTimeoutSeconds(), SECONDS);
    } catch (TimeoutException e) {
        log.warn("Time-out {} sec while checking authorization on {} ",
                conf.getMetadataStoreOperationTimeoutSeconds(), topicName);
        throw e;
    } catch (Exception e) {
        log.warn("Role - {} failed to get lookup permissions for topic - {}. {}", role, topicName,
                e.getMessage());
        throw e;
    }
}
3.68
framework_VOptionGroup_buildOptions
/*
 * Try to update content of existing elements, rebuild panel entirely
 * otherwise
 */
@Override
public void buildOptions(UIDL uidl) {
    /*
     * In order to retain focus, we need to update values rather than
     * recreate panel from scratch (#10451). However, the panel will be
     * rebuilt (losing focus) if number of elements or their order is
     * changed.
     */
    Map<String, CheckBox> keysToOptions = new HashMap<String, CheckBox>();
    for (Map.Entry<CheckBox, String> entry : optionsToKeys.entrySet()) {
        keysToOptions.put(entry.getValue(), entry.getKey());
    }
    List<Widget> existingwidgets = new ArrayList<Widget>();
    List<Widget> newwidgets = new ArrayList<Widget>();

    // Get current order of elements
    for (Widget wid : panel) {
        existingwidgets.add(wid);
    }

    optionsEnabled.clear();

    if (isMultiselect()) {
        Roles.getGroupRole().set(getElement());
    } else {
        Roles.getRadiogroupRole().set(getElement());
    }

    for (final Object child : uidl) {
        final UIDL opUidl = (UIDL) child;

        String itemHtml = opUidl.getStringAttribute("caption");
        if (!htmlContentAllowed) {
            itemHtml = WidgetUtil.escapeHTML(itemHtml);
        }

        String iconUrl = opUidl.getStringAttribute("icon");
        if (iconUrl != null && !iconUrl.isEmpty()) {
            Icon icon = client.getIcon(iconUrl);
            itemHtml = icon.getElement().getString() + itemHtml;
        }

        String key = opUidl.getStringAttribute("key");
        CheckBox op = keysToOptions.get(key);

        // Need to recreate object if isMultiselect is changed (#10451)
        // OR if htmlContentAllowed changed due to Safari 5 issue
        if ((op == null) || (htmlContentAllowed != wasHtmlContentAllowed)
                || (isMultiselect() != wasMultiselect)) {
            // Create a new element
            if (isMultiselect()) {
                op = new VCheckBox();
            } else {
                op = new RadioButton(paintableId);
                op.setStyleName("v-radiobutton");
            }
            if (iconUrl != null && !iconUrl.isEmpty()) {
                WidgetUtil.sinkOnloadForImages(op.getElement());
                op.addHandler(iconLoadHandler, LoadEvent.getType());
            }
            op.addStyleName(CLASSNAME_OPTION);
            op.addClickHandler(this);
            optionsToKeys.put(op, key);
        }

        op.setHTML(itemHtml);
        op.setValue(opUidl.getBooleanAttribute("selected"));
        boolean optionEnabled = !opUidl.getBooleanAttribute(
                OptionGroupConstants.ATTRIBUTE_OPTION_DISABLED);
        boolean enabled = optionEnabled && !isReadonly() && isEnabled();
        op.setEnabled(enabled);
        optionsEnabled.put(op, optionEnabled);
        setStyleName(op.getElement(), StyleConstants.DISABLED,
                !(optionEnabled && isEnabled()));
        newwidgets.add(op);
    }

    if (!newwidgets.equals(existingwidgets)) {
        // Rebuild the panel, losing focus
        panel.clear();
        for (Widget wid : newwidgets) {
            panel.add(wid);
        }
    }

    wasHtmlContentAllowed = htmlContentAllowed;
    wasMultiselect = isMultiselect();
}
3.68
hudi_FileIOUtils_getConfiguredLocalDirs
/**
 * Return the configured local directories where hudi can write files. This
 * method does not create any directories on its own, it only encapsulates the
 * logic of locating the local directories according to deployment mode.
 */
public static String[] getConfiguredLocalDirs() {
    if (isRunningInYarnContainer()) {
        // If we are in yarn mode, systems can have different disk layouts so we must set it
        // to what Yarn on this system said was available. Note this assumes that Yarn has
        // created the directories already, and that they are secured so that only the
        // user has access to them.
        return getYarnLocalDirs().split(",");
    } else if (System.getProperty("java.io.tmpdir") != null) {
        return System.getProperty("java.io.tmpdir").split(",");
    } else {
        return null;
    }
}
3.68
hadoop_NamenodeStatusReport_getNumberOfMissingBlocksWithReplicationFactorOne
/**
 * Gets the total number of missing blocks on the cluster with
 * replication factor 1.
 *
 * @return the total number of missing blocks on the cluster with
 *         replication factor 1.
 */
public long getNumberOfMissingBlocksWithReplicationFactorOne() {
    return this.numberOfMissingBlocksWithReplicationFactorOne;
}
3.68
hmily_GsonUtils_fromJson
/**
 * Deserializes a JSON element into an instance of the given class.
 *
 * @param <T> the type parameter
 * @param jsonElement the json element
 * @param tClass the target class
 * @return the deserialized instance
 */
public <T> T fromJson(final JsonElement jsonElement, final Class<T> tClass) {
    return GSON.fromJson(jsonElement, tClass);
}
3.68
morf_UpgradePathFinder_candidateStepsByUUID
/**
 * Transforms a collection of upgrade step classes into a map of UUIDs and
 * {@link CandidateStep} instances.
 *
 * @throws IllegalStateException if any particular UUID appears on more than one upgrade step class.
 */
private Map<java.util.UUID, CandidateStep> candidateStepsByUUID() {
    // track which candidate step is attached to which UUID to detect duplicates
    // also the map must retain the original ordering of the upgrade steps
    Map<java.util.UUID, CandidateStep> uuidsForClass = new LinkedHashMap<>();
    for (Class<? extends UpgradeStep> candidateStepClass : upgradeGraph.orderedSteps()) {
        CandidateStep candidateStep = new CandidateStep(candidateStepClass);
        // store the candidate in the map and check that each UUID is only present once
        CandidateStep duplicateStep = uuidsForClass.put(candidateStep.getUuid(), candidateStep);
        if (duplicateStep != null) {
            throw new IllegalStateException(String.format("Upgrade step [%s] has the same UUID as [%s]",
                candidateStep, duplicateStep));
        }
    }
    return uuidsForClass;
}
3.68
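The duplicate check above relies on Map#put returning the value previously mapped to the key. A self-contained sketch of that idiom:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.UUID;

    public class DuplicateKeyDemo {
        public static void main(String[] args) {
            Map<UUID, String> byUuid = new LinkedHashMap<>(); // retains insertion order, like above
            UUID id = UUID.randomUUID();
            byUuid.put(id, "StepA");
            // put(...) returns the previous mapping for the key, which doubles as a duplicate probe
            String previous = byUuid.put(id, "StepB");
            if (previous != null) {
                throw new IllegalStateException("StepB has the same UUID as " + previous);
            }
        }
    }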
hbase_HRegion_recordMutationWithoutWal
/**
 * Update LongAdders for number of puts without wal and the size of possible data loss. This
 * information is exposed by the region server metrics.
 */
private void recordMutationWithoutWal(final Map<byte[], List<Cell>> familyMap) {
    numMutationsWithoutWAL.increment();
    if (numMutationsWithoutWAL.sum() <= 1) {
        LOG.info("writing data to region " + this
            + " with WAL disabled. Data may be lost in the event of a crash.");
    }

    long mutationSize = 0;
    for (List<Cell> cells : familyMap.values()) {
        // Optimization: 'foreach' loop is not used. See:
        // HBASE-12023 HRegion.applyFamilyMapToMemstore creates too many iterator objects
        assert cells instanceof RandomAccess;
        int listSize = cells.size();
        for (int i = 0; i < listSize; i++) {
            Cell cell = cells.get(i);
            mutationSize += cell.getSerializedSize();
        }
    }

    dataInMemoryWithoutWAL.add(mutationSize);
}
3.68
hudi_HoodieFlinkCopyOnWriteTable_upsertPrepped
/**
 * Upserts the given prepared records into the Hoodie table, at the supplied instantTime.
 *
 * <p>This implementation requires that the input records are already tagged, and de-duped if needed.
 *
 * <p>Specifies the write handle explicitly in order to have fine-grained control over
 * the underlying file.
 *
 * @param context HoodieEngineContext
 * @param writeHandle Explicit write handle for the underlying file
 * @param instantTime Instant Time for the action
 * @param preppedRecords Hoodie records to upsert
 * @return HoodieWriteMetadata
 */
public HoodieWriteMetadata<List<WriteStatus>> upsertPrepped(
        HoodieEngineContext context,
        HoodieWriteHandle<?, ?, ?, ?> writeHandle,
        String instantTime,
        List<HoodieRecord<T>> preppedRecords) {
    return new FlinkUpsertPreppedCommitActionExecutor<>(
        context, writeHandle, config, this, instantTime, preppedRecords).execute();
}
3.68
morf_AbstractSqlDialectTest_expectedSqlInsertIntoValuesWithComplexField
/**
 * @return The expected SQL for Insert Into Values With Complex Field
 */
protected List<String> expectedSqlInsertIntoValuesWithComplexField() {
    return Arrays.asList("INSERT INTO " + tableName("TableOne") + " (id, value) VALUES (3, 1 + 2)");
}
3.68
framework_VScrollTable_updateActionMap
/** For internal use only. May be removed or replaced in the future. */
public void updateActionMap(UIDL mainUidl) {
    UIDL actionsUidl = mainUidl.getChildByTagName("actions");
    if (actionsUidl == null) {
        return;
    }

    for (final Object child : actionsUidl) {
        final UIDL action = (UIDL) child;
        final String key = action.getStringAttribute("key");
        final String caption = action.getStringAttribute("caption");
        actionMap.put(key + "_c", caption);
        if (action.hasAttribute("icon")) {
            // TODO need some uri handling ??
            actionMap.put(key + "_i", action.getStringAttribute("icon"));
        } else {
            actionMap.remove(key + "_i");
        }
    }
}
3.68
hbase_HBaseTestingUtility_restartHBaseCluster
/**
 * Starts the hbase cluster up again after shutting it down previously in a test. Use this if you
 * want to keep dfs/zk up and just stop/start hbase.
 *
 * @param servers number of region servers
 */
public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
    this.restartHBaseCluster(servers, null);
}
3.68
flink_Deadline_timeLeftIfAny
/**
 * Returns the time left between the deadline and now. If no time is left, a {@link
 * TimeoutException} will be thrown.
 *
 * @throws TimeoutException if no time is left
 */
public Duration timeLeftIfAny() throws TimeoutException {
    long nanos = Math.subtractExact(timeNanos, clock.relativeTimeNanos());
    if (nanos <= 0) {
        throw new TimeoutException();
    }
    return Duration.ofNanos(nanos);
}
3.68
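A hedged sketch of a polling loop built on this method; Deadline.fromNow and the isReady() condition are assumptions, not part of the row above:

    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(30));
    try {
        while (!isReady()) {                          // isReady() is a hypothetical condition
            Duration left = deadline.timeLeftIfAny(); // throws TimeoutException once time is up
            Thread.sleep(Math.min(100L, left.toMillis()));
        }
    } catch (TimeoutException | InterruptedException e) {
        // budget exhausted or interrupted: stop waiting
    }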
flink_MetricStore_getJobMetricStore
/**
 * Returns the {@link ComponentMetricStore} for the given job ID.
 *
 * @param jobID job ID
 * @return ComponentMetricStore for the given ID, or null if no store for the given argument
 *     exists
 */
public synchronized ComponentMetricStore getJobMetricStore(String jobID) {
    return jobID == null ? null : ComponentMetricStore.unmodifiable(jobs.get(jobID));
}
3.68
flink_LimitedConnectionsFileSystem_getTotalNumberOfOpenStreams
/** Gets the total number of open streams (input plus output). */
public int getTotalNumberOfOpenStreams() {
    lock.lock();
    try {
        return numReservedOutputStreams + numReservedInputStreams;
    } finally {
        lock.unlock();
    }
}
3.68
hbase_Import_map
/**
 * @param row The current table row key.
 * @param value The columns.
 * @param context The current context.
 * @throws IOException When something is broken with the data.
 */
@Override
public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException {
    try {
        writeResult(row, value, context);
    } catch (InterruptedException e) {
        LOG.error("Interrupted while writing result", e);
        Thread.currentThread().interrupt();
    }
}
3.68
hbase_StoreFileInfo_isTopReference
/** Returns true if the store file is a top Reference. */
public boolean isTopReference() {
    return this.reference != null && Reference.isTopFileRegion(this.reference.getFileRegion());
}
3.68
dubbo_AbstractRegistry_notify
/**
 * Notify changes from the provider side.
 *
 * @param url consumer side url
 * @param listener listener
 * @param urls provider latest urls
 */
protected void notify(URL url, NotifyListener listener, List<URL> urls) {
    if (url == null) {
        throw new IllegalArgumentException("notify url == null");
    }
    if (listener == null) {
        throw new IllegalArgumentException("notify listener == null");
    }
    if ((CollectionUtils.isEmpty(urls)) && !ANY_VALUE.equals(url.getServiceInterface())) {
        // 1-4 Empty address.
        logger.warn(REGISTRY_EMPTY_ADDRESS, "", "",
            "Ignore empty notify urls for subscribe url " + url);
        return;
    }
    if (logger.isInfoEnabled()) {
        logger.info("Notify urls for subscribe url " + url + ", url size: " + urls.size());
    }
    // keep every provider's category.
    Map<String, List<URL>> result = new HashMap<>();
    for (URL u : urls) {
        if (UrlUtils.isMatch(url, u)) {
            String category = u.getCategory(DEFAULT_CATEGORY);
            List<URL> categoryList = result.computeIfAbsent(category, k -> new ArrayList<>());
            categoryList.add(u);
        }
    }
    if (result.size() == 0) {
        return;
    }
    Map<String, List<URL>> categoryNotified = notified.computeIfAbsent(url, u -> new ConcurrentHashMap<>());
    for (Map.Entry<String, List<URL>> entry : result.entrySet()) {
        String category = entry.getKey();
        List<URL> categoryList = entry.getValue();
        categoryNotified.put(category, categoryList);
        listener.notify(categoryList);
        // We will update our cache file after each notification.
        // When our Registry has a subscribed failure due to network jitter, we can return at
        // least the existing cache URL.
        if (localCacheEnabled) {
            saveProperties(url);
        }
    }
}
3.68
pulsar_ManagedLedgerConfig_getRetentionSizeInMB
/**
 * @return quota for message retention
 */
public long getRetentionSizeInMB() {
    return retentionSizeInMB;
}
3.68
hudi_CompactionStrategy_filterPartitionPaths
/**
 * Filter the partition paths based on compaction strategy.
 *
 * @param writeConfig write configuration
 * @param allPartitionPaths all partition paths to filter
 * @return the filtered list of partition paths
 */
public List<String> filterPartitionPaths(HoodieWriteConfig writeConfig, List<String> allPartitionPaths) {
    return allPartitionPaths;
}
3.68
flink_LogicalTypeUtils_toRowType
/**
 * Converts any logical type to a row type. Composite types are converted to a row type. Atomic
 * types are wrapped into a field.
 */
public static RowType toRowType(LogicalType t) {
    switch (t.getTypeRoot()) {
        case ROW:
            return (RowType) t;
        case STRUCTURED_TYPE:
            final StructuredType structuredType = (StructuredType) t;
            final List<RowField> fields =
                    structuredType.getAttributes().stream()
                            .map(
                                    attribute ->
                                            new RowField(
                                                    attribute.getName(),
                                                    attribute.getType(),
                                                    attribute.getDescription().orElse(null)))
                            .collect(Collectors.toList());
            return new RowType(structuredType.isNullable(), fields);
        case DISTINCT_TYPE:
            return toRowType(((DistinctType) t).getSourceType());
        default:
            return RowType.of(t);
    }
}
3.68
querydsl_AbstractHibernateSQLQuery_setTimeout
/**
 * Set a timeout for the underlying JDBC query.
 *
 * @param timeout the timeout in seconds
 */
@SuppressWarnings("unchecked")
public Q setTimeout(int timeout) {
    this.timeout = timeout;
    return (Q) this;
}
3.68
open-banking-gateway_UpdateAuthMapper_updateContext
/**
 * Due to JsonCustomSerializer, Xs2aContext will always have the type it had started with, for example
 * {@link de.adorsys.opba.protocol.xs2a.context.ais.AccountListXs2aContext} will be
 * always properly deserialized.
 */
public Xs2aContext updateContext(Xs2aContext context, AuthorizationRequest request) {
    if (context instanceof AccountListXs2aContext) {
        return aisAccountsMapper.map(request, (AccountListXs2aContext) context);
    }
    if (context instanceof TransactionListXs2aContext) {
        return aisTransactionsMapper.map(request, (TransactionListXs2aContext) context);
    }
    if (context instanceof SinglePaymentXs2aContext) {
        return pisRequestSinglePaymentInitiation.map(request, (SinglePaymentXs2aContext) context);
    }
    throw new IllegalArgumentException("Can't update authorization for: " + context.getClass().getCanonicalName());
}
3.68
flink_DataSet_groupBy
/**
 * Groups a {@link DataSet} using field expressions. A field expression is either the name of a
 * public field or a getter method with parentheses of the {@link DataSet}'s underlying type. A
 * dot can be used to drill down into objects, as in {@code "field1.getInnerField2()" }. This
 * method returns an {@link UnsortedGrouping} on which one of the following grouping
 * transformations can be applied.
 *
 * <ul>
 *   <li>{@link UnsortedGrouping#sortGroup(int, org.apache.flink.api.common.operators.Order)} to
 *       get a {@link SortedGrouping}.
 *   <li>{@link UnsortedGrouping#aggregate(Aggregations, int)} to apply an Aggregate
 *       transformation.
 *   <li>{@link UnsortedGrouping#reduce(org.apache.flink.api.common.functions.ReduceFunction)}
 *       to apply a Reduce transformation.
 *   <li>{@link
 *       UnsortedGrouping#reduceGroup(org.apache.flink.api.common.functions.GroupReduceFunction)}
 *       to apply a GroupReduce transformation.
 * </ul>
 *
 * @param fields One or more field expressions on which the DataSet will be grouped.
 * @return An {@link UnsortedGrouping} on which a transformation needs to be applied to obtain a
 *     transformed DataSet.
 * @see Tuple
 * @see UnsortedGrouping
 * @see AggregateOperator
 * @see ReduceOperator
 * @see org.apache.flink.api.java.operators.GroupReduceOperator
 * @see DataSet
 */
public UnsortedGrouping<T> groupBy(String... fields) {
    return new UnsortedGrouping<>(this, new Keys.ExpressionKeys<>(fields, getType()));
}
3.68
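A minimal sketch of a field-expression grouping against the legacy DataSet API; the POJO and its data are hypothetical:

    public class WordCount {
        public String word; // public field, addressable by the "word" field expression
        public int count;
        public WordCount() {}
        public WordCount(String word, int count) { this.word = word; this.count = count; }
    }

    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<WordCount> counts = env.fromElements(
            new WordCount("a", 1), new WordCount("a", 2), new WordCount("b", 3));
    // group by the public field "word", then apply a Reduce transformation
    DataSet<WordCount> summed = counts
            .groupBy("word")
            .reduce((left, right) -> new WordCount(left.word, left.count + right.count));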
hadoop_RouterQuotaUpdateService_getMountTableEntries
/**
 * Get all the existing mount tables.
 *
 * @return List of mount tables.
 * @throws IOException if the mount table entries cannot be retrieved from the store.
 */
private List<MountTable> getMountTableEntries() throws IOException {
    // scan mount tables from root path
    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest.newInstance("/");
    GetMountTableEntriesResponse getResponse = getMountTableStore().getMountTableEntries(getRequest);
    return getResponse.getEntries();
}
3.68
hadoop_ResourceCalculatorProcessTree_initialize
/**
 * Initialize the object.
 *
 * @throws YarnException Throws an exception on error.
 */
public void initialize() throws YarnException {
}
3.68
pulsar_PulsarAdminImpl_brokers
/**
 * @return the brokers management object
 */
public Brokers brokers() {
    return brokers;
}
3.68
flink_AbstractKeyedStateBackend_dispose
/**
 * Closes the state backend, releasing all internal resources, but does not delete any
 * persistent checkpoint data.
 */
@Override
public void dispose() {
    IOUtils.closeQuietly(cancelStreamRegistry);

    if (kvStateRegistry != null) {
        kvStateRegistry.unregisterAll();
    }

    lastName = null;
    lastState = null;
    keyValueStatesByName.clear();
}
3.68
pulsar_SecurityUtil_isSecurityEnabled
/**
 * Initializes UserGroupInformation with the given Configuration and returns
 * UserGroupInformation.isSecurityEnabled().
 *
 * All checks for isSecurityEnabled() should happen through this method.
 *
 * @param config the given configuration
 * @return true if kerberos is enabled on the given configuration, false otherwise
 */
public static boolean isSecurityEnabled(final Configuration config) {
    Validate.notNull(config);
    return KERBEROS.equalsIgnoreCase(config.get(HADOOP_SECURITY_AUTHENTICATION));
}
3.68
hbase_Get_getCacheBlocks
/**
 * Get whether blocks should be cached for this Get.
 *
 * @return true if default caching should be used, false if blocks should not be cached
 */
public boolean getCacheBlocks() {
    return cacheBlocks;
}
3.68
hadoop_OBSObjectBucketUtils_innerCreateEmptyObject
// Used to create an empty file that represents an empty directory
private static void innerCreateEmptyObject(final OBSFileSystem owner, final String objectName)
        throws ObsException, IOException {
    final InputStream im = new InputStream() {
        @Override
        public int read() {
            return -1;
        }
    };

    PutObjectRequest putObjectRequest = OBSCommonUtils
        .newPutObjectRequest(owner, objectName, newObjectMetadata(0L), im);

    long len;
    if (putObjectRequest.getFile() != null) {
        len = putObjectRequest.getFile().length();
    } else {
        len = putObjectRequest.getMetadata().getContentLength();
    }

    try {
        owner.getObsClient().putObject(putObjectRequest);
        owner.getSchemeStatistics().incrementWriteOps(1);
        owner.getSchemeStatistics().incrementBytesWritten(len);
    } finally {
        im.close();
    }
}
3.68
framework_AbstractOrderedLayout_getExpandRatio
/**
 * Returns the expand ratio of given component.
 *
 * @param component
 *            the component whose expand ratio is requested
 * @return expand ratio of given component, 0.0f by default.
 */
public float getExpandRatio(Component component) {
    ChildComponentData childData = getState(false).childData.get(component);
    if (childData == null) {
        throw new IllegalArgumentException(
                "The given component is not a child of this layout");
    }
    return childData.expandRatio;
}
3.68
flink_ResultPartitionDeploymentDescriptor_isBroadcast
/** Whether the resultPartition is a broadcast edge. */
public boolean isBroadcast() {
    return partitionDescriptor.isBroadcast();
}
3.68
hbase_OrderedBytes_decodeBlobVar
/**
 * Decode a blob value that was encoded using BlobVar encoding.
 */
public static byte[] decodeBlobVar(PositionedByteRange src) {
    final byte header = src.get();
    if (header == NULL || header == DESCENDING.apply(NULL)) {
        return null;
    }
    assert header == BLOB_VAR || header == DESCENDING.apply(BLOB_VAR);
    Order ord = BLOB_VAR == header ? ASCENDING : DESCENDING;
    if (src.peek() == ord.apply(TERM)) {
        // skip empty input buffer.
        src.get();
        return new byte[0];
    }
    final int offset = src.getOffset(), start = src.getPosition();
    int end;
    byte[] a = src.getBytes();
    for (end = start; (byte) (ord.apply(a[offset + end]) & 0x80) != TERM; end++)
        ;
    end++; // increment end to 1-past last byte
    // create ret buffer using length of encoded data + 1 (header byte)
    PositionedByteRange ret =
        new SimplePositionedMutableByteRange(blobVarDecodedLength(end - start + 1));
    int s = 6;
    byte t = (byte) ((ord.apply(a[offset + start]) << 1) & 0xff);
    for (int i = start + 1; i < end; i++) {
        if (s == 7) {
            ret.put((byte) (t | (ord.apply(a[offset + i]) & 0x7f)));
            i++;
            // explicitly reset t -- clean up overflow buffer after decoding
            // a full cycle and retain assertion condition below. This happens
            t = 0; // when the LSB in the last encoded byte is 1. (HBASE-9893)
        } else {
            ret.put((byte) (t | ((ord.apply(a[offset + i]) & 0x7f) >>> s)));
        }
        if (i == end) break;
        t = (byte) ((ord.apply(a[offset + i]) << (8 - s)) & 0xff);
        s = s == 1 ? 7 : s - 1;
    }
    src.setPosition(end);
    assert t == 0 : "Unexpected bits remaining after decoding blob.";
    assert ret.getPosition() == ret.getLength() : "Allocated unnecessarily large return buffer.";
    return ret.getBytes();
}
3.68
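A hedged round-trip sketch; encodeBlobVar and blobVarEncodedLength are assumed to be the matching encoder utilities in the same OrderedBytes class:

    byte[] value = new byte[] { 0x01, 0x02, 0x03 };
    PositionedByteRange buf =
        new SimplePositionedMutableByteRange(OrderedBytes.blobVarEncodedLength(value.length));
    OrderedBytes.encodeBlobVar(buf, value, Order.ASCENDING);
    buf.setPosition(0); // rewind: decode reads from the current position
    byte[] decoded = OrderedBytes.decodeBlobVar(buf);
    assert java.util.Arrays.equals(value, decoded);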
flink_KubernetesUtils_isHostNetwork
/** Checks if hostNetwork is enabled. */
public static boolean isHostNetwork(Configuration configuration) {
    return configuration.getBoolean(KubernetesConfigOptions.KUBERNETES_HOSTNETWORK_ENABLED);
}
3.68
hudi_BootstrapOperator_preLoadIndexRecords
/**
 * Load the index records before {@link #processElement}.
 */
protected void preLoadIndexRecords() throws Exception {
    String basePath = hoodieTable.getMetaClient().getBasePath();
    int taskID = getRuntimeContext().getIndexOfThisSubtask();
    LOG.info("Start loading records in table {} into the index state, taskId = {}", basePath, taskID);
    for (String partitionPath : FSUtils.getAllPartitionPaths(
            new HoodieFlinkEngineContext(hadoopConf), metadataConfig(conf), basePath)) {
        if (pattern.matcher(partitionPath).matches()) {
            loadRecords(partitionPath);
        }
    }
    LOG.info("Finish sending index records, taskId = {}.", getRuntimeContext().getIndexOfThisSubtask());

    // wait for the other bootstrap tasks finish bootstrapping.
    waitForBootstrapReady(getRuntimeContext().getIndexOfThisSubtask());
    hoodieTable = null;
}
3.68
hbase_SnapshotDescriptionUtils_validate
/**
 * Convert the passed snapshot description into a 'full' snapshot description based on default
 * parameters, if none have been supplied. This resolves any 'optional' parameters that aren't
 * supplied to their default values.
 *
 * @param snapshot general snapshot descriptor
 * @param conf Configuration to read configured snapshot defaults if snapshot is not complete
 * @return a valid snapshot description
 * @throws IllegalArgumentException if the {@link SnapshotDescription} is not a complete
 *           {@link SnapshotDescription}.
 */
public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf)
    throws IllegalArgumentException, IOException {
    if (!snapshot.hasTable()) {
        throw new IllegalArgumentException(
            "Descriptor doesn't apply to a table, so we can't build it.");
    }

    SnapshotDescription.Builder builder = snapshot.toBuilder();

    // set the creation time, if one hasn't been set
    long time = snapshot.getCreationTime();
    if (time == SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED) {
        time = EnvironmentEdgeManager.currentTime();
        LOG.debug("Creation time not specified, setting to:" + time + " (current time:"
            + EnvironmentEdgeManager.currentTime() + ").");
        builder.setCreationTime(time);
    }

    long ttl = snapshot.getTtl();
    // set default ttl(sec) if it is not set already or the value is out of the range
    if (
        ttl == SnapshotDescriptionUtils.NO_SNAPSHOT_TTL_SPECIFIED
            || ttl > TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)
    ) {
        final long defaultSnapshotTtl =
            conf.getLong(HConstants.DEFAULT_SNAPSHOT_TTL_CONFIG_KEY, HConstants.DEFAULT_SNAPSHOT_TTL);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Snapshot current TTL value: {} resetting it to default value: {}", ttl,
                defaultSnapshotTtl);
        }
        ttl = defaultSnapshotTtl;
    }
    builder.setTtl(ttl);

    if (!snapshot.hasVersion()) {
        builder.setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION);
        LOG.debug("Snapshot {} VERSION not specified, setting to {}", snapshot.getName(),
            SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION);
    }

    RpcServer.getRequestUser().ifPresent(user -> {
        if (AccessChecker.isAuthorizationSupported(conf)) {
            builder.setOwner(user.getShortName());
            LOG.debug("Set {} as owner of Snapshot", user.getShortName());
        }
    });

    snapshot = builder.build();

    // set the acl to snapshot if security feature is enabled.
    if (isSecurityAvailable(conf)) {
        snapshot = writeAclToSnapshotDescription(snapshot, conf);
    }
    return snapshot;
}
3.68
hbase_Bytes_toArray
/**
 * Convert a list of byte[] to an array
 *
 * @param array List of byte [].
 * @return Array of byte [].
 */
public static byte[][] toArray(final List<byte[]> array) {
    // List#toArray doesn't work on lists of byte [].
    byte[][] results = new byte[array.size()][];
    for (int i = 0; i < array.size(); i++) {
        results[i] = array.get(i);
    }
    return results;
}
3.68
hbase_ZKUtil_getDataNoWatch
/**
 * Get the data at the specified znode without setting a watch. Returns the data if the node
 * exists. Returns null if the node does not exist. Sets the stats of the node in the passed Stat
 * object. Pass a null stat if not interested.
 *
 * @param zkw zk reference
 * @param znode path of node
 * @param stat node status to get if node exists
 * @return data of the specified znode, or null if node does not exist
 * @throws KeeperException if unexpected zookeeper exception
 */
public static byte[] getDataNoWatch(ZKWatcher zkw, String znode, Stat stat)
    throws KeeperException {
    try {
        byte[] data = zkw.getRecoverableZooKeeper().getData(znode, null, stat);
        logRetrievedMsg(zkw, znode, data, false);
        return data;
    } catch (KeeperException.NoNodeException e) {
        LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " "
            + "because node does not exist (not necessarily an error)"));
        return null;
    } catch (KeeperException e) {
        LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e);
        zkw.keeperException(e);
        return null;
    } catch (InterruptedException e) {
        LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e);
        zkw.interruptedException(e);
        return null;
    }
}
3.68
hbase_ChoreService_requestCorePoolDecrease
/**
 * Represents a request to decrease the number of core pool threads. Typically a request
 * originates from the fact that the current core pool size is more than sufficient to service the
 * running Chores.
 */
private synchronized void requestCorePoolDecrease() {
    if (scheduler.getCorePoolSize() > MIN_CORE_POOL_SIZE) {
        scheduler.setCorePoolSize(scheduler.getCorePoolSize() - 1);
        printChoreServiceDetails("requestCorePoolDecrease");
    }
}
3.68
framework_Navigator_revertNavigation
/**
 * Revert the changes to the navigation state. When navigation fails, this
 * method can be called by {@link #navigateTo(View, String, String)} to
 * revert the URL fragment to point to the previous view to which navigation
 * succeeded.
 *
 * This method should only be called by
 * {@link #navigateTo(View, String, String)}. Normally it should not be
 * overridden, but can be by frameworks that need to hook into view change
 * cancellations of this type.
 *
 * @since 7.6
 */
protected void revertNavigation() {
    if (currentNavigationState != null) {
        getStateManager().setState(currentNavigationState);
    }
}
3.68
framework_VaadinSession_lock
/**
 * Locks this session to protect its data from concurrent access. Accessing
 * the UI state from outside the normal request handling should always lock
 * the session and unlock it when done. The preferred way to ensure locking
 * is done correctly is to wrap your code using {@link UI#access(Runnable)}
 * (or {@link VaadinSession#access(Runnable)} if you are only touching the
 * session and not any UI), e.g.:
 *
 * <pre>
 * myUI.access(new Runnable() {
 *     &#064;Override
 *     public void run() {
 *         // Here it is safe to update the UI.
 *         // UI.getCurrent can also be used
 *         myUI.getContent().setCaption(&quot;Changed safely&quot;);
 *     }
 * });
 * </pre>
 *
 * If you for whatever reason want to do locking manually, you should do it
 * like:
 *
 * <pre>
 * session.lock();
 * try {
 *     doSomething();
 * } finally {
 *     session.unlock();
 * }
 * </pre>
 *
 * This method will block until the lock can be retrieved.
 * <p>
 * {@link #getLockInstance()} can be used if more control over the locking
 * is required.
 *
 * @see #unlock()
 * @see #getLockInstance()
 * @see #hasLock()
 */
public void lock() {
    getLockInstance().lock();
}
3.68
dubbo_PojoUtils_getDefaultValue
/**
 * Returns the default initial value for the given parameter type.
 *
 * @param parameterType the parameter type
 * @return the default value for that type
 */
private static Object getDefaultValue(Class<?> parameterType) {
    if ("char".equals(parameterType.getName())) {
        return Character.MIN_VALUE;
    }
    if ("boolean".equals(parameterType.getName())) {
        return false;
    }
    if ("byte".equals(parameterType.getName())) {
        return (byte) 0;
    }
    if ("short".equals(parameterType.getName())) {
        return (short) 0;
    }
    return parameterType.isPrimitive() ? 0 : null;
}
3.68
flink_KubernetesStateHandleStore_getAllAndLock
/**
 * Gets all available state handles from Kubernetes.
 *
 * @return All state handles from ConfigMap.
 */
@Override
public List<Tuple2<RetrievableStateHandle<T>, String>> getAllAndLock() {
    return kubeClient
            .getConfigMap(configMapName)
            .map(
                    configMap -> {
                        final List<Tuple2<RetrievableStateHandle<T>, String>> stateHandles =
                                new ArrayList<>();
                        configMap.getData().entrySet().stream()
                                .filter(entry -> configMapKeyFilter.test(entry.getKey()))
                                .forEach(
                                        entry -> {
                                            try {
                                                final StateHandleWithDeleteMarker<T> result =
                                                        deserializeStateHandle(entry.getValue());
                                                if (!result.isMarkedForDeletion()) {
                                                    stateHandles.add(
                                                            new Tuple2<>(
                                                                    result.getInner(),
                                                                    entry.getKey()));
                                                }
                                            } catch (IOException e) {
                                                LOG.warn(
                                                        "ConfigMap {} contained corrupted data. Ignoring the key {}.",
                                                        configMapName,
                                                        entry.getKey());
                                            }
                                        });
                        return stateHandles;
                    })
            .orElse(Collections.emptyList());
}
3.68
flink_ExecutionConfig_getRegisteredPojoTypes
/** Returns the registered POJO types. */
public LinkedHashSet<Class<?>> getRegisteredPojoTypes() {
    return registeredPojoTypes;
}
3.68
flink_AccumulatorHelper_deserializeAccumulators
/**
 * Takes the serialized accumulator results and tries to deserialize them using the provided
 * class loader.
 *
 * @param serializedAccumulators The serialized accumulator results.
 * @param loader The class loader to use.
 * @return The deserialized accumulator results.
 */
public static Map<String, OptionalFailure<Object>> deserializeAccumulators(
        Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators,
        ClassLoader loader)
        throws IOException, ClassNotFoundException {

    if (serializedAccumulators == null || serializedAccumulators.isEmpty()) {
        return Collections.emptyMap();
    }

    Map<String, OptionalFailure<Object>> accumulators =
            CollectionUtil.newHashMapWithExpectedSize(serializedAccumulators.size());

    for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> entry :
            serializedAccumulators.entrySet()) {

        OptionalFailure<Object> value = null;
        if (entry.getValue() != null) {
            value = entry.getValue().deserializeValue(loader);
        }

        accumulators.put(entry.getKey(), value);
    }

    return accumulators;
}
3.68
dubbo_TriHttp2RemoteFlowController_writeAllocatedBytes
/**
 * Write the allocated bytes for this stream.
 *
 * @return the number of bytes written for a stream or {@code -1} if no write occurred.
 */
int writeAllocatedBytes(int allocated) {
    final int initialAllocated = allocated;
    int writtenBytes;
    // In case an exception is thrown we want to remember it and pass it to cancel(Throwable).
    Throwable cause = null;
    FlowControlled frame;
    try {
        assert !writing;
        writing = true;

        // Write the remainder of frames that we are allowed to
        boolean writeOccurred = false;
        while (!cancelled && (frame = peek()) != null) {
            int maxBytes = min(allocated, writableWindow());
            if (maxBytes <= 0 && frame.size() > 0) {
                // The frame still has data, but the amount of allocated bytes has been exhausted.
                // Don't write needless empty frames.
                break;
            }
            writeOccurred = true;
            int initialFrameSize = frame.size();
            try {
                frame.write(ctx, max(0, maxBytes));
                if (frame.size() == 0) {
                    // This frame has been fully written, remove this frame and notify it.
                    // Since we remove this frame first, we're guaranteed that its error
                    // method will not be called when we call cancel.
                    pendingWriteQueue.remove();
                    frame.writeComplete();
                }
            } finally {
                // Decrement allocated by how much was actually written.
                allocated -= initialFrameSize - frame.size();
            }
        }

        if (!writeOccurred) {
            // Either there was no frame, or the amount of allocated bytes has been exhausted.
            return -1;
        }
    } catch (Throwable t) {
        // Mark the state as cancelled, we'll clear the pending queue via cancel() below.
        cancelled = true;
        cause = t;
    } finally {
        writing = false;
        // Make sure we always decrement the flow control windows
        // by the bytes written.
        writtenBytes = initialAllocated - allocated;
        decrementPendingBytes(writtenBytes, false);
        decrementFlowControlWindow(writtenBytes);

        // If a cancellation occurred while writing, call cancel again to
        // clear and error all of the pending writes.
        if (cancelled) {
            cancel(INTERNAL_ERROR, cause);
        }
        if (monitor.isOverFlowControl()) {
            cause = new Throwable();
            cancel(FLOW_CONTROL_ERROR, cause);
        }
    }
    return writtenBytes;
}
3.68
hmily_RepositoryPathUtils_buildZookeeperRootPath
/**
 * Build zookeeper root path string.
 *
 * @param prefix the prefix
 * @param id the id
 * @return the string
 */
public static String buildZookeeperRootPath(final String prefix, final String id) {
    return String.join("/", prefix, id);
}
3.68
morf_SchemaChangeSequence_correctPrimaryKeyColumns
/**
 * @see org.alfasoftware.morf.upgrade.SchemaEditor#correctPrimaryKeyColumns(java.lang.String, java.util.List)
 * @deprecated This change step should never be required, use {@link #changePrimaryKeyColumns(String, List, List)}
 *             instead. This method will be removed when upgrade steps before 5.2.14 are removed.
 */
@Override
@Deprecated
public void correctPrimaryKeyColumns(String tableName, List<String> newPrimaryKeyColumns) {
    CorrectPrimaryKeyColumns correctPrimaryKeyColumns =
        new CorrectPrimaryKeyColumns(tableName, newPrimaryKeyColumns);
    visitor.visit(correctPrimaryKeyColumns);
    schemaAndDataChangeVisitor.visit(correctPrimaryKeyColumns);
}
3.68
framework_MessageSender_setConnection
/**
 * Sets the application connection this instance is connected to. Called
 * internally by the framework.
 *
 * @param connection
 *            the application connection this instance is connected to
 */
public void setConnection(ApplicationConnection connection) {
    this.connection = connection;
    xhrConnection.setConnection(connection);
}
3.68
graphhopper_Unzipper_getVerifiedFile
// see #1628
File getVerifiedFile(File destinationDir, ZipEntry ze) throws IOException {
    File destinationFile = new File(destinationDir, ze.getName());
    if (!destinationFile.getCanonicalPath()
            .startsWith(destinationDir.getCanonicalPath() + File.separator))
        throw new SecurityException("Zip Entry is outside of the target dir: " + ze.getName());
    return destinationFile;
}
3.68
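A sketch of the guard rejecting a "Zip Slip" entry, written the way a same-package test would call it (getVerifiedFile is package-private; the paths are hypothetical):

    void rejectsZipSlip() throws IOException {
        File target = new File("/tmp/unzip-target");
        ZipEntry evil = new ZipEntry("../../etc/passwd"); // resolves outside the target dir
        try {
            new Unzipper().getVerifiedFile(target, evil);
            throw new AssertionError("expected SecurityException");
        } catch (SecurityException expected) {
            // the canonical-path prefix check above rejected the entry
        }
    }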
morf_DatabaseMetaDataProvider_tableTypesForViews
/**
 * Types for {@link DatabaseMetaData#getTables(String, String, String, String[])}
 * used by {@link #loadAllViewNames()}.
 *
 * @return Array of relevant JDBC types.
 */
protected String[] tableTypesForViews() {
    return new String[] { "VIEW" };
}
3.68
flink_TGetQueryIdResp_findByName
/** Find the _Fields constant that matches name, or null if it's not found. */
public static _Fields findByName(java.lang.String name) {
    return byName.get(name);
}
3.68
graphhopper_NameSimilarityEdgeFilter_prepareName
/**
 * Removes any characters in the String that we don't care about in the matching procedure
 * TODO Currently limited to certain 'western' languages
 */
private String prepareName(String name) {
    StringBuilder sb = new StringBuilder(name.length());
    Matcher wordCharMatcher = WORD_CHAR.matcher(name);
    while (wordCharMatcher.find()) {
        String normalizedToken = toLowerCase(wordCharMatcher.group());
        String rewrite = rewriteMap.get(normalizedToken);
        if (rewrite != null)
            normalizedToken = rewrite;
        if (normalizedToken.isEmpty())
            continue;
        // Ignore matching short phrases like de, la, ... except it is a number
        if (normalizedToken.length() > 2) {
            sb.append(normalizedToken);
        } else {
            if (Character.isDigit(normalizedToken.charAt(0)) && (normalizedToken.length() == 1
                    || Character.isDigit(normalizedToken.charAt(1)))) {
                sb.append(normalizedToken);
            }
        }
    }
    return sb.toString();
}
3.68
AreaShop_RegionGroup_saveNow
/**
 * Save the groups to disk now, normally saveRequired() is preferred because of performance.
 */
public void saveNow() {
    plugin.getFileManager().saveGroupsNow();
}
3.68
hudi_HoodieAvroUtils_getNestedFieldSchemaFromRecord
/**
 * Get schema for the given field and record. Field can be nested, denoted by dot notation. e.g: a.b.c
 *
 * @param record - record containing the value of the given field
 * @param fieldName - name of the field
 * @return the schema of the (possibly nested) field
 */
public static Schema getNestedFieldSchemaFromRecord(GenericRecord record, String fieldName) {
    String[] parts = fieldName.split("\\.");
    GenericRecord valueNode = record;
    int i = 0;
    for (; i < parts.length; i++) {
        String part = parts[i];
        Object val = valueNode.get(part);
        if (i == parts.length - 1) {
            return resolveNullableSchema(valueNode.getSchema().getField(part).schema());
        } else {
            if (!(val instanceof GenericRecord)) {
                throw new HoodieException("Cannot find a record at part value :" + part);
            }
            valueNode = (GenericRecord) val;
        }
    }
    throw new HoodieException("Failed to get schema. Not a valid field name: " + fieldName);
}
3.68
framework_GridLayout_setDefaultComponentAlignment
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.ui.Layout.AlignmentHandler#setDefaultComponentAlignment(com.vaadin.ui.Alignment)
 */
@Override
public void setDefaultComponentAlignment(Alignment defaultAlignment) {
    defaultComponentAlignment = defaultAlignment;
}
3.68
flink_BinaryRawValueData_fromBytes
/**
 * Creates a {@link BinaryRawValueData} instance from the given bytes with offset and number of
 * bytes.
 */
public static <T> BinaryRawValueData<T> fromBytes(byte[] bytes, int offset, int numBytes) {
    return new BinaryRawValueData<>(
            new MemorySegment[] {MemorySegmentFactory.wrap(bytes)}, offset, numBytes);
}
3.68
hadoop_FlowRunRowKey_parseRowKeyFromString
/**
 * Given the encoded row key as string, returns the row key as an object.
 *
 * @param encodedRowKey String representation of row key.
 * @return A <cite>FlowRunRowKey</cite> object.
 */
public static FlowRunRowKey parseRowKeyFromString(String encodedRowKey) {
    return new FlowRunRowKeyConverter().decodeFromString(encodedRowKey);
}
3.68
framework_VLayoutSlot_getUsedWidth
/**
 * Returns how much horizontal space the widget and its caption use.
 *
 * @return the width of the contents in pixels
 */
public int getUsedWidth() {
    int widgetWidth = getWidgetWidth();
    if (caption == null) {
        return widgetWidth;
    } else if (caption.shouldBePlacedAfterComponent()) {
        return widgetWidth + getCaptionWidth();
    } else {
        return Math.max(widgetWidth, getCaptionWidth());
    }
}
3.68
graphhopper_BikeCommonPriorityParser_addPushingSection
// TODO duplicated in average speed
void addPushingSection(String highway) {
    pushingSectionsHighways.add(highway);
}
3.68
querydsl_StringExpressions_ltrim
/**
 * Create a {@code ltrim(str)} expression
 *
 * <p>Returns a character expression after it removes leading blanks.</p>
 *
 * @param str string
 * @return ltrim(str)
 */
public static StringExpression ltrim(Expression<String> str) {
    return Expressions.stringOperation(Ops.StringOps.LTRIM, str);
}
3.68
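A hedged usage sketch in a Querydsl projection; QPerson and queryFactory are hypothetical names, not from the row above:

    QPerson person = QPerson.person;
    List<String> names = queryFactory
            .select(StringExpressions.ltrim(person.name)) // strip leading blanks in the projection
            .from(person)
            .fetch();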
flink_MetricAssertions_isCloseTo
/**
 * Verifies that the gauge's value is close to the expected value within a certain deviation.
 *
 * @param value the expected value
 * @param epsilon the maximum deviation from the expected value
 * @return this assertion object
 */
public GaugeAssert<T> isCloseTo(long value, long epsilon) {
    assertThat((Long) actual.getValue())
            .isGreaterThan(value - epsilon)
            .isLessThan(value + epsilon);
    return this;
}
3.68
hadoop_LongValueSum_addNextValue
/**
 * Add a value to the aggregator.
 *
 * @param val a long value.
 */
public void addNextValue(long val) {
    this.sum += val;
}
3.68
framework_ConnectorTracker_getConnector
/**
 * Gets a connector by its id.
 *
 * @param connectorId
 *            The connector id to look for
 * @return The connector with the given id or null if no connector has the
 *         given id
 */
public ClientConnector getConnector(String connectorId) {
    ClientConnector connector = connectorIdToConnector.get(connectorId);
    // Ignore connectors that have been unregistered but not yet cleaned up
    if (unregisteredConnectors.contains(connector)) {
        return null;
    } else if (connector != null) {
        return connector;
    } else {
        DragAndDropService service = uI.getSession().getDragAndDropService();
        if (connectorId.equals(service.getConnectorId())) {
            return service;
        }
    }
    return null;
}
3.68
flink_RecordsBySplits_add
/**
 * Add the record from the given source split.
 *
 * @param split the source split the record was from.
 * @param record the record to add.
 */
public void add(SourceSplit split, E record) {
    add(split.splitId(), record);
}
3.68
hbase_IndexBuilder_configureJob
/**
 * Job configuration.
 */
public static Job configureJob(Configuration conf, String[] args) throws IOException {
    String tableName = args[0];
    String columnFamily = args[1];
    System.out.println("****" + tableName);
    conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(new Scan()));
    conf.set(TableInputFormat.INPUT_TABLE, tableName);
    conf.set("index.tablename", tableName);
    conf.set("index.familyname", columnFamily);
    String[] fields = new String[args.length - 2];
    System.arraycopy(args, 2, fields, 0, fields.length);
    conf.setStrings("index.fields", fields);
    Job job = new Job(conf, tableName);
    job.setJarByClass(IndexBuilder.class);
    job.setMapperClass(Map.class);
    job.setNumReduceTasks(0);
    job.setInputFormatClass(TableInputFormat.class);
    job.setOutputFormatClass(MultiTableOutputFormat.class);
    return job;
}
3.68
hadoop_ReservationInterval_isOverlap
/**
 * Returns whether the interval is active at the specified instant of time.
 *
 * @param tick the instance of the time to check
 * @return true if active, false otherwise
 */
public boolean isOverlap(long tick) {
    return (startTime <= tick && tick <= endTime);
}
3.68
hbase_MultiResponse_size
/** Returns the number of pairs in this container. */
public int size() {
    int size = 0;
    for (RegionResult result : results.values()) {
        size += result.size();
    }
    return size;
}
3.68
hadoop_FieldSelectionMapReduce_map
/**
 * The identity function. Input key/value pair is written directly to output.
 */
public void map(K key, V val, OutputCollector<Text, Text> output, Reporter reporter)
        throws IOException {
    FieldSelectionHelper helper = new FieldSelectionHelper(
        FieldSelectionHelper.emptyText, FieldSelectionHelper.emptyText);
    helper.extractOutputKeyValue(key.toString(), val.toString(), fieldSeparator,
        mapOutputKeyFieldList, mapOutputValueFieldList, allMapValueFieldsFrom,
        ignoreInputKey, true);
    output.collect(helper.getKey(), helper.getValue());
}
3.68
hbase_CellUtil_createCellScanner
/**
 * Flatten the map of cells out under the CellScanner
 *
 * @param map Map of Cell Lists; for example, the map of families to Cells that is used inside
 *          Put, etc., keeping Cells organized by family.
 * @return CellScanner interface over <code>map</code>
 */
public static CellScanner createCellScanner(final NavigableMap<byte[], List<Cell>> map) {
    return new CellScanner() {
        private final Iterator<Entry<byte[], List<Cell>>> entries = map.entrySet().iterator();
        private Iterator<Cell> currentIterator = null;
        private Cell currentCell;

        @Override
        public Cell current() {
            return this.currentCell;
        }

        @Override
        public boolean advance() {
            while (true) {
                if (this.currentIterator == null) {
                    if (!this.entries.hasNext()) return false;
                    this.currentIterator = this.entries.next().getValue().iterator();
                }
                if (this.currentIterator.hasNext()) {
                    this.currentCell = this.currentIterator.next();
                    return true;
                }
                this.currentCell = null;
                this.currentIterator = null;
            }
        }
    };
}
3.68
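A minimal consumption loop over the returned scanner; note that CellScanner.advance() may throw IOException, so the caller must handle or declare it:

    CellScanner scanner = CellUtil.createCellScanner(familyMap); // familyMap assumed populated
    while (scanner.advance()) {
        Cell cell = scanner.current();
        // process the cell, family by family in map order
    }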
hadoop_BlockData_isLastBlock
/**
 * Indicates whether the given block is the last block in the associated file.
 *
 * @param blockNumber the id of the desired block.
 * @return true if the given block is the last block in the associated file, false otherwise.
 * @throws IllegalArgumentException if blockNumber is invalid.
 */
public boolean isLastBlock(int blockNumber) {
    if (fileSize == 0) {
        return false;
    }
    throwIfInvalidBlockNumber(blockNumber);
    return blockNumber == (numBlocks - 1);
}
3.68
framework_VMenuBar_getCloseMenuKey
/**
 * Get the key that closes the menu. By default it is the escape key but by
 * overriding this you can change the key to whatever you want.
 *
 * @return the key code of the key that closes the menu
 */
protected int getCloseMenuKey() {
    return KeyCodes.KEY_ESCAPE;
}
3.68
pulsar_TimeAverageMessageData_totalMsgThroughput
/**
 * Get the total message throughput.
 *
 * @return Message throughput in + message throughput out.
 */
public double totalMsgThroughput() {
    return msgThroughputIn + msgThroughputOut;
}
3.68
hbase_MetricsConnection_incrMetaCacheMiss
/** Increment the number of meta cache misses. */
public void incrMetaCacheMiss() {
    metaCacheMisses.inc();
}
3.68
flink_Transformation_getSlotSharingGroup
/**
 * Returns the slot sharing group of this transformation if present.
 *
 * @see #setSlotSharingGroup(SlotSharingGroup)
 */
public Optional<SlotSharingGroup> getSlotSharingGroup() {
    return slotSharingGroup;
}
3.68
framework_MarginInfo_setMargins
/**
 * Copies margin values from another MarginInfo object.
 *
 * @param marginInfo
 *            another marginInfo object
 */
public void setMargins(MarginInfo marginInfo) {
    bitMask = marginInfo.bitMask;
}
3.68
hadoop_AbfsClientThrottlingAnalyzer_run
/**
 * Periodically analyzes a snapshot of the blob storage metrics and updates
 * the sleepDuration in order to appropriately throttle storage operations.
 */
@Override
public void run() {
    boolean doWork = false;
    try {
        doWork = doingWork.compareAndSet(0, 1);

        // prevent concurrent execution of this task
        if (!doWork) {
            return;
        }

        long now = System.currentTimeMillis();
        if (timerOrchestrator(TimerFunctionality.SUSPEND, this)) {
            return;
        }
        if (now - blobMetrics.get().getStartTime() >= analysisPeriodMs) {
            AbfsOperationMetrics oldMetrics = blobMetrics.getAndSet(
                new AbfsOperationMetrics(now));
            oldMetrics.setEndTime(now);
            sleepDuration = analyzeMetricsAndUpdateSleepDuration(oldMetrics, sleepDuration);
        }
    } finally {
        if (doWork) {
            doingWork.set(0);
        }
    }
}
3.68
framework_GridLayout_insertRow
/**
 * Inserts an empty row at the specified position in the grid.
 *
 * @param row
 *            Index of the row before which the new row will be inserted.
 *            The topmost row has index 0.
 */
public void insertRow(int row) {
    if (row > getRows()) {
        throw new IllegalArgumentException("Cannot insert row at " + row
                + " in a gridlayout with height " + getRows());
    }

    for (ChildComponentData existingArea : getState().childData.values()) {
        // Areas ending below the row needs to be moved down or stretched
        if (existingArea.row2 >= row) {
            existingArea.row2++;
            // Stretch areas that span over the selected row
            if (existingArea.row1 >= row) {
                existingArea.row1++;
            }
        }
    }

    if (cursorY >= row) {
        cursorY++;
    }

    setRows(getRows() + 1);
    markAsDirty();
}
3.68
querydsl_NumberExpression_negate
/**
 * Create a {@code this * -1} expression
 *
 * <p>Get the negation of this expression</p>
 *
 * @return this * -1
 */
public NumberExpression<T> negate() {
    if (negation == null) {
        negation = Expressions.numberOperation(getType(), Ops.NEGATE, mixin);
    }
    return negation;
}
3.68
hudi_IOUtils_getMaxMemoryAllowedForMerge
/**
 * Dynamic calculation of max memory to use for spillable map. There is always more than one task
 * running on an executor and each task maintains a spillable map.
 *
 *   user.available.memory = executor.memory * (1 - memory.fraction)
 *   spillable.available.memory = user.available.memory * hoodie.memory.fraction / executor.cores
 *
 * Anytime the engine memory fractions/total memory is changed, the memory used for spillable map
 * changes accordingly.
 */
public static long getMaxMemoryAllowedForMerge(TaskContextSupplier context, String maxMemoryFraction) {
    Option<String> totalMemoryOpt = context.getProperty(EngineProperty.TOTAL_MEMORY_AVAILABLE);
    Option<String> memoryFractionOpt = context.getProperty(EngineProperty.MEMORY_FRACTION_IN_USE);
    Option<String> totalCoresOpt = context.getProperty(EngineProperty.TOTAL_CORES_PER_EXECUTOR);

    if (totalMemoryOpt.isPresent() && memoryFractionOpt.isPresent() && totalCoresOpt.isPresent()) {
        long executorMemoryInBytes = Long.parseLong(totalMemoryOpt.get());
        double memoryFraction = Double.parseDouble(memoryFractionOpt.get());
        double maxMemoryFractionForMerge = Double.parseDouble(maxMemoryFraction);
        long executorCores = Long.parseLong(totalCoresOpt.get());
        double userAvailableMemory = executorMemoryInBytes * (1 - memoryFraction) / executorCores;
        long maxMemoryForMerge = (long) Math.floor(userAvailableMemory * maxMemoryFractionForMerge);
        return Math.max(DEFAULT_MIN_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES, maxMemoryForMerge);
    } else {
        return DEFAULT_MAX_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES;
    }
}
3.68
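A worked example of the formula with assumed numbers (4 GiB executor memory, engine memory.fraction 0.6, 2 cores, hoodie fraction 0.6):

    long executorMemory = 4L * 1024 * 1024 * 1024; // 4 GiB
    double engineFraction = 0.6;                   // memory.fraction claimed by the engine
    long cores = 2;
    double hoodieFraction = 0.6;                   // hoodie.memory.fraction
    double userAvailable = executorMemory * (1 - engineFraction) / cores; // ~0.8 GiB per task
    long forMerge = (long) Math.floor(userAvailable * hoodieFraction);    // ~0.48 GiB for the spillable map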
framework_SassLinker_writeFromInputStream
/**
 * Writes the contents of an InputStream out to a file.
 *
 * @param contents the stream to read from
 * @param tempfile the file to write to
 * @throws IOException if reading or writing fails
 */
private void writeFromInputStream(InputStream contents, File tempfile) throws IOException {
    // write the inputStream to a FileOutputStream, closing both streams even on failure
    try (InputStream in = contents;
            OutputStream out = new FileOutputStream(tempfile)) {
        int read;
        byte[] bytes = new byte[1024];
        while ((read = in.read(bytes)) != -1) {
            out.write(bytes, 0, read);
        }
        out.flush();
    }
}
3.68
pulsar_PositionAckSetUtil_isAckSetOverlap
// This method is to compare two ack sets, checking whether they overlap or not
public static boolean isAckSetOverlap(long[] currentAckSet, long[] otherAckSet) {
    if (currentAckSet == null || otherAckSet == null) {
        return false;
    }

    BitSetRecyclable currentBitSet = BitSetRecyclable.valueOf(currentAckSet);
    BitSetRecyclable otherBitSet = BitSetRecyclable.valueOf(otherAckSet);
    currentBitSet.flip(0, currentBitSet.size());
    otherBitSet.flip(0, otherBitSet.size());
    currentBitSet.and(otherBitSet);
    boolean isAckSetRepeated = !currentBitSet.isEmpty();
    currentBitSet.recycle();
    otherBitSet.recycle();
    return isAckSetRepeated;
}
3.68
morf_LoggingSqlScriptVisitor_afterExecute
/**
 * {@inheritDoc}
 *
 * @see org.alfasoftware.morf.jdbc.SqlScriptExecutor.SqlScriptVisitor#afterExecute(java.lang.String, long)
 */
@Override
public void afterExecute(String sql, long numberOfRowsUpdated) {
    log.info(logSchemaPositionPrefix() + "Completed [" + sql + "] with ["
        + numberOfRowsUpdated + "] rows updated");
}
3.68
hadoop_AbstractJavaKeyStoreProvider_locateKeystore
/**
 * Open up and initialize the keyStore.
 *
 * @throws IOException If there is a problem reading the password file
 *                     or a problem reading the keystore.
 */
private void locateKeystore() throws IOException {
    try {
        password = ProviderUtils.locatePassword(CREDENTIAL_PASSWORD_ENV_VAR,
            conf.get(CREDENTIAL_PASSWORD_FILE_KEY));
        if (password == null) {
            password = CREDENTIAL_PASSWORD_DEFAULT.toCharArray();
        }
        KeyStore ks;
        ks = KeyStore.getInstance(getKeyStoreType());
        if (keystoreExists()) {
            stashOriginalFilePermissions();
            try (InputStream in = getInputStreamForFile()) {
                ks.load(in, password);
            }
        } else {
            createPermissions("600");
            // required to create an empty keystore. *sigh*
            ks.load(null, password);
        }
        keyStore = ks;
    } catch (KeyStoreException e) {
        throw new IOException("Can't create keystore", e);
    } catch (GeneralSecurityException e) {
        throw new IOException("Can't load keystore " + getPathAsString(), e);
    }
}
3.68
pulsar_PulsarRecordCursor_haveAvailableCacheSize
/**
 * Check the queue has available cache size quota or not.
 * 1. If the CacheSizeAllocator is NullCacheSizeAllocator, return true.
 * 2. If the available cache size > 0, return true.
 * 3. If the available cache size is invalid and the queue size == 0, return true, ensure not block the query.
 */
private boolean haveAvailableCacheSize(CacheSizeAllocator cacheSizeAllocator, SpscArrayQueue queue) {
    if (cacheSizeAllocator instanceof NullCacheSizeAllocator) {
        return true;
    }
    return cacheSizeAllocator.getAvailableCacheSize() > 0 || queue.size() == 0;
}
3.68
framework_VAbstractOrderedLayout_createSlot
/**
 * Create a slot to be added to the layout.
 *
 * This method is called automatically by {@link #getSlot(Widget)} when a
 * new slot is needed. It should not be called directly by the user, but can
 * be overridden to customize slot creation.
 *
 * @since 7.6
 * @param widget
 *            the widget for which a slot is being created
 * @return created Slot
 */
protected Slot createSlot(Widget widget) {
    Slot slot = GWT.create(Slot.class);
    slot.setLayout(this);
    slot.setWidget(widget);
    return slot;
}
3.68
hibernate-validator_GroupConversionHelper_asDescriptors
/**
 * Returns a set with {@link GroupConversionDescriptor}s representing the
 * underlying group conversions.
 *
 * @return A set with group conversion descriptors. May be empty, but never
 *         {@code null}.
 */
public Set<GroupConversionDescriptor> asDescriptors() {
    Set<GroupConversionDescriptor> descriptors = newHashSet( groupConversions.size() );

    for ( Entry<Class<?>, Class<?>> conversion : groupConversions.entrySet() ) {
        descriptors.add(
                new GroupConversionDescriptorImpl(
                        conversion.getKey(),
                        conversion.getValue()
                )
        );
    }

    return CollectionHelper.toImmutableSet( descriptors );
}
3.68
hibernate-validator_ProgrammaticMetaDataProvider_mergeAnnotationProcessingOptions
/**
 * Creates a single merged {@code AnnotationProcessingOptions} in case multiple programmatic mappings are provided.
 * <p>
 * Note that it is made sure at this point that no element (type, property, method etc.) is configured more than once within
 * all the given contexts. So the "merge" pulls together the information for all configured elements, but it will never
 * merge several configurations for one given element.
 *
 * @param mappings set of mapping contexts providing annotation processing options to be merged
 *
 * @return a single annotation processing options object
 */
private static AnnotationProcessingOptions mergeAnnotationProcessingOptions(Set<DefaultConstraintMapping> mappings) {
    // if we only have one mapping we can return the context of just this mapping
    if ( mappings.size() == 1 ) {
        return mappings.iterator().next().getAnnotationProcessingOptions();
    }

    AnnotationProcessingOptions options = new AnnotationProcessingOptionsImpl();

    for ( DefaultConstraintMapping mapping : mappings ) {
        options.merge( mapping.getAnnotationProcessingOptions() );
    }

    return options;
}
3.68
hadoop_RouterRMAdminService_getPipelines
/**
 * Gets the Request interceptor chains for all the users.
 *
 * @return the request interceptor chains.
 */
@VisibleForTesting
protected Map<String, RequestInterceptorChainWrapper> getPipelines() {
    return this.userPipelineMap;
}
3.68
dubbo_DynamicConfiguration_getRuleKey
/**
 * The format is '{interfaceName}:[version]:[group]'.
 *
 * @return the rule key derived from the given URL
 */
static String getRuleKey(URL url) {
    return url.getColonSeparatedKey();
}
3.68
flink_AbstractPythonFunctionOperator_isBundleFinished
/** Returns whether the bundle is finished. */
public boolean isBundleFinished() {
    return elementCount == 0;
}
3.68
graphhopper_GraphHopper__getOSMFile
/**
 * Currently we use this for a few tests where the dataReaderFile is loaded from the classpath
 */
protected File _getOSMFile() {
    return new File(osmFile);
}
3.68
morf_SqlDialect_sqlForDefaultClauseLiteral
/**
 * Creates the representation of the default clause literal value.
 *
 * @param column The column whose default will be converted.
 * @return An SQL fragment representing the literal in a DEFAULT clause in an SQL statement
 */
protected String sqlForDefaultClauseLiteral(Column column) {
    return getSqlFrom(new FieldLiteral(column.getDefaultValue(), column.getType()));
}
3.68
cron-utils_FieldConstraintsBuilder_forField
/**
 * Creates range constraints according to CronFieldName parameter.
 *
 * @param field - CronFieldName
 * @return FieldConstraintsBuilder instance
 */
public FieldConstraintsBuilder forField(final CronFieldName field) {
    switch (field) {
        case SECOND:
        case MINUTE:
            endRange = 59;
            return this;
        case HOUR:
            endRange = 23;
            return this;
        case DAY_OF_WEEK:
            stringMapping = daysOfWeekMapping();
            endRange = 6;
            return this;
        case DAY_OF_MONTH:
            startRange = 1;
            endRange = 31;
            return this;
        case MONTH:
            stringMapping = monthsMapping();
            startRange = 1;
            endRange = 12;
            return this;
        case DAY_OF_YEAR:
            startRange = 1;
            endRange = 366;
            return this;
        default:
            return this;
    }
}
3.68
hadoop_TimelineFilterList_getOperator
/**
 * Get the operator.
 *
 * @return operator
 */
public Operator getOperator() {
    return operator;
}
3.68
hbase_RequestConverter_buildGetClusterStatusRequest
/**
 * Creates a protocol buffer GetClusterStatusRequest
 *
 * @return A GetClusterStatusRequest
 */
public static GetClusterStatusRequest buildGetClusterStatusRequest(EnumSet<Option> options) {
    return GetClusterStatusRequest.newBuilder()
        .addAllOptions(ClusterMetricsBuilder.toOptions(options)).build();
}
3.68
flink_ZooKeeperUtils_getPathForJob
/** Returns the JobID as a String (with leading slash). */
public static String getPathForJob(JobID jobId) {
    checkNotNull(jobId, "Job ID");
    return String.format("/%s", jobId);
}
3.68
cron-utils_CronDefinitionBuilder_withSeconds
/**
 * Adds definition for seconds field.
 *
 * @return new FieldDefinitionBuilder instance
 */
public FieldDefinitionBuilder withSeconds() {
    return new FieldDefinitionBuilder(this, CronFieldName.SECOND);
}
3.68