Dataset columns:
  name          string   (length 12 to 178)
  code_snippet  string   (length 8 to 36.5k)
  score         float64  (range 3.26 to 3.68)
flink_TimeIndicatorTypeInfo_createSerializer
// this replaces the effective serializer by a LongSerializer
// it is a hacky but efficient solution to keep the object creation overhead low but still
// be compatible with the corresponding SqlTimestampTypeInfo
@Override
@SuppressWarnings("unchecked")
public TypeSerializer<Timestamp> createSerializer(ExecutionConfig executionConfig) {
    return (TypeSerializer) LongSerializer.INSTANCE;
}
3.68
rocketmq-connect_ConnectorPluginsResource_listConnectorPlugins
/**
 * List connector plugins.
 *
 * @param context request context
 */
public void listConnectorPlugins(Context context) {
    synchronized (this) {
        List<PluginInfo> pluginInfos = Collections.unmodifiableList(connectorPlugins.stream()
            .filter(p -> PluginType.SINK.equals(p.getType()) || PluginType.SOURCE.equals(p.getType()))
            .collect(Collectors.toList()));
        context.json(new HttpResponse<>(context.status(), pluginInfos));
    }
}
3.68
framework_CacheFlushNotifier_removeDeadReferences
/**
 * Removes dead references from the instance list.
 */
private static void removeDeadReferences() {
    Reference<? extends SQLContainer> dead = deadInstances.poll();
    while (dead != null) {
        allInstances.remove(dead);
        dead = deadInstances.poll();
    }
}
3.68
querydsl_SQLExpressions_regrIntercept
/**
 * REGR_INTERCEPT returns the y-intercept of the regression line.
 *
 * @param arg1 first arg
 * @param arg2 second arg
 * @return regr_intercept(arg1, arg2)
 */
public static WindowOver<Double> regrIntercept(
        Expression<? extends Number> arg1, Expression<? extends Number> arg2) {
    return new WindowOver<Double>(Double.class, SQLOps.REGR_INTERCEPT, arg1, arg2);
}
3.68
hbase_AsyncAdmin_unassign
/**
 * Unassign a region from current hosting regionserver. Region will then be assigned to a
 * regionserver chosen at random. Region could be reassigned back to the same server. Use
 * {@link #move(byte[], ServerName)} if you want to control the region movement.
 * @param regionName Encoded or full name of region to unassign. Will clear any existing
 *          RegionPlan if one found.
 * @param forcible If true, force unassign (Will remove region from regions-in-transition too if
 *          present. If results in double assignment use hbck -fix to resolve. To be used
 *          by experts).
 * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead.
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-24875">HBASE-24875</a>
 */
@Deprecated
default CompletableFuture<Void> unassign(byte[] regionName, boolean forcible) {
    return unassign(regionName);
}
3.68
flink_IterableUtils_flatMap
/**
 * Flatmap the two-dimensional {@link Iterable} into a one-dimensional {@link Iterable} and
 * convert the keys into items.
 *
 * @param itemGroups to flatmap
 * @param mapper convert the {@link K} into {@link V}
 * @param <K> type of key in the two-dimensional iterable
 * @param <V> type of items that are mapped to
 * @param <G> iterable of {@link K}
 * @return flattened one-dimensional {@link Iterable} from the given two-dimensional {@link
 *     Iterable}
 */
@Internal
public static <K, V, G extends Iterable<K>> Iterable<V> flatMap(
        Iterable<G> itemGroups, Function<K, V> mapper) {
    return () ->
            new Iterator<V>() {
                private final Iterator<G> groupIterator = itemGroups.iterator();
                private Iterator<K> itemIterator;

                @Override
                public boolean hasNext() {
                    while (itemIterator == null || !itemIterator.hasNext()) {
                        if (!groupIterator.hasNext()) {
                            return false;
                        } else {
                            itemIterator = groupIterator.next().iterator();
                        }
                    }
                    return true;
                }

                @Override
                public V next() {
                    if (hasNext()) {
                        return mapper.apply(itemIterator.next());
                    } else {
                        throw new NoSuchElementException();
                    }
                }
            };
}
3.68
flink_SkipListKeySerializer_serializeToSegment
/**
 * Serialize the key and namespace to bytes. The format is:
 *   - int: length of serialized namespace
 *   - byte[]: serialized namespace
 *   - int: length of serialized key
 *   - byte[]: serialized key
 */
MemorySegment serializeToSegment(K key, N namespace) {
    outputStream.reset();
    try {
        // serialize namespace
        outputStream.setPosition(Integer.BYTES);
        namespaceSerializer.serialize(namespace, outputView);
    } catch (IOException e) {
        throw new RuntimeException("Failed to serialize namespace", e);
    }

    int keyStartPos = outputStream.getPosition();
    try {
        // serialize key
        outputStream.setPosition(keyStartPos + Integer.BYTES);
        keySerializer.serialize(key, outputView);
    } catch (IOException e) {
        throw new RuntimeException("Failed to serialize key", e);
    }

    final byte[] result = outputStream.toByteArray();
    final MemorySegment segment = MemorySegmentFactory.wrap(result);

    // set length of namespace and key
    segment.putInt(0, keyStartPos - Integer.BYTES);
    segment.putInt(keyStartPos, result.length - keyStartPos - Integer.BYTES);

    return segment;
}
3.68
flink_MemoryManager_releaseAllMemory
/**
 * Releases all reserved memory chunks from an owner to this memory manager.
 *
 * @param owner The owner to associate with the memory reservation, for the fallback release.
 */
public void releaseAllMemory(Object owner) {
    checkMemoryReservationPreconditions(owner, 0L);
    Long memoryReservedForOwner = reservedMemory.remove(owner);
    if (memoryReservedForOwner != null) {
        memoryBudget.releaseMemory(memoryReservedForOwner);
    }
}
3.68
hadoop_TFile_initBlock
/**
 * Load a compressed block for reading. Expects blockIndex to be valid.
 *
 * @throws IOException
 */
private void initBlock(int blockIndex) throws IOException {
    klen = -1;
    if (blkReader != null) {
        try {
            blkReader.close();
        } finally {
            blkReader = null;
        }
    }
    blkReader = reader.getBlockReader(blockIndex);
    currentLocation.set(blockIndex, 0);
}
3.68
flink_CatalogTableImpl_fromProperties
/** Construct a {@link CatalogTableImpl} from complete properties that contain the table schema. */
public static CatalogTableImpl fromProperties(Map<String, String> properties) {
    DescriptorProperties descriptorProperties = new DescriptorProperties(false);
    descriptorProperties.putProperties(properties);
    TableSchema tableSchema = descriptorProperties.getTableSchema(SCHEMA);
    List<String> partitionKeys = descriptorProperties.getPartitionKeys();
    return new CatalogTableImpl(
            tableSchema,
            partitionKeys,
            removeRedundant(properties, tableSchema, partitionKeys),
            "");
}
3.68
graphhopper_GraphHopper_clean
/**
 * Removes the on-disc routing files. Call only after calling close, or before importOrLoad or
 * load.
 */
public void clean() {
    if (getGraphHopperLocation().isEmpty())
        throw new IllegalStateException("Cannot clean GraphHopper without specified graphHopperLocation");
    File folder = new File(getGraphHopperLocation());
    removeDir(folder);
}
3.68
flink_ConstraintEnforcer_build
/**
 * If none of the NOT NULL, CHAR/VARCHAR length, or BINARY/VARBINARY enforcers is configured,
 * null is returned.
 */
public ConstraintEnforcer build() {
    if (isConfigured) {
        String operatorName = "ConstraintEnforcer[" + String.join(", ", operatorNames) + "]";
        return new ConstraintEnforcer(
                notNullEnforcer,
                notNullFieldIndices,
                typeLengthEnforcer,
                charFieldInfo != null
                        ? charFieldInfo.stream().mapToInt(fi -> fi.fieldIdx).toArray()
                        : null,
                charFieldInfo != null
                        ? charFieldInfo.stream().mapToInt(fi -> fi.length).toArray()
                        : null,
                charFieldInfo != null ? buildCouldPad(charFieldInfo) : null,
                binaryFieldInfo != null
                        ? binaryFieldInfo.stream().mapToInt(fi -> fi.fieldIdx).toArray()
                        : null,
                binaryFieldInfo != null
                        ? binaryFieldInfo.stream().mapToInt(fi -> fi.length).toArray()
                        : null,
                binaryFieldInfo != null ? buildCouldPad(binaryFieldInfo) : null,
                allFieldNames,
                operatorName);
    }
    return null;
}
3.68
framework_VaadinFinderLocatorStrategy_getWidgetName
/**
 * Extracts the name of the widget class from a path fragment.
 *
 * @param pathFragment
 *            the path fragment
 * @return the name of the widget class.
 */
private String getWidgetName(String pathFragment) {
    String widgetName = pathFragment;
    int ixBracket = pathFragment.indexOf('[');
    if (ixBracket >= 0) {
        widgetName = pathFragment.substring(0, ixBracket);
    }
    return widgetName;
}
3.68
hibernate-validator_AnnotationMetaDataProvider_run
/**
 * Runs the given privileged action, using a privileged block if required.
 * <p>
 * <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
 * privileged actions within HV's protection domain.
 */
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in JDK17")
private <T> T run(PrivilegedAction<T> action) {
    return System.getSecurityManager() != null ? AccessController.doPrivileged( action ) : action.run();
}
3.68
framework_GridElement_getRow
/**
 * Gets row element with given row index.
 *
 * @param index
 *            Row index
 * @return Row element with given index.
 */
public GridRowElement getRow(int index) {
    scrollToRow(index);
    return getSubPart("#cell[" + index + "]").wrap(GridRowElement.class);
}
3.68
hadoop_Sets_symmetricDifference
/**
 * Returns the symmetric difference of two sets as an unmodifiable set.
 * The returned set contains all elements that are contained in either
 * {@code set1} or {@code set2} but not in both. The iteration order of the
 * returned set is undefined.
 *
 * <p>Results are undefined if {@code set1} and {@code set2} are sets based
 * on different equivalence relations (as {@code HashSet}, {@code TreeSet},
 * and the keySet of an {@code IdentityHashMap} all are).
 *
 * @param set1 set1.
 * @param set2 set2.
 * @param <E> Generics Type E.
 * @return an unmodifiable set containing the symmetric difference of the two sets.
 */
public static <E> Set<E> symmetricDifference(
        final Set<E> set1, final Set<E> set2) {
    if (set1 == null) {
        throw new NullPointerException("set1");
    }
    if (set2 == null) {
        throw new NullPointerException("set2");
    }
    Set<E> intersection = new HashSet<>(set1);
    intersection.retainAll(set2);
    Set<E> symmetricDifference = new HashSet<>(set1);
    symmetricDifference.addAll(set2);
    symmetricDifference.removeAll(intersection);
    return Collections.unmodifiableSet(symmetricDifference);
}
3.68
flink_Tuple12_of
/**
 * Creates a new tuple and assigns the given values to the tuple's fields. This is more
 * convenient than using the constructor, because the compiler can infer the generic type
 * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
 * Tuple3<Integer, Double, String>(n, x, s)}
 */
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11>
        Tuple12<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> of(
                T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10,
                T11 f11) {
    return new Tuple12<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11);
}
3.68
hudi_HoodieFileGroup_getLatestFileSliceBefore
/**
 * Obtain the latest file slice, up to an instant time, i.e., < maxInstantTime.
 *
 * @param maxInstantTime Max Instant Time
 * @return the latest file slice
 */
public Option<FileSlice> getLatestFileSliceBefore(String maxInstantTime) {
    return Option.fromJavaOptional(getAllFileSlices()
        .filter(slice -> compareTimestamps(slice.getBaseInstantTime(), LESSER_THAN, maxInstantTime))
        .findFirst());
}
3.68
hadoop_ErasureCoderOptions_allowVerboseDump
/**
 * Whether to allow dumping verbose debug info.
 * @return true if verbose debug info is desired, false otherwise
 */
public boolean allowVerboseDump() {
    return allowVerboseDump;
}
3.68
hadoop_ParsedTaskAttempt_obtainCounters
/**
 * @return the task attempt counters
 */
public Map<String, Long> obtainCounters() {
    return countersMap;
}
3.68
hadoop_HAState_setStateInternal
/**
 * Internal method to move from the existing state to a new state.
 * @param context HA context
 * @param s new state
 * @throws ServiceFailedException on failure to transition to new state.
 */
protected final void setStateInternal(final HAContext context, final HAState s)
        throws ServiceFailedException {
    prepareToExitState(context);
    s.prepareToEnterState(context);
    context.writeLock();
    try {
        exitState(context);
        context.setState(s);
        s.enterState(context);
        s.updateLastHATransitionTime();
    } finally {
        context.writeUnlock();
    }
}
3.68
hadoop_BondedS3AStatisticsContext_incrementCounter
/**
 * Increment a specific counter.
 * <p>
 * No-op if not defined.
 * @param op operation
 * @param count increment value
 */
@Override
public void incrementCounter(Statistic op, long count) {
    getInstrumentation().incrementCounter(op, count);
}
3.68
morf_SqlDialect_getColumnRepresentation
/**
 * Gets the column representation for the datatype, etc.
 *
 * @param dataType the column datatype.
 * @param width the column width.
 * @param scale the column scale.
 * @return a string representation of the column definition.
 */
protected String getColumnRepresentation(DataType dataType, int width, int scale) {
    switch (dataType) {
        case STRING:
            return width == 0 ? "VARCHAR" : String.format("VARCHAR(%d)", width);

        case DECIMAL:
            return width == 0 ? "DECIMAL" : String.format("DECIMAL(%d,%d)", width, scale);

        case DATE:
            return "DATE";

        case BOOLEAN:
            return "BIT";

        case BIG_INTEGER:
            return "BIGINT";

        case INTEGER:
            return "INTEGER";

        case BLOB:
            return "BLOB";

        case CLOB:
            return "CLOB";

        default:
            throw new UnsupportedOperationException("Cannot map column with type [" + dataType + "]");
    }
}
3.68
framework_AbstractSelect_getDropLocation
/**
 * Returns a detailed vertical location where the drop happened on Item.
 */
public VerticalDropLocation getDropLocation() {
    String detail = (String) getData("detail");
    if (detail == null) {
        return null;
    }
    return VerticalDropLocation.valueOf(detail);
}
3.68
hudi_SnapshotLoadQuerySplitter_getNextCheckpoint
/**
 * Retrieves the next checkpoint based on query information.
 *
 * @param df The dataset to process.
 * @param queryInfo The query information object.
 * @return Updated query information with the next checkpoint; in case of an empty checkpoint,
 *         the returned end point is the same as queryInfo.getEndInstant().
 */
public QueryInfo getNextCheckpoint(Dataset<Row> df, QueryInfo queryInfo) {
    return getNextCheckpoint(df, queryInfo.getStartInstant())
        .map(checkpoint -> queryInfo.withUpdatedEndInstant(checkpoint))
        .orElse(queryInfo);
}
3.68
hbase_IdLock_isHeldByCurrentThread
/**
 * Test whether the given id is already locked by the current thread.
 */
public boolean isHeldByCurrentThread(long id) {
    Thread currentThread = Thread.currentThread();
    Entry entry = map.get(id);
    if (entry == null) {
        return false;
    }
    synchronized (entry) {
        return currentThread.equals(entry.holder);
    }
}
3.68
hbase_AbstractFSWALProvider_getLogFileSize0
/**
 * Iff the given WALFactory is using the DefaultWALProvider for meta and/or non-meta, count the
 * size of files (only rolled). If either of them isn't, count 0 for that provider.
 */
@Override
protected long getLogFileSize0() {
    T log = this.wal;
    return log == null ? 0 : log.getLogFileSize();
}
3.68
framework_FilesystemContainer_addItem
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.data.Container#addItem(java.lang.Object)
 */
@Override
public Item addItem(Object itemId) throws UnsupportedOperationException {
    throw new UnsupportedOperationException(
            "File system container does not support this operation");
}
3.68
pulsar_ManagedLedgerConfig_getRetentionTimeMillis
/**
 * @return duration for which messages are retained
 */
public long getRetentionTimeMillis() {
    return retentionTimeMs;
}
3.68
flink_StateBackend_createKeyedStateBackend
/**
 * Creates a new {@link CheckpointableKeyedStateBackend} with the given managed memory fraction.
 * Backends that use managed memory are required to implement this interface.
 */
default <K> CheckpointableKeyedStateBackend<K> createKeyedStateBackend(
        Environment env,
        JobID jobID,
        String operatorIdentifier,
        TypeSerializer<K> keySerializer,
        int numberOfKeyGroups,
        KeyGroupRange keyGroupRange,
        TaskKvStateRegistry kvStateRegistry,
        TtlTimeProvider ttlTimeProvider,
        MetricGroup metricGroup,
        @Nonnull Collection<KeyedStateHandle> stateHandles,
        CloseableRegistry cancelStreamRegistry,
        double managedMemoryFraction)
        throws Exception {

    // ignore managed memory fraction by default
    return createKeyedStateBackend(
            env,
            jobID,
            operatorIdentifier,
            keySerializer,
            numberOfKeyGroups,
            keyGroupRange,
            kvStateRegistry,
            ttlTimeProvider,
            metricGroup,
            stateHandles,
            cancelStreamRegistry);
}
3.68
AreaShop_GeneralRegion_calculateVolume
/**
 * Calculate the volume of the region (could be expensive for polygon regions).
 * @return Number of blocks in the region
 */
private long calculateVolume() {
    // Use own calculation for polygon regions, as WorldGuard does not implement it and returns 0
    ProtectedRegion region = getRegion();
    if (region instanceof ProtectedPolygonalRegion) {
        Vector min = getMinimumPoint();
        Vector max = getMaximumPoint();

        // Exact, but slow algorithm
        if (getWidth() * getDepth() < 100) {
            long surface = 0;
            for (int x = min.getBlockX(); x <= max.getBlockX(); x++) {
                for (int z = min.getBlockZ(); z <= max.getBlockZ(); z++) {
                    if (region.contains(x, min.getBlockY(), z)) {
                        surface++;
                    }
                }
            }
            return surface * getHeight();
        }
        // Estimate, but quick algorithm
        else {
            List<Vector> points = plugin.getWorldGuardHandler().getRegionPoints(region);
            int numPoints = points.size();
            if (numPoints < 3) {
                return 0;
            }
            double area = 0;
            int x1, x2, z1, z2;
            for (int i = 0; i <= numPoints - 2; i++) {
                x1 = points.get(i).getBlockX();
                z1 = points.get(i).getBlockZ();
                x2 = points.get(i + 1).getBlockX();
                z2 = points.get(i + 1).getBlockZ();
                area += ((z1 + z2) * (x1 - x2));
            }
            x1 = points.get(numPoints - 1).getBlockX();
            z1 = points.get(numPoints - 1).getBlockZ();
            x2 = points.get(0).getBlockX();
            z2 = points.get(0).getBlockZ();
            area += ((z1 + z2) * (x1 - x2));
            area = Math.ceil(Math.abs(area) / 2);
            return (long) (area * getHeight());
        }
    } else {
        return region.volume();
    }
}
3.68
flink_SimpleVersionedSerialization_readVersionAndDeserializeList
/**
 * Deserializes the version and data from a stream.
 *
 * <p>This method deserializes data serialized via {@link
 * #writeVersionAndSerializeList(SimpleVersionedSerializer, List, DataOutputView)}.
 *
 * <p>The first four bytes will be interpreted as the version. The next four bytes will be
 * interpreted as the length of the list, then length-many data will be read and deserialized
 * via the {@link SimpleVersionedSerializer#deserialize(int, byte[])} method.
 *
 * @param serializer The serializer to deserialize the data with.
 * @param in The stream to deserialize from.
 */
public static <T> List<T> readVersionAndDeserializeList(
        SimpleVersionedSerializer<T> serializer, DataInputView in) throws IOException {
    checkNotNull(serializer);
    checkNotNull(in);

    final int serializerVersion = in.readInt();
    final int dataSize = in.readInt();

    final List<T> data = new ArrayList<>();
    for (int ignored = 0; ignored < dataSize; ignored++) {
        final int datumSize = in.readInt();
        final byte[] datum = new byte[datumSize];
        in.readFully(datum);
        data.add(serializer.deserialize(serializerVersion, datum));
    }
    return data;
}
3.68
flink_FlatMapIterator_flatMap
/** Delegates calls to the {@link #flatMap(Object)} method. */
@Override
public final void flatMap(IN value, Collector<OUT> out) throws Exception {
    for (Iterator<OUT> iter = flatMap(value); iter.hasNext(); ) {
        out.collect(iter.next());
    }
}
3.68
framework_Calendar_getDropHandler
/**
 * Get the currently active drop handler.
 */
@Override
public DropHandler getDropHandler() {
    return dropHandler;
}
3.68
hbase_BufferedMutatorParams_getImplementationClassName
/**
 * @return Name of the class we will use when we construct a {@link BufferedMutator} instance or
 *         null if default implementation.
 * @deprecated Since 3.0.0, will be removed in 4.0.0. You can no longer set it, as the
 *             implementation has to rely on too much HBase-internal machinery.
 */
@Deprecated
public String getImplementationClassName() {
    return this.implementationClassName;
}
3.68
zxing_WifiConfigManager_updateNetwork
/**
 * Update the network: either create a new network or modify an existing network.
 * @param config the new network configuration
 */
private static void updateNetwork(WifiManager wifiManager, WifiConfiguration config) {
    Integer foundNetworkID = findNetworkInExistingConfig(wifiManager, config.SSID);
    if (foundNetworkID != null) {
        Log.i(TAG, "Removing old configuration for network " + config.SSID);
        wifiManager.removeNetwork(foundNetworkID);
        wifiManager.saveConfiguration();
    }
    int networkId = wifiManager.addNetwork(config);
    if (networkId >= 0) {
        // Try to disable the current network and start a new one.
        if (wifiManager.enableNetwork(networkId, true)) {
            Log.i(TAG, "Associating to network " + config.SSID);
            wifiManager.saveConfiguration();
        } else {
            Log.w(TAG, "Failed to enable network " + config.SSID);
        }
    } else {
        Log.w(TAG, "Unable to add network " + config.SSID);
    }
}
3.68
framework_DDUtil_getHorizontalDropLocation
/**
 * Get horizontal drop location.
 *
 * @param element
 *            the drop target element
 * @param event
 *            the latest {@link NativeEvent} that relates to this drag
 *            operation
 * @param leftRightRatio
 *            the ratio that determines how big portion of the element on
 *            each end counts for indicating desire to drop beside the
 *            element rather than on top of it
 * @return the drop location
 */
public static HorizontalDropLocation getHorizontalDropLocation(
        Element element, NativeEvent event, double leftRightRatio) {
    int clientX = WidgetUtil.getTouchOrMouseClientX(event);

    // Event coordinates are relative to the viewport, element absolute
    // position is relative to the document. Make element position relative
    // to viewport by adjusting for viewport scrolling. See #6021
    int elementLeft = element.getAbsoluteLeft() - Window.getScrollLeft();
    int offsetWidth = element.getOffsetWidth();
    int fromLeft = clientX - elementLeft;

    float percentageFromLeft = (fromLeft / (float) offsetWidth);
    if (percentageFromLeft < leftRightRatio) {
        return HorizontalDropLocation.LEFT;
    } else if (percentageFromLeft > 1 - leftRightRatio) {
        return HorizontalDropLocation.RIGHT;
    } else {
        return HorizontalDropLocation.CENTER;
    }
}
3.68
hadoop_AbfsOperationMetrics_getEndTime
/**
 * @return end time of metric collection.
 */
long getEndTime() {
    return endTime;
}
3.68
hbase_HBaseTestingUtility_startMiniHBaseCluster
/**
 * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
 * {@link #startMiniCluster()}. All other options will use default values, defined in
 * {@link StartMiniClusterOption.Builder}.
 * @param numMasters Master node number.
 * @param numRegionServers Number of region servers.
 * @param rsPorts Ports that RegionServer should use.
 * @param masterClass The class to use as HMaster, or null for default.
 * @param rsClass The class to use as HRegionServer, or null for default.
 * @param createRootDir Whether to create a new root or data directory path.
 * @param createWALDir Whether to create a new WAL directory.
 * @return The mini HBase cluster created.
 * @see #shutdownMiniHBaseCluster()
 * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
 *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
 * @see #startMiniHBaseCluster(StartMiniClusterOption)
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
 */
@Deprecated
public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
        List<Integer> rsPorts, Class<? extends HMaster> masterClass,
        Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass,
        boolean createRootDir, boolean createWALDir) throws IOException, InterruptedException {
    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
        .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
        .rsPorts(rsPorts).createRootDir(createRootDir).createWALDir(createWALDir).build();
    return startMiniHBaseCluster(option);
}
3.68
hibernate-validator_ComposingConstraintTree_reportAsSingleViolation
/**
 * @return {@code true} if the current constraint should be reported as a single violation,
 * {@code false} otherwise. When using negation, we only report the single top-level violation,
 * as it is hard, especially for ALL_FALSE, to give meaningful reports.
 */
private boolean reportAsSingleViolation() {
    return getDescriptor().isReportAsSingleViolation()
            || getDescriptor().getCompositionType() == ALL_FALSE;
}
3.68
flink_Tuple13_of
/**
 * Creates a new tuple and assigns the given values to the tuple's fields. This is more
 * convenient than using the constructor, because the compiler can infer the generic type
 * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
 * Tuple3<Integer, Double, String>(n, x, s)}
 */
public static <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>
        Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> of(
                T0 f0, T1 f1, T2 f2, T3 f3, T4 f4, T5 f5, T6 f6, T7 f7, T8 f8, T9 f9, T10 f10,
                T11 f11, T12 f12) {
    return new Tuple13<>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12);
}
3.68
hadoop_GlobPattern_compiled
/**
 * @return the compiled pattern
 */
public Pattern compiled() {
    return compiled;
}
3.68
morf_GraphBasedUpgradeNode_nodeListToStringOfNames
/**
 * @param nodes the nodes to be represented
 * @return String representation of the given nodes
 */
private String nodeListToStringOfNames(Collection<GraphBasedUpgradeNode> nodes) {
    return nodes.stream().map(n -> n.getName()).collect(Collectors.joining(", "));
}
3.68
hbase_ReplicationPeerConfig_needToReplicate
/**
 * Decide whether the passed family of the table needs to be replicated to the peer cluster
 * according to this peer config.
 * @param table name of the table
 * @param family family name
 * @return true if (the family of) the table needs to be replicated to the peer cluster. If the
 *         passed family is null, return true if any CFs of the table need to be replicated; if
 *         the passed family is not null, return true if the passed family needs to be replicated.
 */
public boolean needToReplicate(TableName table, byte[] family) {
    String namespace = table.getNamespaceAsString();
    if (replicateAllUserTables) {
        // replicate all user tables, but filter by exclude namespaces and table-cfs config
        if (excludeNamespaces != null && excludeNamespaces.contains(namespace)) {
            return false;
        }
        // trap here, must check existence first since HashMap allows null value.
        if (excludeTableCFsMap == null || !excludeTableCFsMap.containsKey(table)) {
            return true;
        }
        Collection<String> cfs = excludeTableCFsMap.get(table);
        // If cfs is null or empty then we can make sure that we do not need to replicate this
        // table, otherwise, we may still need to replicate the table but filter out some families.
        return cfs != null && !cfs.isEmpty()
            // If exclude-table-cfs contains passed family then we make sure that we do not need
            // to replicate this family.
            && (family == null || !cfs.contains(Bytes.toString(family)));
    } else {
        // Not replicate all user tables, so filter by namespaces and table-cfs config
        if (namespaces == null && tableCFsMap == null) {
            return false;
        }
        // First filter by namespaces config
        // If table's namespace in peer config, all the tables data are applicable for replication
        if (namespaces != null && namespaces.contains(namespace)) {
            return true;
        }
        // If table-cfs contains this table then we can make sure that we need replicate some CFs
        // of this table. Further we need all CFs if tableCFsMap.get(table) is null or empty.
        return tableCFsMap != null && tableCFsMap.containsKey(table)
            && (family == null || CollectionUtils.isEmpty(tableCFsMap.get(table))
                // If table-cfs must contain passed family then we need to replicate this family.
                || tableCFsMap.get(table).contains(Bytes.toString(family)));
    }
}
3.68
framework_UidlRequestHandler_handleSessionExpired
/*
 * (non-Javadoc)
 *
 * @see
 * com.vaadin.server.SessionExpiredHandler#handleSessionExpired(com.vaadin
 * .server.VaadinRequest, com.vaadin.server.VaadinResponse)
 */
@Override
public boolean handleSessionExpired(VaadinRequest request, VaadinResponse response)
        throws IOException {
    if (!ServletPortletHelper.isUIDLRequest(request)) {
        return false;
    }
    VaadinService service = request.getService();
    SystemMessages systemMessages = service.getSystemMessages(
            ServletPortletHelper.findLocale(null, null, request), request);
    service.writeUncachedStringResponse(response,
            JsonConstants.JSON_CONTENT_TYPE,
            VaadinService.createCriticalNotificationJSON(
                    systemMessages.getSessionExpiredCaption(),
                    systemMessages.getSessionExpiredMessage(), null,
                    systemMessages.getSessionExpiredURL()));
    return true;
}
3.68
hadoop_ApplicationInitializationContext_getApplicationId
/**
 * Get {@link ApplicationId} of the application.
 *
 * @return the application's ID
 */
public ApplicationId getApplicationId() {
    return this.applicationId;
}
3.68
hadoop_MutableQuantiles_addQuantileInfo
/**
 * Add entry to quantileInfos array.
 *
 * @param i array index.
 * @param info info to be added to quantileInfos array.
 */
public synchronized void addQuantileInfo(int i, MetricsInfo info) {
    this.quantileInfos[i] = info;
}
3.68
flink_TaskInfo_getIndexOfThisSubtask
/**
 * Gets the number of this parallel subtask. The numbering starts from 0 and goes up to
 * parallelism-1 (parallelism as returned by {@link #getNumberOfParallelSubtasks()}).
 *
 * @return The index of the parallel subtask.
 */
public int getIndexOfThisSubtask() {
    return this.indexOfSubtask;
}
3.68
flink_MaxwellJsonFormatFactory_validateEncodingFormatOptions
/** Validator for the Maxwell encoding format. */
private static void validateEncodingFormatOptions(ReadableConfig tableOptions) {
    JsonFormatOptionsUtil.validateEncodingFormatOptions(tableOptions);
}
3.68
hadoop_StripedBlockReader_actualReadFromBlock
/**
 * Perform actual reading of bytes from block.
 */
private BlockReadStats actualReadFromBlock() throws IOException {
    DataNodeFaultInjector.get().delayBlockReader();
    int len = buffer.remaining();
    int n = 0;
    while (n < len) {
        int nread = blockReader.read(buffer);
        if (nread <= 0) {
            break;
        }
        n += nread;
        stripedReader.getReconstructor().incrBytesRead(isLocal, nread);
    }
    return new BlockReadStats(n, blockReader.isShortCircuit(),
        blockReader.getNetworkDistance());
}
3.68
flink_Catalog_getTable
/**
 * Returns a {@link CatalogTable} or {@link CatalogView} at a specific time identified by the
 * given {@link ObjectPath}. The framework will resolve the metadata objects when necessary.
 *
 * @param tablePath Path of the table or view
 * @param timestamp Timestamp of the table snapshot, which is milliseconds since 1970-01-01
 *     00:00:00 UTC
 * @return The requested table or view
 * @throws TableNotExistException if the target does not exist
 * @throws CatalogException in case of any runtime exception
 */
default CatalogBaseTable getTable(ObjectPath tablePath, long timestamp)
        throws TableNotExistException, CatalogException {
    throw new UnsupportedOperationException(
            String.format(
                    "getTable(ObjectPath, long) is not implemented for %s.", this.getClass()));
}
3.68
framework_ComboBox_isMultiSelect
/**
 * ComboBox does not support multi select mode.
 *
 * @deprecated As of 7.0, use {@link ListSelect}, {@link OptionGroup} or
 *             {@link TwinColSelect} instead
 *
 * @see com.vaadin.ui.AbstractSelect#isMultiSelect()
 *
 * @return false
 */
@Deprecated
@Override
public boolean isMultiSelect() {
    return false;
}
3.68
hadoop_CredentialProviderListFactory_createAWSCredentialProviderList
/**
 * Create the AWS credentials from the providers, the URI and
 * the key {@link Constants#AWS_CREDENTIALS_PROVIDER} in the configuration.
 * @param binding Binding URI - may be null
 * @param conf filesystem configuration
 * @return a credentials provider list
 * @throws IOException Problems loading the providers (including reading
 *         secrets from credential files).
 */
public static AWSCredentialProviderList createAWSCredentialProviderList(
        @Nullable URI binding, Configuration conf) throws IOException {
    // this will reject any user:secret entries in the URI
    S3xLoginHelper.rejectSecretsInURIs(binding);
    AWSCredentialProviderList credentials = buildAWSProviderList(binding, conf,
        AWS_CREDENTIALS_PROVIDER, STANDARD_AWS_PROVIDERS, new HashSet<>());
    // make sure the logging message strips out any auth details
    LOG.debug("For URI {}, using credentials {}", binding, credentials);
    return credentials;
}
3.68
hadoop_ResourceUsageMetrics_setCumulativeCpuUsage
/**
 * Set the cumulative CPU usage.
 */
public void setCumulativeCpuUsage(long usage) {
    cumulativeCpuUsage = usage;
}
3.68
framework_VScrollTable_selectFirstRenderedRowInViewPort
/**
 * Selects the first row visible in the table.
 * <p>
 * For internal use only. May be removed or replaced in the future.
 *
 * @param focusOnly
 *            Should the focus only be moved to the first row
 */
public void selectFirstRenderedRowInViewPort(boolean focusOnly) {
    int index = firstRowInViewPort;
    VScrollTableRow firstInViewport = scrollBody.getRowByRowIndex(index);
    if (firstInViewport == null) {
        // this should not happen in normal situations
        return;
    }
    setRowFocus(firstInViewport);
    if (!focusOnly) {
        selectFocusedRow(false, multiselectPending);
        sendSelectedRows();
    }
}
3.68
rocketmq-connect_WrapperStatusListener_onShutdown
/**
 * Invoked after successful shutdown of the task.
 *
 * @param id The id of the task
 */
@Override
public void onShutdown(ConnectorTaskId id) {
    managementService.putSafe(new TaskStatus(id, TaskStatus.State.UNASSIGNED, workerId, generation()));
}
3.68
hadoop_ManifestCommitter_getDestinationDir
/**
 * Get the job destination dir.
 * @return dest dir.
 */
private Path getDestinationDir() {
    return destinationDir;
}
3.68
querydsl_JTSSurfaceExpression_area
/**
 * The area of this Surface, as measured in the spatial reference system of this Surface.
 *
 * @return area
 */
public NumberExpression<Double> area() {
    if (area == null) {
        area = Expressions.numberOperation(Double.class, SpatialOps.AREA, mixin);
    }
    return area;
}
3.68
framework_LayoutManager_getConnection
/**
 * Returns the application connection for this layout manager.
 *
 * @return connection
 */
protected ApplicationConnection getConnection() {
    return connection;
}
3.68
hbase_RSGroupInfo_getConfiguration
/** Getter for fetching an unmodifiable {@link #configuration} map. */
public Map<String, String> getConfiguration() {
    // shallow pointer copy
    return Collections.unmodifiableMap(configuration);
}
3.68
hudi_HoodieRecordGlobalLocation_copy
/**
 * Copy the location with given partition path.
 */
public HoodieRecordGlobalLocation copy(String partitionPath) {
    return new HoodieRecordGlobalLocation(partitionPath, instantTime, fileId, position);
}
3.68
hbase_KeyStoreFileType_getPropertyValue
/**
 * The property string that specifies that a key store or trust store should use this store file
 * type.
 */
public String getPropertyValue() {
    return this.name();
}
3.68
hbase_MobUtils_getMobRegionInfo
/**
 * Gets the RegionInfo of the mob files. This is a dummy region. The mob files are not saved in a
 * region in HBase. It's internally used only.
 * @return A dummy mob region info.
 */
public static RegionInfo getMobRegionInfo(TableName tableName) {
    return RegionInfoBuilder.newBuilder(tableName).setStartKey(MobConstants.MOB_REGION_NAME_BYTES)
        .setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(0).build();
}
3.68
framework_VMenuBar_handleNavigation
/**
 * Handles the keyboard events handled by the MenuBar.
 *
 * @param keycode
 *            The key code received
 * @param ctrl
 *            Whether {@code CTRL} was pressed
 * @param shift
 *            Whether {@code SHIFT} was pressed
 * @return true if the navigation event was handled
 */
public boolean handleNavigation(int keycode, boolean ctrl, boolean shift) {
    // If tab or shift+tab close menus
    if (keycode == KeyCodes.KEY_TAB) {
        setSelected(null);
        hideParents(false);
        menuVisible = false;
        VMenuBar root = getParentMenu();
        while (root != null && root.getParentMenu() != null) {
            root = root.getParentMenu();
        }
        if (root != null) {
            if (shift) {
                root.ignoreFocus = true;
                root.getElement().focus();
                root.ignoreFocus = false;
            } else {
                root.getElement().focus();
                root.setSelected(null);
            }
        } else if (shift) {
            ignoreFocus = true;
            getElement().focus();
            ignoreFocus = false;
        }
        return false;
    }
    if (ctrl || shift || !isEnabled()) {
        // Do not handle tab key, nor ctrl keys
        return false;
    }
    if (keycode == getNavigationLeftKey()) {
        if (getSelected() == null) {
            // If nothing is selected then select the last item
            setSelected(items.get(items.size() - 1));
            if (!getSelected().isSelectable()) {
                handleNavigation(keycode, ctrl, shift);
            }
        } else if (visibleChildMenu == null && getParentMenu() == null) {
            // If this is the root menu then move to the left
            int idx = items.indexOf(getSelected());
            if (idx > 0) {
                setSelected(items.get(idx - 1));
            } else {
                setSelected(items.get(items.size() - 1));
            }
            if (!getSelected().isSelectable()) {
                handleNavigation(keycode, ctrl, shift);
            }
        } else if (visibleChildMenu != null) {
            // Redirect all navigation to the submenu
            visibleChildMenu.handleNavigation(keycode, ctrl, shift);
        } else if (getParentMenu().getParentMenu() == null) {
            // Inside a sub menu, whose parent is a root menu item
            VMenuBar root = getParentMenu();
            root.getSelected().getSubMenu().setSelected(null);
            // #15255 - disable animate-in/out when hide popup
            root.hideChildren(false, false);
            // Get the root menus items and select the previous one
            int idx = root.getItems().indexOf(root.getSelected());
            idx = idx > 0 ? idx : root.getItems().size();
            CustomMenuItem selected = root.getItems().get(--idx);
            while (selected.isSeparator() || !selected.isEnabled()) {
                idx = idx > 0 ? idx : root.getItems().size();
                selected = root.getItems().get(--idx);
            }
            root.setSelected(selected);
            openMenuAndFocusFirstIfPossible(selected);
        } else {
            getParentMenu().getSelected().getSubMenu().setSelected(null);
            getParentMenu().hideChildren();
            getParentMenu().getSelected().getElement().focus();
            getParentMenu().menuVisible = false;
        }
        return true;
    } else if (keycode == getNavigationRightKey()) {
        if (getSelected() == null) {
            // If nothing is selected then select the first item
            setSelected(items.get(0));
            if (!getSelected().isSelectable()) {
                handleNavigation(keycode, ctrl, shift);
            }
        } else if (visibleChildMenu == null && getParentMenu() == null) {
            // If this is the root menu then move to the right
            int idx = items.indexOf(getSelected());
            if (idx < items.size() - 1) {
                setSelected(items.get(idx + 1));
            } else {
                setSelected(items.get(0));
            }
            if (!getSelected().isSelectable()) {
                handleNavigation(keycode, ctrl, shift);
            }
        } else if (visibleChildMenu == null && getSelected().getSubMenu() != null) {
            // If the item has a submenu then show it and move the selection
            // there
            showChildMenu(getSelected());
            menuVisible = true;
            visibleChildMenu.handleNavigation(keycode, ctrl, shift);
        } else if (visibleChildMenu == null) {
            // Get the root menu
            VMenuBar root = getParentMenu();
            while (root.getParentMenu() != null) {
                root = root.getParentMenu();
            }
            // Hide the submenu (#15255 - disable animate-in/out when hide
            // popup)
            root.hideChildren(false, false);
            // Get the root menus items and select the next one
            int idx = root.getItems().indexOf(root.getSelected());
            idx = idx < root.getItems().size() - 1 ? idx : -1;
            CustomMenuItem selected = root.getItems().get(++idx);
            while (selected.isSeparator() || !selected.isEnabled()) {
                idx = idx < root.getItems().size() - 1 ? idx : -1;
                selected = root.getItems().get(++idx);
            }
            root.setSelected(selected);
            openMenuAndFocusFirstIfPossible(selected);
        } else if (visibleChildMenu != null) {
            // Redirect all navigation to the submenu
            visibleChildMenu.handleNavigation(keycode, ctrl, shift);
        }
        return true;
    } else if (keycode == getNavigationUpKey()) {
        if (getSelected() == null) {
            // If nothing is selected then select the last item
            setSelected(items.get(items.size() - 1));
            if (!getSelected().isSelectable()) {
                handleNavigation(keycode, ctrl, shift);
            }
        } else if (visibleChildMenu != null) {
            // Redirect all navigation to the submenu
            visibleChildMenu.handleNavigation(keycode, ctrl, shift);
        } else {
            // Select the previous item if possible or loop to the last
            // item. If menu is in the first popup (opens down), closes the
            // popup. If menu is the root menu, opens the popup.
            int idx = items.indexOf(getSelected());
            if (getParentMenu() == null && visibleChildMenu == null) {
                openMenuAndFocusLastIfPossible(getSelected());
            } else if (idx > 0) {
                setSelected(items.get(idx - 1));
            } else if (getParentMenu() != null
                    && getParentMenu().getParentMenu() == null) {
                getParentMenu().getSelected().getSubMenu().setSelected(null);
                getParentMenu().hideChildren();
                getParentMenu().getSelected().getElement().focus();
                getParentMenu().menuVisible = false;
                return true;
            } else {
                setSelected(items.get(items.size() - 1));
            }
            if (!getSelected().isSelectable()) {
                handleNavigation(keycode, ctrl, shift);
            }
        }
        return true;
    } else if (keycode == getNavigationDownKey()) {
        if (getSelected() == null) {
            // If nothing is selected then select the first item
            selectFirstItem();
        } else if (visibleChildMenu == null && getParentMenu() == null) {
            // If this is the root menu then show the child menu with arrow
            // down, if there is a child menu
            openMenuAndFocusFirstIfPossible(getSelected());
        } else if (visibleChildMenu != null) {
            // Redirect all navigation to the submenu
            visibleChildMenu.handleNavigation(keycode, ctrl, shift);
        } else {
            // Select the next item if possible or loop to the first item
            int idx = items.indexOf(getSelected());
            if (idx < items.size() - 1) {
                setSelected(items.get(idx + 1));
            } else {
                setSelected(items.get(0));
            }
            if (!getSelected().isSelectable()) {
                handleNavigation(keycode, ctrl, shift);
            }
        }
        return true;
    } else if (keycode == getCloseMenuKey()) {
        setSelected(null);
        hideChildren();
        if (getParentMenu() != null) {
            getParentMenu().hideChildren();
            getParentMenu().getSelected().getElement().focus();
        }
        menuVisible = false;
        return true;
    } else if (isNavigationSelectKey(keycode)) {
        if (getSelected() == null) {
            // If nothing is selected then select the first item
            selectFirstItem();
        } else if (!getSelected().isEnabled()) {
            // NOP
        } else if (visibleChildMenu != null) {
            // Redirect all navigation to the submenu
            visibleChildMenu.handleNavigation(keycode, ctrl, shift);
            menuVisible = false;
        } else if (visibleChildMenu == null
                && getSelected().getSubMenu() != null) {
            // If the item has a sub menu then show it and move the
            // selection there
            openMenuAndFocusFirstIfPossible(getSelected());
        } else {
            try {
                triggerEventIfNeeded(getSelected());
                final Command command = getSelected().getCommand();
                if (command != null) {
                    command.execute();
                }
            } finally {
                setSelected(null);
                hideParents(true);
                // #17076 keyboard selected menuitem without children: do
                // not leave menu to visible ("hover open") mode
                menuVisible = false;
                VMenuBar root = getRoot();
                root.ignoreFocus = true;
                root.getElement().focus();
                root.ignoreFocus = false;
            }
        }
        return true;
    }
    return false;
}
3.68
dubbo_RpcContextAttachment_setAttachments
/**
 * Set attachments.
 *
 * @param attachment the attachments to set
 * @return context
 */
@Override
public RpcContextAttachment setAttachments(Map<String, String> attachment) {
    this.attachments.clear();
    if (attachment != null && attachment.size() > 0) {
        this.attachments.putAll(attachment);
    }
    return this;
}
3.68
flink_TableChange_dropColumn
/**
 * A table change to drop column.
 *
 * <p>It is equal to the following statement:
 *
 * <pre>
 *    ALTER TABLE &lt;table_name&gt; DROP COLUMN &lt;column_name&gt;
 * </pre>
 *
 * @param columnName the column to drop.
 * @return a TableChange represents the modification.
 */
static DropColumn dropColumn(String columnName) {
    return new DropColumn(columnName);
}
3.68
hadoop_ResourceInformation_getUnit
/**
 * @return unit
 **/
@ApiModelProperty(value = "")
@JsonProperty("unit")
public String getUnit() {
    return unit == null ? "" : unit;
}
3.68
hadoop_NamenodeStatusReport_getNameserviceId
/**
 * Get the name service identifier.
 *
 * @return The name service identifier.
 */
public String getNameserviceId() {
    return this.nameserviceId;
}
3.68
framework_DataCommunicator_getDataObject
/**
 * Creates the JsonObject for given data object. This method calls all data
 * generators for it.
 *
 * @param data
 *            data object to be made into a json object
 * @return json object representing the data object
 */
protected JsonObject getDataObject(T data) {
    JsonObject dataObject = Json.createObject();
    for (DataGenerator<T> generator : generators) {
        generator.generateData(data, dataObject);
    }
    return dataObject;
}
3.68
hadoop_HsSingleCounterPage_content
/**
 * The content of this page is the SingleCounterBlock now.
 * @return SingleCounterBlock.class
 */
@Override
protected Class<? extends SubView> content() {
    return SingleCounterBlock.class;
}
3.68
pulsar_PersistentSubscription_close
/**
 * Close the cursor ledger for this subscription. Requires that there are no active consumers on
 * the dispatcher.
 *
 * @return CompletableFuture indicating the completion of the close operation
 */
@Override
public CompletableFuture<Void> close() {
    synchronized (this) {
        if (dispatcher != null && dispatcher.isConsumerConnected()) {
            return FutureUtil.failedFuture(new SubscriptionBusyException("Subscription has active consumers"));
        }
        return this.pendingAckHandle.closeAsync().thenAccept(v -> {
            IS_FENCED_UPDATER.set(this, TRUE);
            log.info("[{}][{}] Successfully closed subscription [{}]", topicName, subName, cursor);
        });
    }
}
3.68
hadoop_FileMetadata_getBlobMaterialization
/**
 * Indicates whether this is an implicit directory (no real blob backing it)
 * or an explicit one.
 *
 * @return Implicit if this is an implicit directory, or Explicit if it's an
 *         explicit directory or a file.
 */
public BlobMaterialization getBlobMaterialization() {
    return blobMaterialization;
}
3.68
framework_AbstractRemoteDataSource_isWaitingForData
/**
 * Checks whether this data source is currently waiting for more rows to
 * become available.
 *
 * @return <code>true</code> if waiting for data; otherwise
 *         <code>false</code>
 */
@Override
public boolean isWaitingForData() {
    return currentRequestCallback != null;
}
3.68
flink_HashPartition_insertIntoBuildBuffer
/**
 * Inserts the given object into the current buffer. This method returns a pointer that can be
 * used to address the written record in this partition, if it is in-memory. The returned
 * pointers have no expressiveness in the case where the partition is spilled.
 *
 * @param record The object to be written to the partition.
 * @return A pointer to the object in the partition, or <code>-1</code>, if the partition is
 *     spilled.
 * @throws IOException Thrown, when this is a spilled partition and the write failed.
 */
public final long insertIntoBuildBuffer(BT record) throws IOException {
    this.buildSideRecordCounter++;

    if (isInMemory()) {
        final long pointer = this.buildSideWriteBuffer.getCurrentPointer();
        this.buildSideSerializer.serialize(record, this.buildSideWriteBuffer);
        return isInMemory() ? pointer : -1;
    } else {
        this.buildSideSerializer.serialize(record, this.buildSideWriteBuffer);
        return -1;
    }
}
3.68
morf_DataValueLookup_getDouble
/**
 * Gets the value as a double. Will attempt conversion where possible and
 * throw a suitable conversion exception if the conversion fails. May return
 * {@code null} if the value is not set or is explicitly set to {@code null}.
 * <p>
 * Warning: this returns a floating-point value which cannot represent values
 * precisely. Use for scaling factors or measurements, but not for precise
 * decimal amounts such as monetary amounts. Use
 * {@link DataValueLookup#getBigDecimal(String)} in those cases.
 * </p>
 *
 * @param name The column name.
 * @return The value.
 */
public default Double getDouble(String name) {
    String value = getValue(name);
    return value == null ? null : Double.valueOf(value);
}
3.68
flink_PendingCheckpoint_acknowledgeMasterState
/**
 * Acknowledges a master state (state generated on the checkpoint coordinator) to the pending
 * checkpoint.
 *
 * @param identifier The identifier of the master state
 * @param state The state to acknowledge
 */
public void acknowledgeMasterState(String identifier, @Nullable MasterState state) {
    synchronized (lock) {
        if (!disposed) {
            if (notYetAcknowledgedMasterStates.remove(identifier) && state != null) {
                masterStates.add(state);
            }
        }
    }
}
3.68
AreaShop_GeneralRegion_restoreRegionBlocks
/**
 * Restore all blocks in a region from a previously saved file.
 * @param fileName The name of the file to restore from (extension and folder will be added)
 * @return true if the region has been restored properly, otherwise false
 */
public boolean restoreRegionBlocks(String fileName) {
    if (getRegion() == null) {
        AreaShop.debug("Region '" + getName() + "' does not exist in WorldGuard, restore failed");
        return false;
    }
    // The path of the schematic to restore
    File restoreFile = new File(plugin.getFileManager().getSchematicFolder() + File.separator + fileName);
    boolean result = plugin.getWorldEditHandler().restoreRegionBlocks(restoreFile, this);
    if (result) {
        AreaShop.debug("Restored schematic for region " + getName());
        // Workaround for signs inside the region in combination with async restore of plugins like AsyncWorldEdit and FastAsyncWorldEdit
        Do.syncLater(10, getSignsFeature()::update);
    }
    return result;
}
3.68
hbase_CheckAndMutate_timeRange
/**
 * Specify a time range.
 * @param timeRange time range to check
 * @return the CheckAndMutate object
 */
public Builder timeRange(TimeRange timeRange) {
    this.timeRange = timeRange;
    return this;
}
3.68
AreaShop_WorldGuardRegionFlagsFeature_translateBukkitToWorldGuardColors
/**
 * Translate the color codes you put in greeting/farewell messages to the weird color codes of WorldGuard.
 * @param message The message where the color codes should be translated (this message has bukkit color codes)
 * @return The string with the WorldGuard color codes
 */
private String translateBukkitToWorldGuardColors(String message) {
    String result = message;
    result = result.replace("&c", "&r");
    result = result.replace("&4", "&R");
    result = result.replace("&e", "&y");
    result = result.replace("&6", "&Y");
    result = result.replace("&a", "&g");
    result = result.replace("&2", "&G");
    result = result.replace("&b", "&c");
    result = result.replace("&3", "&C");
    result = result.replace("&9", "&b");
    result = result.replace("&1", "&B");
    result = result.replace("&d", "&p");
    result = result.replace("&5", "&P");
    result = result.replace("&0", "&0");
    result = result.replace("&8", "&1");
    result = result.replace("&7", "&2");
    result = result.replace("&f", "&w");
    result = result.replace("&r", "&x");
    return result;
}
3.68
flink_Dispatcher_isInGloballyTerminalState
/**
 * Checks whether the given job has already been executed.
 *
 * @param jobId identifying the submitted job
 * @return a successfully completed future with {@code true} if the job has already finished,
 *     either successfully or as a failure
 */
private CompletableFuture<Boolean> isInGloballyTerminalState(JobID jobId) {
    return jobResultStore.hasJobResultEntryAsync(jobId);
}
3.68
framework_Table_setColumnWidth
/**
 * Sets columns width (in pixels). Theme may not necessarily respect very
 * small or very big values. Setting width to -1 (default) means that theme
 * will make decision of width.
 *
 * <p>
 * Column can either have a fixed width or expand ratio. The latter one set
 * is used. See {@link #setColumnExpandRatio(Object, float)}.
 *
 * @param propertyId
 *            columns property id
 * @param width
 *            width to be reserved for columns content
 * @since 4.0.3
 */
public void setColumnWidth(Object propertyId, int width) {
    if (propertyId == null) {
        // Since propertyId is null, this is the row header. Use the magic
        // id to store the width of the row header.
        propertyId = ROW_HEADER_FAKE_PROPERTY_ID;
    }
    // Setting column width should remove any expand ratios as well
    columnExpandRatios.remove(propertyId);
    if (width < 0) {
        columnWidths.remove(propertyId);
    } else {
        columnWidths.put(propertyId, width);
    }
    markAsDirty();
}
3.68
hbase_DependentColumnFilter_toByteArray
/** Returns The filter serialized using pb */
@Override
public byte[] toByteArray() {
    FilterProtos.DependentColumnFilter.Builder builder =
        FilterProtos.DependentColumnFilter.newBuilder();
    builder.setCompareFilter(super.convert());
    if (this.columnFamily != null) {
        builder.setColumnFamily(UnsafeByteOperations.unsafeWrap(this.columnFamily));
    }
    if (this.columnQualifier != null) {
        builder.setColumnQualifier(UnsafeByteOperations.unsafeWrap(this.columnQualifier));
    }
    builder.setDropDependentColumn(this.dropDependentColumn);
    return builder.build().toByteArray();
}
3.68
framework_VaadinService_getSessionLock
/**
 * Gets the lock instance used to lock the VaadinSession associated with the
 * given wrapped session.
 * <p>
 * This method uses the wrapped session instead of VaadinSession to be able
 * to lock even before the VaadinSession has been initialized.
 * </p>
 *
 * @param wrappedSession
 *            The wrapped session
 * @return A lock instance used for locking access to the wrapped session
 */
protected Lock getSessionLock(WrappedSession wrappedSession) {
    Object lock = wrappedSession.getAttribute(getLockAttributeName());
    if (lock instanceof ReentrantLock) {
        return (ReentrantLock) lock;
    }
    if (lock == null) {
        return null;
    }
    throw new RuntimeException(
            "Something else than a ReentrantLock was stored in the "
                    + getLockAttributeName() + " in the session");
}
3.68
hibernate-validator_DefaultConstraintMapping_getBeanConfigurations
/**
 * Returns all bean configurations configured through this constraint mapping.
 *
 * @param constraintCreationContext the constraint creation context
 *
 * @return a set of {@link BeanConfiguration}s with an element for each type configured through this mapping
 */
public Set<BeanConfiguration<?>> getBeanConfigurations(ConstraintCreationContext constraintCreationContext) {
    Set<BeanConfiguration<?>> configurations = newHashSet();
    for ( TypeConstraintMappingContextImpl<?> typeContext : typeContexts ) {
        configurations.add( typeContext.build( constraintCreationContext ) );
    }
    return configurations;
}
3.68
hbase_IndividualBytesFieldCell_getTagsArray
// 8) Tags
@Override
public byte[] getTagsArray() {
    // Tags could be null
    return (tags == null) ? HConstants.EMPTY_BYTE_ARRAY : tags;
}
3.68
dubbo_ServiceBeanNameBuilder_create
/**
 * @param attributes the annotation attributes
 * @param defaultInterfaceClass the default interface class of the service
 * @param environment the Spring {@link Environment}
 * @return a new {@link ServiceBeanNameBuilder} instance
 * @since 2.7.3
 */
public static ServiceBeanNameBuilder create(
        AnnotationAttributes attributes, Class<?> defaultInterfaceClass, Environment environment) {
    return new ServiceBeanNameBuilder(attributes, defaultInterfaceClass, environment);
}
3.68
dubbo_TreePathDynamicConfiguration_getRootPath
/**
 * Get the root path from the specified {@link URL connection URL}.
 *
 * @param url the specified {@link URL connection URL}
 * @return non-null
 */
protected String getRootPath(URL url) {
    String rootPath = url.getParameter(CONFIG_ROOT_PATH_PARAM_NAME, buildRootPath(url));
    rootPath = normalize(rootPath);
    int rootPathLength = rootPath.length();
    if (rootPathLength > 1 && rootPath.endsWith(PATH_SEPARATOR)) {
        rootPath = rootPath.substring(0, rootPathLength - 1);
    }
    return rootPath;
}
3.68
AreaShop_AreaShop_getRegionManager
/**
 * Get the RegionManager.
 * @param world World to get the RegionManager for
 * @return RegionManager for the given world, if there is one, otherwise null
 */
public RegionManager getRegionManager(World world) {
    return this.worldGuardInterface.getRegionManager(world);
}
3.68
open-banking-gateway_PathQueryHeadersMapperTemplate_forValidation
/**
 * Converts context object into object that can be used for validation.
 * @param context Context to convert
 * @return Validatable object that can be used with {@link de.adorsys.opba.protocol.xs2a.service.xs2a.validation.Xs2aValidator}
 * to check if all necessary parameters are present
 */
public PathQueryHeadersToValidate<P, Q, H> forValidation(C context) {
    return new PathQueryHeadersToValidate<>(
            toPath.map(context),
            toQuery.map(context),
            toHeaders.map(context)
    );
}
3.68
pulsar_ConsumerConfiguration_setConsumerName
/**
 * Set the consumer name.
 *
 * @param consumerName the consumer name
 */
public ConsumerConfiguration setConsumerName(String consumerName) {
    checkArgument(consumerName != null && !consumerName.equals(""));
    conf.setConsumerName(consumerName);
    return this;
}
3.68
hadoop_EntityRowKey_parseRowKeyFromString
/**
 * Given the encoded row key as string, returns the row key as an object.
 * @param encodedRowKey String representation of row key.
 * @return An <cite>EntityRowKey</cite> object.
 */
public static EntityRowKey parseRowKeyFromString(String encodedRowKey) {
    return new EntityRowKeyConverter().decodeFromString(encodedRowKey);
}
3.68
hadoop_ReadaheadPool_submitReadahead
/**
 * Submit a request to readahead on the given file descriptor.
 * @param identifier a textual identifier used in error messages, etc.
 * @param fd the file descriptor to readahead
 * @param off the offset at which to start the readahead
 * @param len the number of bytes to read
 * @return an object representing this pending request
 */
public ReadaheadRequest submitReadahead(
        String identifier, FileDescriptor fd, long off, long len) {
    ReadaheadRequestImpl req = new ReadaheadRequestImpl(identifier, fd, off, len);
    pool.execute(req);
    if (LOG.isTraceEnabled()) {
        LOG.trace("submit readahead: " + req);
    }
    return req;
}
3.68
dubbo_ThrowableFunction_execute
/**
 * Executes {@link ThrowableFunction}.
 *
 * @param t the function argument
 * @param function {@link ThrowableFunction}
 * @param <T> the source type
 * @param <R> the return type
 * @return the result after execution
 */
static <T, R> R execute(T t, ThrowableFunction<T, R> function) {
    return function.execute(t);
}
3.68
framework_WidgetRenderer_getWidget
/**
 * Returns the widget contained inside the given cell element, or null if it
 * is not an instance of the given class. Cannot be called for cells that do
 * not contain a widget.
 *
 * @param <W>
 *            the Widget type
 * @param e
 *            the element inside which to find the widget
 * @param klass
 *            the type of the widget to find
 * @return the widget inside the element, or null if its type does not match
 */
protected static <W extends Widget> W getWidget(TableCellElement e, Class<W> klass) {
    W w = WidgetUtil.findWidget(e.getFirstChildElement(), klass);
    assert w == null || w.getElement() == e
            .getFirstChildElement() : "Widget not found inside cell";
    return w;
}
3.68
hbase_FSWALEntry_getTxid
/** Returns The transaction id of this edit. */
long getTxid() {
    return this.txid;
}
3.68
AreaShop_GeneralRegion_getWorld
/**
 * Get the World of the region.
 * @return The World where the region is located
 */
@Override
public World getWorld() {
    return Bukkit.getWorld(getWorldName());
}
3.68
querydsl_SQLExpressions_count
/**
 * Start a window function expression.
 *
 * @param expr expression
 * @return count(expr)
 */
public static WindowOver<Long> count(Expression<?> expr) {
    return new WindowOver<Long>(Long.class, Ops.AggOps.COUNT_AGG, expr);
}
3.68
flink_FlinkVersion_rangeOf
/** Returns all versions within the defined range, inclusive both start and end. */
public static Set<FlinkVersion> rangeOf(FlinkVersion start, FlinkVersion end) {
    return Stream.of(FlinkVersion.values())
            .filter(v -> v.ordinal() >= start.ordinal() && v.ordinal() <= end.ordinal())
            .collect(Collectors.toCollection(LinkedHashSet::new));
}
3.68
framework_BinderValidationStatus_createUnresolvedStatus
/**
 * Convenience method for creating an unresolved validation status for the
 * given binder.
 * <p>
 * In practice this status means that the values might not be valid, but
 * validation errors should be hidden.
 *
 * @param source
 *            the source binder
 * @return an unresolved validation status
 * @param <BEAN>
 *            the bean type of the binder
 */
public static <BEAN> BinderValidationStatus<BEAN> createUnresolvedStatus(
        Binder<BEAN> source) {
    return new BinderValidationStatus<>(source,
            source.getBindings().stream()
                    .map(b -> BindingValidationStatus.createUnresolvedStatus(b))
                    .collect(Collectors.toList()),
            Collections.emptyList());
}
3.68
framework_AbstractDateField_convertToDateString
/**
 * Converts date range limit into string representation.
 *
 * @param temporal
 *            the value
 * @return textual representation
 * @see AbstractDateFieldState#rangeStart
 * @see AbstractDateFieldState#rangeEnd
 * @since 8.4
 */
protected String convertToDateString(T temporal) {
    if (temporal == null) {
        return null;
    }
    return RANGE_FORMATTER.format(temporal);
}
3.68