name: string (length 12 to 178)
code_snippet: string (length 8 to 36.5k)
score: float64 (range 3.26 to 3.68)
flink_CliFrontend_handleArgException
/** * Displays an exception message for incorrect command line arguments. * * @param e The exception to display. * @return The return code for the process. */ private static int handleArgException(CliArgsException e) { LOG.error("Invalid command line arguments.", e); System.out.println(e.getMessage()); System.out.println(); System.out.println("Use the help option (-h or --help) to get help on the command."); return 1; }
3.68
flink_OuterJoinRecordStateViews_create
/** Creates an {@link OuterJoinRecordStateView} depending on the {@link JoinInputSideSpec}. */ public static OuterJoinRecordStateView create( RuntimeContext ctx, String stateName, JoinInputSideSpec inputSideSpec, InternalTypeInfo<RowData> recordType, long retentionTime) { StateTtlConfig ttlConfig = createTtlConfig(retentionTime); if (inputSideSpec.hasUniqueKey()) { if (inputSideSpec.joinKeyContainsUniqueKey()) { return new OuterJoinRecordStateViews.JoinKeyContainsUniqueKey( ctx, stateName, recordType, ttlConfig); } else { return new OuterJoinRecordStateViews.InputSideHasUniqueKey( ctx, stateName, recordType, inputSideSpec.getUniqueKeyType(), inputSideSpec.getUniqueKeySelector(), ttlConfig); } } else { return new OuterJoinRecordStateViews.InputSideHasNoUniqueKey( ctx, stateName, recordType, ttlConfig); } }
3.68
flink_ExtractionUtils_validateStructuredFieldReadability
/** Validates if a field is properly readable either directly or through a getter. */ static void validateStructuredFieldReadability(Class<?> clazz, Field field) { // field is accessible if (isStructuredFieldDirectlyReadable(field)) { return; } // field needs a getter if (!getStructuredFieldGetter(clazz, field).isPresent()) { throw extractionError( "Field '%s' of class '%s' is neither publicly accessible nor does it have " + "a corresponding getter method.", field.getName(), clazz.getName()); } }
3.68
hbase_CellUtil_compareFamilies
/** * Compares the cell's family with the given byte[] * @param left the cell for which the family has to be compared * @param right the byte[] having the family * @param roffset the offset of the family * @param rlength the length of the family * @return greater than 0 if left cell's family is bigger than byte[], less than 0 if left * cell's family is less than byte[] and 0 otherwise */ public final static int compareFamilies(Cell left, byte[] right, int roffset, int rlength) { if (left instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getFamilyByteBuffer(), ((ByteBufferExtendedCell) left).getFamilyPosition(), left.getFamilyLength(), right, roffset, rlength); } return Bytes.compareTo(left.getFamilyArray(), left.getFamilyOffset(), left.getFamilyLength(), right, roffset, rlength); }
3.68
hbase_SegmentFactory_createImmutableSegment
// ****** private methods to instantiate concrete store segments **********// private ImmutableSegment createImmutableSegment(final Configuration conf, final CellComparator comparator, MemStoreSegmentsIterator iterator, MemStoreLAB memStoreLAB, int numOfCells, MemStoreCompactionStrategy.Action action, CompactingMemStore.IndexType idxType) { ImmutableSegment res = null; switch (idxType) { case CHUNK_MAP: res = new CellChunkImmutableSegment(comparator, iterator, memStoreLAB, numOfCells, action); break; case CSLM_MAP: assert false; // non-flat segment can not be created here break; case ARRAY_MAP: res = new CellArrayImmutableSegment(comparator, iterator, memStoreLAB, numOfCells, action); break; } return res; }
3.68
dubbo_DubboCertManager_connect0
/** * Try to connect to remote certificate authorization * * @param certConfig certificate authorization address */ protected void connect0(CertConfig certConfig) { String caCertPath = certConfig.getCaCertPath(); String remoteAddress = certConfig.getRemoteAddress(); logger.info( "Try to connect to Dubbo Cert Authority server: " + remoteAddress + ", caCertPath: " + caCertPath); try { if (StringUtils.isNotEmpty(caCertPath)) { channel = NettyChannelBuilder.forTarget(remoteAddress) .sslContext(GrpcSslContexts.forClient() .trustManager(new File(caCertPath)) .build()) .build(); } else { logger.warn( CONFIG_SSL_CONNECT_INSECURE, "", "", "No caCertPath is provided, will use insecure connection."); channel = NettyChannelBuilder.forTarget(remoteAddress) .sslContext(GrpcSslContexts.forClient() .trustManager(InsecureTrustManagerFactory.INSTANCE) .build()) .build(); } } catch (Exception e) { logger.error(LoggerCodeConstants.CONFIG_SSL_PATH_LOAD_FAILED, "", "", "Failed to load SSL cert file.", e); throw new RuntimeException(e); } }
3.68
graphhopper_AngleCalc_isClockwise
/** * @return true if the given vectors follow a clockwise order abc, bca or cab, * false if the order is counter-clockwise cba, acb or bac, e.g. this returns true: * a b * | / * 0 - c */ public boolean isClockwise(double aX, double aY, double bX, double bY, double cX, double cY) { // simply compare angles between a,b and b,c final double angleDiff = (cX - aX) * (bY - aY) - (cY - aY) * (bX - aX); return angleDiff < 0; }
3.68
hbase_BalanceRequest_setIgnoreRegionsInTransition
/** * Updates BalancerRequest to run the balancer even if there are regions in transition. WARNING: * Advanced usage only, this could cause more issues than it fixes. */ public Builder setIgnoreRegionsInTransition(boolean ignoreRegionsInTransition) { this.ignoreRegionsInTransition = ignoreRegionsInTransition; return this; }
3.68
flink_DataSink_setResources
/** * Sets the resources for this data sink, and the minimum and preferred resources are the same * by default. * * @param resources The resources for this data sink. * @return The data sink with set minimum and preferred resources. */ private DataSink<T> setResources(ResourceSpec resources) { OperatorValidationUtils.validateResources(resources); this.minResources = resources; this.preferredResources = resources; return this; }
3.68
hadoop_DoubleValueSum_getSum
/** * @return the aggregated value */ public double getSum() { return this.sum; }
3.68
hadoop_PlacementConstraint_nodePartitions
/** * Node partitions where the containers of this component can run. */ public PlacementConstraint nodePartitions( List<String> nodePartitions) { this.nodePartitions = nodePartitions; return this; }
3.68
hadoop_ClientThrottlingAnalyzer_run
/** * Periodically analyzes a snapshot of the blob storage metrics and updates * the sleepDuration in order to appropriately throttle storage operations. */ @Override public void run() { boolean doWork = false; try { doWork = doingWork.compareAndSet(0, 1); // prevent concurrent execution of this task if (!doWork) { return; } long now = System.currentTimeMillis(); if (now - blobMetrics.get().startTime >= analysisPeriodMs) { BlobOperationMetrics oldMetrics = blobMetrics.getAndSet( new BlobOperationMetrics(now)); oldMetrics.endTime = now; sleepDuration = analyzeMetricsAndUpdateSleepDuration(oldMetrics, sleepDuration); } } finally { if (doWork) { doingWork.set(0); } } }
3.68
hbase_AuthManager_getMTime
/** * Last modification logical time */ public long getMTime() { return mtime.get(); }
3.68
hadoop_AMOptions_verify
/** * Same as {@link #verify(long, int)} but does not set a max. */ void verify() throws IllegalArgumentException { verify(Integer.MAX_VALUE, Integer.MAX_VALUE); }
3.68
hbase_FileLink_getUnderlyingFileLinkInputStream
/** * If the passed FSDataInputStream is backed by a FileLink, returns the underlying InputStream for * the resolved link target. Otherwise, returns null. */ public static FSDataInputStream getUnderlyingFileLinkInputStream(FSDataInputStream stream) { if (stream.getWrappedStream() instanceof FileLinkInputStream) { return ((FileLinkInputStream) stream.getWrappedStream()).getUnderlyingInputStream(); } return null; }
3.68
flink_AbstractInvokable_getJobConfiguration
/** * Returns the job configuration object which was attached to the original {@link * org.apache.flink.runtime.jobgraph.JobGraph}. * * @return the job configuration object which was attached to the original {@link * org.apache.flink.runtime.jobgraph.JobGraph} */ public Configuration getJobConfiguration() { return this.environment.getJobConfiguration(); }
3.68
pulsar_JwksCache_convertToJwks
/** * The JWK Set is stored in the "keys" key see https://www.rfc-editor.org/rfc/rfc7517#section-5.1. * * @param jwksUri - the URI used to retrieve the JWKS * @param jwks - the JWKS to convert * @return a list of {@link Jwk} */ private List<Jwk> convertToJwks(String jwksUri, Map<String, Object> jwks) throws AuthenticationException { try { @SuppressWarnings("unchecked") List<Map<String, Object>> jwkList = (List<Map<String, Object>>) jwks.get("keys"); final List<Jwk> result = new ArrayList<>(); for (Map<String, Object> jwk : jwkList) { result.add(Jwk.fromValues(jwk)); } return result; } catch (ClassCastException e) { throw new AuthenticationException("Malformed JWKS returned by: " + jwksUri); } }
3.68
hbase_TsvImporterTextMapper_setup
/** * Handles initializing this class with objects specific to it (i.e., the parser). Common * initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a * subclass may choose to override this method and call <code>doSetup</code> as well before * handling its own custom params. */ @Override protected void setup(Context context) { doSetup(context); Configuration conf = context.getConfiguration(); parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), separator); if (parser.getRowKeyColumnIndex() == -1) { throw new RuntimeException("No row key column specified"); } }
3.68
hbase_BackupManager_decorateMasterConfiguration
/** * This method modifies the master's configuration in order to inject backup-related features * (TESTs only) * @param conf configuration */ public static void decorateMasterConfiguration(Configuration conf) { if (!isBackupEnabled(conf)) { return; } // Add WAL archive cleaner plug-in String plugins = conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS); String cleanerClass = BackupLogCleaner.class.getCanonicalName(); if (!plugins.contains(cleanerClass)) { conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass); } String classes = conf.get(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY); String masterProcedureClass = LogRollMasterProcedureManager.class.getName(); if (classes == null) { conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, masterProcedureClass); } else if (!classes.contains(masterProcedureClass)) { conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, classes + "," + masterProcedureClass); } plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS); conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, (plugins == null ? "" : plugins + ",") + BackupHFileCleaner.class.getName()); if (LOG.isDebugEnabled()) { LOG.debug( "Added log cleaner: {}. Added master procedure manager: {}. " + "Added hfile cleaner: {}", cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName()); } }
3.68
flink_SlotStatus_getResourceProfile
/** * Get the resource profile of this slot. * * @return The resource profile */ public ResourceProfile getResourceProfile() { return resourceProfile; }
3.68
dubbo_RpcStatus_removeStatus
/** * Removes the status entry for the given method. * * @param url the provider URL * @param methodName the method name */ public static void removeStatus(URL url, String methodName) { String uri = url.toIdentityString(); ConcurrentMap<String, RpcStatus> map = METHOD_STATISTICS.get(uri); if (map != null) { map.remove(methodName); } }
3.68
morf_ResultSetIterator_next
/** * @see java.util.Iterator#next() */ @Override public Record next() { if (!hasNext) { throw new NoSuchElementException(); } Record result = nextRecord; // Attempt to advance advanceResultSet(); return result; }
3.68
hudi_HoodieTableMetadata_getMetadataTableBasePath
/** * Return the base-path of the Metadata Table for the given Dataset identified by base-path */ static Path getMetadataTableBasePath(Path dataTableBasePath) { return new Path(dataTableBasePath, HoodieTableMetaClient.METADATA_TABLE_FOLDER_PATH); }
3.68
hadoop_ActiveAuditManagerS3A_serviceStart
/** * After starting the auditor, it is queried for its * unbonded span, which is then wrapped and stored for * use. */ @Override protected void serviceStart() throws Exception { super.serviceStart(); setUnbondedSpan(new WrappingAuditSpan( auditor.getUnbondedSpan(), false)); LOG.debug("Started audit service {}", auditor); }
3.68
hudi_MarkerUtils_readTimelineServerBasedMarkersFromFileSystem
/** * Reads files containing the markers written by timeline-server-based marker mechanism. * * @param markerDir marker directory. * @param fileSystem file system to use. * @param context instance of {@link HoodieEngineContext} to use * @param parallelism parallelism to use * @return A {@code Map} of file name to the set of markers stored in the file. */ public static Map<String, Set<String>> readTimelineServerBasedMarkersFromFileSystem( String markerDir, FileSystem fileSystem, HoodieEngineContext context, int parallelism) { Path dirPath = new Path(markerDir); try { if (fileSystem.exists(dirPath)) { Predicate<FileStatus> prefixFilter = fileStatus -> fileStatus.getPath().getName().startsWith(MARKERS_FILENAME_PREFIX); Predicate<FileStatus> markerTypeFilter = fileStatus -> !fileStatus.getPath().getName().equals(MARKER_TYPE_FILENAME); return FSUtils.parallelizeSubPathProcess( context, fileSystem, dirPath, parallelism, prefixFilter.and(markerTypeFilter), pairOfSubPathAndConf -> { String markersFilePathStr = pairOfSubPathAndConf.getKey(); SerializableConfiguration conf = pairOfSubPathAndConf.getValue(); return readMarkersFromFile(new Path(markersFilePathStr), conf); }); } return new HashMap<>(); } catch (IOException ioe) { throw new HoodieIOException(ioe.getMessage(), ioe); } }
3.68
druid_DruidAbstractDataSource_testConnectionInternal
/** * @deprecated */ protected boolean testConnectionInternal(Connection conn) { return testConnectionInternal(null, conn); }
3.68
flink_KeyContextHandler_hasKeyContext2
/** * Whether the second input of {@link StreamOperator} has "KeyContext". If false, we can omit * the call of {@link StreamOperator#setKeyContextElement2} for each record arriving on the * second input. * * @return True if the second input has "KeyContext", false otherwise. */ default boolean hasKeyContext2() { return true; }
3.68
hbase_ServerCrashProcedure_assignRegions
/** * Assign the regions on the crashed RS to other RSes. * <p/> * In this method we will go through all the RegionStateNodes of the given regions to find out * whether there is already a TRSP for the region, if so we interrupt it and let it retry on * another server, otherwise we will schedule a TRSP to bring the region online. * <p/> * We will also check whether the table for a region is enabled, if not, we will skip assigning * it. */ private void assignRegions(MasterProcedureEnv env, List<RegionInfo> regions) throws IOException { AssignmentManager am = env.getMasterServices().getAssignmentManager(); boolean retainAssignment = env.getMasterConfiguration().getBoolean(MASTER_SCP_RETAIN_ASSIGNMENT, DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT); for (RegionInfo region : regions) { RegionStateNode regionNode = am.getRegionStates().getOrCreateRegionStateNode(region); regionNode.lock(); try { // This is possible, as when a server is dead, TRSP will fail to schedule a RemoteProcedure // and then try to assign the region to a new RS. And before it has updated the region // location to the new RS, we may have already called the am.getRegionsOnServer so we will // consider the region is still on this crashed server. Then before we arrive here, the // TRSP could have updated the region location, or even finished itself, so the region is // no longer on this crashed server any more. We should not try to assign it again. Please // see HBASE-23594 for more details. // UPDATE: HBCKServerCrashProcedure overrides isMatchingRegionLocation; this check can get // in the way of our clearing out 'Unknown Servers'. if (!isMatchingRegionLocation(regionNode)) { // See HBASE-24117, though we have already changed the shutdown order, it is still worth // double checking here to confirm that we do not skip assignment incorrectly. if (!am.isRunning()) { throw new DoNotRetryIOException( "AssignmentManager has been stopped, can not process assignment any more"); } LOG.info("{} found {} whose regionLocation no longer matches {}, skipping assign...", this, regionNode, serverName); continue; } if (regionNode.getProcedure() != null) { LOG.info("{} found RIT {}; {}", this, regionNode.getProcedure(), regionNode); regionNode.getProcedure().serverCrashed(env, regionNode, getServerName(), !retainAssignment); continue; } if ( env.getMasterServices().getTableStateManager().isTableState(regionNode.getTable(), TableState.State.DISABLING) ) { // We need to change the state here otherwise the TRSP scheduled by DTP will try to // close the region from a dead server and will never succeed. Please see HBASE-23636 // for more details. env.getAssignmentManager().regionClosedAbnormally(regionNode); LOG.info("{} found table disabling for region {}, set its state to ABNORMALLY_CLOSED.", this, regionNode); continue; } if ( env.getMasterServices().getTableStateManager().isTableState(regionNode.getTable(), TableState.State.DISABLED) ) { // This should not happen, table disabled but has regions on server. LOG.warn("Found table disabled for region {}, procDetails: {}", regionNode, this); continue; } TransitRegionStateProcedure proc = TransitRegionStateProcedure.assign(env, region, !retainAssignment, null); regionNode.setProcedure(proc); addChildProcedure(proc); } finally { regionNode.unlock(); } } }
3.68
flink_DuplicatingCheckpointOutputStream_closeAndGetSecondaryHandle
/** * Returns the state handle from the {@link #secondaryOutputStream}. Also reports suppressed * exceptions from earlier interactions with that stream. */ public StreamStateHandle closeAndGetSecondaryHandle() throws IOException { if (secondaryStreamException == null) { flushInternalBuffer(); return secondaryOutputStream.closeAndGetHandle(); } else { throw new IOException( "Secondary stream previously failed exceptionally", secondaryStreamException); } }
3.68
hadoop_FSStoreOpHandler_get
/** * Will return StoreOp instance based on opCode and StoreType. * @param opCode opCode. * @param storeType storeType. * @return instance of FSNodeStoreLogOp. */ public static FSNodeStoreLogOp get(int opCode, StoreType storeType) { return newInstance(editLogOp.get(storeType).get(opCode)); }
3.68
hudi_ClusteringUtils_createClusteringPlan
/** * Create clustering plan from input fileSliceGroups. */ public static HoodieClusteringPlan createClusteringPlan(String strategyClassName, Map<String, String> strategyParams, List<FileSlice>[] fileSliceGroups, Map<String, String> extraMetadata) { List<HoodieClusteringGroup> clusteringGroups = Arrays.stream(fileSliceGroups).map(fileSliceGroup -> { Map<String, Double> groupMetrics = buildMetrics(fileSliceGroup); List<HoodieSliceInfo> sliceInfos = getFileSliceInfo(fileSliceGroup); return HoodieClusteringGroup.newBuilder().setSlices(sliceInfos).setMetrics(groupMetrics).build(); }).collect(Collectors.toList()); HoodieClusteringStrategy strategy = HoodieClusteringStrategy.newBuilder() .setStrategyClassName(strategyClassName).setStrategyParams(strategyParams) .build(); return HoodieClusteringPlan.newBuilder() .setInputGroups(clusteringGroups) .setExtraMetadata(extraMetadata) .setStrategy(strategy) .setPreserveHoodieMetadata(true) .build(); }
3.68
MagicPlugin_ConfigUtils_toMap
// This is here to replace CompatibilityUtils.getMap, which was more efficient but broken as of 1.18 @Nonnull public static Map<String, Object> toMap(ConfigurationSection section) { return toTypedMap(section); }
3.68
pulsar_ManagedCursorContainer_swap
/** * Swap two items in the heap. */ private void swap(Item item1, Item item2) { int idx1 = item1.idx; int idx2 = item2.idx; heap.set(idx2, item1); heap.set(idx1, item2); // Update the indexes too item1.idx = idx2; item2.idx = idx1; }
3.68
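For illustration, a minimal standalone sketch of the same index-tracking swap; the Item class below is a hypothetical stand-in for the container's heap entry:

import java.util.ArrayList;
import java.util.List;

public class HeapSwapDemo {
    // Hypothetical stand-in for the cursor container's entry: each item remembers its heap slot.
    static class Item {
        final String name;
        int idx;
        Item(String name, int idx) { this.name = name; this.idx = idx; }
    }

    static void swap(List<Item> heap, Item item1, Item item2) {
        int idx1 = item1.idx;
        int idx2 = item2.idx;
        heap.set(idx2, item1);
        heap.set(idx1, item2);
        // Keep the back-references in sync with the new positions, as in the method above.
        item1.idx = idx2;
        item2.idx = idx1;
    }

    public static void main(String[] args) {
        List<Item> heap = new ArrayList<>();
        heap.add(new Item("a", 0));
        heap.add(new Item("b", 1));
        swap(heap, heap.get(0), heap.get(1));
        System.out.println(heap.get(0).name + " " + heap.get(1).name); // prints: b a
    }
}

Storing the index on the entry itself is what lets the heap relocate an entry without first scanning for its position.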
MagicPlugin_MapController_save
/** * Saves the configuration file. * * <p>This is called automatically as changes are made, but you can call it in onDisable to be safe. */ public void save(boolean asynchronous) { if (!loaded) { if (plugin == null) { Bukkit.getLogger().warning("[Magic] Attempted to save image map data before initialization"); } else { plugin.getLogger().warning("Attempted to save image map data before initialization"); } return; } if (configurationFile == null || disabled) return; if (asynchronous && (saveTask != null || plugin == null)) return; Runnable runnable = new SaveMapsRunnable(this, idMap.values()); if (asynchronous) { saveTask = Bukkit.getScheduler().runTaskAsynchronously(plugin, runnable); } else { runnable.run(); } }
3.68
hbase_LruBlockCache_updateSizeMetrics
/** * Helper function that updates the local size counter and also updates any per-cf or * per-blocktype metrics it can discern from given {@link LruCachedBlock} */ private long updateSizeMetrics(LruCachedBlock cb, boolean evict) { long heapsize = cb.heapSize(); BlockType bt = cb.getBuffer().getBlockType(); if (evict) { heapsize *= -1; } if (bt != null) { if (bt.isBloom()) { bloomBlockSize.add(heapsize); } else if (bt.isIndex()) { indexBlockSize.add(heapsize); } else if (bt.isData()) { dataBlockSize.add(heapsize); } } return size.addAndGet(heapsize); }
3.68
flink_FlinkHints_resolveSubQuery
/** Resolve the RelNode of the sub query in conditions. */ private static RexNode resolveSubQuery(RexNode rexNode, Function<RelNode, RelNode> resolver) { return rexNode.accept( new RexShuttle() { @Override public RexNode visitSubQuery(RexSubQuery subQuery) { RelNode oldRel = subQuery.rel; RelNode newRel = resolver.apply(oldRel); if (oldRel != newRel) { return super.visitSubQuery(subQuery.clone(newRel)); } return subQuery; } }); }
3.68
hbase_ClusterMetricsBuilder_toOption
/** * Convert ClusterMetrics.Option to ClusterStatusProtos.Option * @param option a ClusterMetrics.Option * @return converted ClusterStatusProtos.Option */ public static ClusterStatusProtos.Option toOption(ClusterMetrics.Option option) { switch (option) { case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION; case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS; case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS; case UNKNOWN_SERVERS: return ClusterStatusProtos.Option.UNKNOWN_SERVERS; case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID; case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS; case MASTER: return ClusterStatusProtos.Option.MASTER; case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS; case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON; case SERVERS_NAME: return Option.SERVERS_NAME; case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT; case TABLE_TO_REGIONS_COUNT: return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT; case TASKS: return ClusterStatusProtos.Option.TASKS; case DECOMMISSIONED_SERVERS: return ClusterStatusProtos.Option.DECOMMISSIONED_SERVERS; // should not reach here default: throw new IllegalArgumentException("Invalid option: " + option); } }
3.68
hudi_AvroSchemaCompatibility_incompatible
/** * Returns a details object representing an incompatible schema pair, including * error details. * * @return a SchemaCompatibilityResult object with INCOMPATIBLE * SchemaCompatibilityType, and state representing the violating part. */ public static SchemaCompatibilityResult incompatible(SchemaIncompatibilityType incompatibilityType, Schema readerFragment, Schema writerFragment, String message, List<String> location) { Incompatibility incompatibility = new Incompatibility(incompatibilityType, readerFragment, writerFragment, message, location); return new SchemaCompatibilityResult(SchemaCompatibilityType.INCOMPATIBLE, Collections.singletonList(incompatibility)); }
3.68
hbase_ZKTableArchiveClient_getArchivingEnabled
/** * Determine if archiving is enabled (but not necessarily fully propagated) for a table * @param table name of the table to check * @return <tt>true</tt> if it is, <tt>false</tt> otherwise * @throws IOException if an unexpected network issue occurs * @throws KeeperException if zookeeper can't be reached */ public boolean getArchivingEnabled(String table) throws IOException, KeeperException { return getArchivingEnabled(Bytes.toBytes(table)); }
3.68
hbase_CoprocessorHost_callObserver
/** * In case of coprocessors which have many kinds of observers (for eg, {@link RegionCoprocessor} * has BulkLoadObserver, RegionObserver, etc), some implementations may not need all observers, * in which case they will return null for that observer's getter. We simply ignore such cases. */ @Override void callObserver() throws IOException { Optional<O> observer = observerGetter.apply(getEnvironment().getInstance()); if (observer.isPresent()) { call(observer.get()); } }
3.68
flink_BaseHybridHashTable_partitionLevelHash
/** Applies a second, partition-level hash to avoid conflicts between the two hash layers. */ static int partitionLevelHash(int hash) { return hash ^ (hash >>> 16); }
3.68
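This is the same high-bit spreading trick that java.util.HashMap applies to key hashes; a standalone sketch of why it matters when the partition count is a power of two:

public class HashSpreadDemo {
    // Mix the high 16 bits into the low 16 bits so that power-of-two partitioning
    // (which masks out all but the low bits) still sees the high bits.
    static int partitionLevelHash(int hash) {
        return hash ^ (hash >>> 16);
    }

    public static void main(String[] args) {
        int h = 0x12340000;              // all information sits in the high bits
        int partitions = 16;             // low 4 bits select the partition
        System.out.println(h & (partitions - 1));                     // 0: high bits ignored
        System.out.println(partitionLevelHash(h) & (partitions - 1)); // 4: high bits now contribute
    }
}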
flink_UserDefinedFunctionHelper_generateInlineFunctionName
/** Name for anonymous, inline functions. */ public static String generateInlineFunctionName(UserDefinedFunction function) { // use "*...*" to indicate anonymous function similar to types at other locations return String.format("*%s*", function.functionIdentifier()); }
3.68
hbase_ClassSize_sizeOfByteArray
/** * Calculate the memory consumption (in bytes) of a part of a byte array, including the array * header and the part of the backing byte array. This function is used when the byte array backs * multiple objects. For example, in {@link org.apache.hadoop.hbase.KeyValue}, multiple KeyValue * objects share the same backing byte array ({@link org.apache.hadoop.hbase.KeyValue#bytes}). Also * see {@link org.apache.hadoop.hbase.KeyValue#heapSize()}. * @param len the length (in bytes) used partially in the backing byte array * @return the memory consumption (in bytes) of the part of the byte array */ public static long sizeOfByteArray(int len) { return memoryLayout.sizeOfByteArray(len); }
3.68
framework_GridDropTarget_getDropThreshold
/** * Gets the threshold between drop locations from the top and the bottom of * the row. * * @return The threshold in pixels. */ public int getDropThreshold() { return getState(false).dropThreshold; }
3.68
dubbo_NacosNamingServiceUtils_toServiceInstance
/** * Convert the {@link Instance} to {@link ServiceInstance} * * @param instance {@link Instance} * @return non-null * @since 2.7.5 */ public static ServiceInstance toServiceInstance(URL registryUrl, Instance instance) { DefaultServiceInstance serviceInstance = new DefaultServiceInstance( NamingUtils.getServiceName(instance.getServiceName()), instance.getIp(), instance.getPort(), ScopeModelUtil.getApplicationModel(registryUrl.getScopeModel())); serviceInstance.setMetadata(instance.getMetadata()); serviceInstance.setEnabled(instance.isEnabled()); serviceInstance.setHealthy(instance.isHealthy()); return serviceInstance; }
3.68
dubbo_DefaultSerializeClassChecker_loadClass
/** * Tries to load a class * * @param className class name * @throws IllegalArgumentException if class is blocked */ public Class<?> loadClass(ClassLoader classLoader, String className) throws ClassNotFoundException { Class<?> aClass = loadClass0(classLoader, className); if (!aClass.isPrimitive() && !Serializable.class.isAssignableFrom(aClass)) { String msg = "[Serialization Security] Serialized class " + className + " does not implement the Serializable interface. " + "Current mode is strict check, so deserialization is disallowed by default. "; if (serializeSecurityManager.getWarnedClasses().add(className)) { logger.error(PROTOCOL_UNTRUSTED_SERIALIZE_CLASS, "", "", msg); } if (checkSerializable) { throw new IllegalArgumentException(msg); } } return aClass; }
3.68
flink_LogicalTypeMerging_findRoundDecimalType
/** Finds the result type of a decimal rounding operation. */ public static DecimalType findRoundDecimalType(int precision, int scale, int round) { if (round >= scale) { return new DecimalType(false, precision, scale); } if (round < 0) { return new DecimalType( false, Math.min(DecimalType.MAX_PRECISION, 1 + precision - scale), 0); } // 0 <= r < s // NOTE: rounding may increase the digits by 1, therefore we need +1 on precisions. return new DecimalType(false, 1 + precision - scale + round, round); }
3.68
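A worked example of the precision arithmetic under the middle branch (0 <= round < scale), assuming Flink's MAX_PRECISION of 38; the helper below simply mirrors the three branches above:

public class RoundDecimalDemo {
    // Mirrors the precision/scale arithmetic of findRoundDecimalType; returns {precision, scale}.
    static int[] roundType(int precision, int scale, int round) {
        if (round >= scale) return new int[] {precision, scale};
        if (round < 0) return new int[] {Math.min(38, 1 + precision - scale), 0};
        return new int[] {1 + precision - scale + round, round};
    }

    public static void main(String[] args) {
        // ROUND(DECIMAL(5,3), 1): rounding 99.999 to one decimal gives 100.0,
        // which needs 4 digits of precision and scale 1 -- hence the +1 in the formula.
        int[] t = roundType(5, 3, 1);
        System.out.println("DECIMAL(" + t[0] + "," + t[1] + ")"); // prints: DECIMAL(4,1)
    }
}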
hadoop_ResourceVector_newInstance
/** * Creates a new {@code ResourceVector} with all pre-defined resources set to * zero. * @return zero resource vector */ public static ResourceVector newInstance() { ResourceVector zeroResourceVector = new ResourceVector(); for (ResourceInformation resource : ResourceUtils.getResourceTypesArray()) { zeroResourceVector.setValue(resource.getName(), 0); } return zeroResourceVector; }
3.68
hbase_ExecutorService_setAllowCoreThreadTimeout
/** * Allows timing out of core threads. Good to set this for non-critical thread pools for release * of unused resources. Refer to {@link ThreadPoolExecutor#allowCoreThreadTimeOut} for * additional details. */ public ExecutorConfig setAllowCoreThreadTimeout(boolean allowCoreThreadTimeout) { this.allowCoreThreadTimeout = allowCoreThreadTimeout; return this; }
3.68
hbase_RegionNormalizerManager_setNormalizerOn
/** * Set region normalizer on/off * @param normalizerOn whether normalizer should be on or off */ public void setNormalizerOn(boolean normalizerOn) throws IOException { regionNormalizerStateStore.set(normalizerOn); }
3.68
hbase_ActivePolicyEnforcement_getPolicies
/** * Returns an unmodifiable version of the active {@link SpaceViolationPolicyEnforcement}s. */ public Map<TableName, SpaceViolationPolicyEnforcement> getPolicies() { return Collections.unmodifiableMap(activePolicies); }
3.68
dubbo_ApplicationModel_allConsumerModels
/** * @deprecated use {@link ServiceRepository#allConsumerModels()} */ @Deprecated public static Collection<ConsumerModel> allConsumerModels() { return defaultModel().getApplicationServiceRepository().allConsumerModels(); }
3.68
hbase_HRegion_shouldSyncWAL
/** * Check whether we should sync the wal from the table's durability settings */ private boolean shouldSyncWAL() { return regionDurability.ordinal() > Durability.ASYNC_WAL.ordinal(); }
3.68
hbase_JvmVersion_isBadJvmVersion
/** * Return true if the current JVM version is known to be unstable with HBase. */ public static boolean isBadJvmVersion() { String version = System.getProperty("java.version"); return version != null && BAD_JVM_VERSIONS.contains(version); }
3.68
querydsl_PathBuilder_getSet
/** * Create a new Set typed path * * @param <A> * @param <E> * @param property property name * @param type property type * @param queryType expression type * @return property path */ public <A, E extends SimpleExpression<A>> SetPath<A, E> getSet(String property, Class<A> type, Class<? super E> queryType) { validate(property, Set.class); return super.createSet(property, type, queryType, PathInits.DIRECT); }
3.68
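A usage sketch; the root alias "user" and the Set<String> property "tags" are hypothetical:

import com.querydsl.core.types.dsl.PathBuilder;
import com.querydsl.core.types.dsl.SetPath;
import com.querydsl.core.types.dsl.StringPath;

public class PathBuilderDemo {
    public static void main(String[] args) {
        // Build a dynamic root path; the concrete entity class is not known at compile time.
        PathBuilder<Object> user = new PathBuilder<>(Object.class, "user");
        // Typed access to a set-valued property, usable in where()/join() clauses.
        SetPath<String, StringPath> tags = user.getSet("tags", String.class, StringPath.class);
        System.out.println(tags.contains("admin")); // a BooleanExpression, roughly: admin in user.tags
    }
}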
morf_XmlDataSetConsumer_open
/** * @see org.alfasoftware.morf.dataset.DataSetConsumer#open() */ @Override public void open() { xmlStreamProvider.open(); if (clearDestinationBehaviour.equals(ClearDestinationBehaviour.CLEAR)) { // we're outputting, so clear the destination of any previous runs xmlStreamProvider.clearDestination(); } }
3.68
hbase_HRegion_startNonceOperation
/** * Starts the nonce operation for a mutation, if needed. * @return whether to proceed with this mutation. */ private boolean startNonceOperation() throws IOException { if ( region.rsServices == null || region.rsServices.getNonceManager() == null || nonce == HConstants.NO_NONCE ) { return true; } boolean canProceed; try { canProceed = region.rsServices.getNonceManager().startOperation(nonceGroup, nonce, region.rsServices); } catch (InterruptedException ex) { throw new InterruptedIOException("Nonce start operation interrupted"); } return canProceed; }
3.68
flink_OutputCollector_collect
/** Collects a record and emits it to all writers. */ @Override public void collect(T record) { if (record != null) { this.delegate.setInstance(record); try { for (RecordWriter<SerializationDelegate<T>> writer : writers) { writer.emit(this.delegate); } } catch (IOException e) { throw new RuntimeException( "Emitting the record caused an I/O exception: " + e.getMessage(), e); } } else { throw new NullPointerException( "The system does not support records that are null. " + "Null values are only supported as fields inside other objects."); } }
3.68
hudi_MarkerDirState_fetchPendingMarkerCreationRequests
/** * @return futures of pending marker creation requests and removes them from the list. */ public List<MarkerCreationFuture> fetchPendingMarkerCreationRequests() { return getPendingMarkerCreationRequests(true); }
3.68
framework_DropEvent_getComponent
/** * Returns the drop target component where the drop event occurred. * * @return Component on which a drag source was dropped. */ @Override @SuppressWarnings("unchecked") public T getComponent() { return (T) super.getComponent(); }
3.68
querydsl_AbstractSQLQuery_setUseLiterals
/** * Set whether literals are used in SQL strings instead of parameter bindings (default: false) * * <p>Warning: When literals are used, prepared statements won't have any parameter bindings, * and batch statements will only be simulated, not executed as actual batch statements.</p> * * @param useLiterals true for literals and false for bindings */ public void setUseLiterals(boolean useLiterals) { this.useLiterals = useLiterals; }
3.68
hbase_AsyncAdmin_balancerSwitch
/** * Turn the load balancer on or off. * @param on Set to <code>true</code> to enable, <code>false</code> to disable. * @return Previous balancer value wrapped by a {@link CompletableFuture}. */ default CompletableFuture<Boolean> balancerSwitch(boolean on) { return balancerSwitch(on, false); }
3.68
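A usage sketch, assuming an HBase 2.x style async client; configuration details and error handling are elided:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BalancerSwitchDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
            // Turn the balancer off; the future resolves to the previous on/off state.
            boolean wasOn = conn.getAdmin().balancerSwitch(false).get();
            System.out.println("balancer was previously " + (wasOn ? "on" : "off"));
        }
    }
}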
hbase_ProcedureStoreTracker_resetModified
/** * Clears the list of updated procedure ids. This doesn't affect global list of active procedure * ids. */ public void resetModified() { for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) { entry.getValue().resetModified(); } minModifiedProcId = Long.MAX_VALUE; maxModifiedProcId = Long.MIN_VALUE; }
3.68
rocketmq-connect_AbstractConnectController_connectors
/** * Get a list of connectors currently running in this cluster. * * @return A list of connector names */ public Collection<String> connectors() { return configManagementService.snapshot().connectors(); }
3.68
hadoop_FileIoProvider_delete
/** * Delete a file. * @param volume target volume. null if unavailable. * @param f File to delete. * @return true if the file was successfully deleted. */ public boolean delete(@Nullable FsVolumeSpi volume, File f) { final long begin = profilingEventHook.beforeMetadataOp(volume, DELETE); try { faultInjectorEventHook.beforeMetadataOp(volume, DELETE); boolean deleted = f.delete(); profilingEventHook.afterMetadataOp(volume, DELETE, begin); return deleted; } catch (Exception e) { onFailure(volume, begin); throw e; } }
3.68
hudi_ClusteringPlanStrategy_getFileSlicesEligibleForClustering
/** * Return file slices eligible for clustering. FileIds in pending clustering/compaction are not eligible for clustering. */ protected Stream<FileSlice> getFileSlicesEligibleForClustering(String partition) { SyncableFileSystemView fileSystemView = (SyncableFileSystemView) getHoodieTable().getSliceView(); Set<HoodieFileGroupId> fgIdsInPendingCompactionLogCompactionAndClustering = Stream.concat(fileSystemView.getPendingCompactionOperations(), fileSystemView.getPendingLogCompactionOperations()) .map(instantTimeOpPair -> instantTimeOpPair.getValue().getFileGroupId()) .collect(Collectors.toSet()); fgIdsInPendingCompactionLogCompactionAndClustering.addAll(fileSystemView.getFileGroupsInPendingClustering().map(Pair::getKey).collect(Collectors.toSet())); return hoodieTable.getSliceView().getLatestFileSlicesStateless(partition) // file ids already in clustering are not eligible .filter(slice -> !fgIdsInPendingCompactionLogCompactionAndClustering.contains(slice.getFileGroupId())); }
3.68
dubbo_Bytes_bytes2short
/** * to short. * * @param b byte array. * @param off offset. * @return short. */ public static short bytes2short(byte[] b, int off) { return (short) (((b[off + 1] & 0xFF) << 0) + ((b[off + 0]) << 8)); }
3.68
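A quick standalone check of the decoding, showing that b[off] is treated as the high byte (big-endian):

public class Bytes2ShortDemo {
    static short bytes2short(byte[] b, int off) {
        return (short) (((b[off + 1] & 0xFF) << 0) + ((b[off + 0]) << 8));
    }

    public static void main(String[] args) {
        byte[] b = {0x12, 0x34};
        System.out.printf("0x%04X%n", bytes2short(b, 0)); // prints: 0x1234
    }
}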
pulsar_KerberosName_apply
/** * Try to apply this rule to the given name represented as a parameter * array. * @param params first element is the realm, second and later elements * are the components of the name "a/b@FOO" -> {"FOO", "a", "b"} * @return the short name if this rule applies or null * @throws IOException throws if something is wrong with the rules */ String apply(String[] params) throws IOException { String result = null; if (isDefault) { if (defaultRealm.equals(params[0])) { result = params[1]; } } else if (params.length - 1 == numOfComponents) { String base = replaceParameters(format, params); if (match == null || match.matcher(base).matches()) { if (fromPattern == null) { result = base; } else { result = replaceSubstitution(base, fromPattern, toPattern, repeat); } } } if (result != null && nonSimplePattern.matcher(result).find()) { throw new NoMatchingRule("Non-simple name " + result + " after auth_to_local rule " + this); } return result; }
3.68
rocketmq-connect_DorisSinkTask_put
/** * Put the records to the sink. * * @param records the records to write */ @Override public void put(List<ConnectRecord> records) throws ConnectException { if (records.isEmpty()) { return; } final int recordsCount = records.size(); log.debug("Received {} records.", recordsCount); try { updater.write(records); } catch (TableAlterOrCreateException tace) { throw tace; } catch (SQLException sqle) { SQLException sqlAllMessagesException = getAllMessagesException(sqle); if (remainingRetries > 0) { // updater.closeQuietly(); start(originalConfig); remainingRetries--; throw new RetriableException(sqlAllMessagesException); } } remainingRetries = config.getMaxRetries(); }
3.68
hudi_HoodieColumnProjectionUtils_getReadColumnIDs
/** * Returns a list of column ids (starting from zero) which is set in the given * parameter <tt>conf</tt>. */ public static List<Integer> getReadColumnIDs(Configuration conf) { String skips = conf.get(READ_COLUMN_IDS_CONF_STR, READ_COLUMN_IDS_CONF_STR_DEFAULT); String[] list = StringUtils.split(skips); List<Integer> result = new ArrayList<Integer>(list.length); for (String element : list) { // it may contain duplicates, remove duplicates Integer toAdd = Integer.parseInt(element); if (!result.contains(toAdd)) { result.add(toAdd); } // NOTE: some code uses this list to correlate with column names, and yet these lists may // contain duplicates, which this call will remove and the other won't. As far as I can // tell, no code will actually use these two methods together; all is good if the code // gets the ID list without relying on this method. Or maybe it just works by magic. } return result; }
3.68
pulsar_ClientConfiguration_getAuthentication
/** * @return the authentication provider to be used */ public Authentication getAuthentication() { return confData.getAuthentication(); }
3.68
hbase_MergeTableRegionsProcedure_postRollBackMergeRegions
/** * Action after rolling back a merge table regions action. */ private void postRollBackMergeRegions(final MasterProcedureEnv env) throws IOException { final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); if (cpHost != null) { cpHost.postRollBackMergeRegionsAction(regionsToMerge, getUser()); } }
3.68
hadoop_CipherSuite_getConfigSuffix
/** * Returns suffix of cipher suite configuration. * @return String configuration suffix */ public String getConfigSuffix() { String[] parts = name.split("/"); StringBuilder suffix = new StringBuilder(); for (String part : parts) { suffix.append(".").append(StringUtils.toLowerCase(part)); } return suffix.toString(); }
3.68
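For example, the AES-CTR suite name maps to a configuration suffix as follows; the helper re-implements the same split-and-lowercase loop standalone:

public class ConfigSuffixDemo {
    static String configSuffix(String name) {
        StringBuilder suffix = new StringBuilder();
        for (String part : name.split("/")) {
            suffix.append(".").append(part.toLowerCase());
        }
        return suffix.toString();
    }

    public static void main(String[] args) {
        System.out.println(configSuffix("AES/CTR/NoPadding")); // prints: .aes.ctr.nopadding
    }
}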
zxing_OneDimensionalCodeWriter_encode
/** * Encode the contents following the specified format. * {@code width} and {@code height} are the required size. This method may return a bigger * {@code BitMatrix} when the specified size is too small. The user can set both {@code width} and * {@code height} to zero to get the minimum size barcode. If a negative value is set to {@code width} * or {@code height}, an {@code IllegalArgumentException} is thrown. */ @Override public BitMatrix encode(String contents, BarcodeFormat format, int width, int height, Map<EncodeHintType,?> hints) { if (contents.isEmpty()) { throw new IllegalArgumentException("Found empty contents"); } if (width < 0 || height < 0) { throw new IllegalArgumentException("Negative size is not allowed. Input: " + width + 'x' + height); } Collection<BarcodeFormat> supportedFormats = getSupportedWriteFormats(); if (supportedFormats != null && !supportedFormats.contains(format)) { throw new IllegalArgumentException("Can only encode " + supportedFormats + ", but got " + format); } int sidesMargin = getDefaultMargin(); if (hints != null && hints.containsKey(EncodeHintType.MARGIN)) { sidesMargin = Integer.parseInt(hints.get(EncodeHintType.MARGIN).toString()); } boolean[] code = encode(contents, hints); return renderResult(code, width, height, sidesMargin); }
3.68
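A usage sketch with EAN13Writer, one of ZXing's concrete one-dimensional writers; the content is a standard example EAN-13 with a valid check digit:

import com.google.zxing.BarcodeFormat;
import com.google.zxing.common.BitMatrix;
import com.google.zxing.oned.EAN13Writer;

public class OneDWriterDemo {
    public static void main(String[] args) throws Exception {
        // 300x150 is the requested minimum size; per the contract above, the
        // returned matrix may be larger if that is too small for the barcode.
        BitMatrix matrix = new EAN13Writer().encode("5901234123457", BarcodeFormat.EAN_13, 300, 150);
        System.out.println(matrix.getWidth() + "x" + matrix.getHeight());
    }
}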
graphhopper_KVStorage_isEquals
// compared to entries.equals(lastEntries) this method avoids a NPE if a value is null and throws an IAE instead private boolean isEquals(List<KeyValue> entries, List<KeyValue> lastEntries) { if (lastEntries != null && entries.size() == lastEntries.size()) { for (int i = 0; i < entries.size(); i++) { KeyValue kv = entries.get(i); if (kv.value == null) throw new IllegalArgumentException("value for key " + kv.key + " cannot be null"); if (!kv.equals(lastEntries.get(i))) return false; } return true; } return false; }
3.68
hadoop_Chain_checkReducerAlreadySet
// if a reducer chain check the Reducer has been already set or not protected static void checkReducerAlreadySet(boolean isMap, Configuration jobConf, String prefix, boolean shouldSet) { if (!isMap) { if (shouldSet) { if (jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null) == null) { throw new IllegalStateException( "A Mapper can be added to the chain only after the Reducer has " + "been set"); } } else { if (jobConf.getClass(prefix + CHAIN_REDUCER_CLASS, null) != null) { throw new IllegalStateException("Reducer has been already set"); } } } }
3.68
flink_CopyOnWriteSkipListStateMap_iterateAndProcess
/** * Iterate the skip list and perform the given function. * * @param keySegment memory segment storing the key. * @param keyOffset offset of the key. * @param keyLen length of the key. * @param function the function to apply when the skip list contains the given key, which * accepts two parameters: an encapsulation of [previous_node, current_node, next_node] and * a boolean indicating whether the node with same key has been logically removed, and * returns a state. * @return the iterate and processing result */ private SkipListIterateAndProcessResult iterateAndProcess( MemorySegment keySegment, int keyOffset, int keyLen, BiFunction<SkipListNodePointers, Boolean, S> function) { int deleteCount = 0; long prevNode = findPredecessor(keySegment, keyOffset, 1); long currentNode = helpGetNextNode(prevNode, 0); long nextNode; int c; while (currentNode != NIL_NODE) { nextNode = helpGetNextNode(currentNode, 0); // Check whether the current node is already logically removed to save some comparisons // on key, // with the cost of an additional remove-then-add operation if the to-be-removed node // has the same key // as the to-be-put one. boolean isRemoved = isNodeRemoved(currentNode); if (isRemoved && highestRequiredSnapshotVersionPlusOne == 0 && deleteCount < numKeysToDeleteOneTime) { doPhysicalRemove(currentNode, prevNode, nextNode); logicallyRemovedNodes.remove(currentNode); currentNode = nextNode; deleteCount++; continue; } c = compareSegmentAndNode(keySegment, keyOffset, keyLen, currentNode); if (c < 0) { // The given key is less than the current node, break the loop break; } else if (c > 0) { // The given key is larger than the current node, continue prevNode = currentNode; currentNode = nextNode; } else { // The given key is equal to the current node, apply the function S state = function.apply( new SkipListNodePointers(prevNode, currentNode, nextNode), isRemoved); return new SkipListIterateAndProcessResult(prevNode, currentNode, true, state); } } return new SkipListIterateAndProcessResult(prevNode, currentNode, false, null); }
3.68
hbase_MovingAverage_start
/** * Mark start time of an execution. * @return time in ns. */ protected long start() { return System.nanoTime(); }
3.68
flink_KeyMap_putOrAggregate
/** * Inserts or aggregates a value into the hash map. If the hash map does not yet contain the * key, this method inserts the value. If the table already contains the key (and a value) this * method will use the given ReduceFunction to combine the existing value and the given * value to a new value, and store that value for the key. * * @param key The key to map the value. * @param value The new value to insert, or aggregate with the existing value. * @param aggregator The aggregator to use if a value is already contained. * @return The value in the map after this operation: Either the given value, or the aggregated * value. * @throws java.lang.NullPointerException Thrown, if the key is null. * @throws Exception The method forwards exceptions from the aggregation function. */ public final V putOrAggregate(K key, V value, ReduceFunction<V> aggregator) throws Exception { final int hash = hash(key); final int slot = indexOf(hash); // search the chain from the slot for (Entry<K, V> entry = table[slot]; entry != null; entry = entry.next) { if (entry.hashCode == hash && entry.key.equals(key)) { // found match entry.value = aggregator.reduce(entry.value, value); return entry.value; } } // no match, insert a new value insertNewEntry(hash, key, value, slot); // return the original value return value; }
3.68
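The insert-or-aggregate contract is the same as java.util.Map#merge; a standalone sketch of the semantics with a summing reducer:

import java.util.HashMap;
import java.util.Map;

public class PutOrAggregateDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.merge("key", 5, Integer::sum);   // no entry yet: inserts 5
        map.merge("key", 3, Integer::sum);   // entry exists: aggregates to 8
        System.out.println(map.get("key"));  // prints: 8
    }
}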
hadoop_FlowRunEntityReader_updateFixedColumns
/** * Add {@link QualifierFilter} filters to filter list for each column of flow * run table. * * @return filter list to which qualifier filters have been added. */ private FilterList updateFixedColumns() { FilterList columnsList = new FilterList(Operator.MUST_PASS_ONE); for (FlowRunColumn column : FlowRunColumn.values()) { columnsList.addFilter(new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(column.getColumnQualifierBytes()))); } return columnsList; }
3.68
hbase_Constraints_writeConstraint
/** * Write the given key and associated configuration to the {@link TableDescriptorBuilder}. */ private static TableDescriptorBuilder writeConstraint(TableDescriptorBuilder builder, String key, Configuration conf) throws IOException { // store the key and conf in the descriptor return builder.setValue(key, serializeConfiguration(conf)); }
3.68
zxing_GenericGFPoly_getCoefficient
/** * @return coefficient of x^degree term in this polynomial */ int getCoefficient(int degree) { return coefficients[coefficients.length - 1 - degree]; }
3.68
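The coefficients array stores the highest-degree term first, so the x^degree coefficient sits at index length - 1 - degree; a standalone illustration:

public class CoefficientDemo {
    public static void main(String[] args) {
        int[] coefficients = {3, 2, 1}; // p(x) = 3x^2 + 2x + 1, highest degree first
        int degree = 2;
        System.out.println(coefficients[coefficients.length - 1 - degree]); // prints: 3
    }
}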
streampipes_MigrateExtensionsResource_executeMigration
/** * Executes the migration for the given pipeline element based on the given migrator. * @param migrator migrator that executes the migration * @param pipelineElementDescription pipeline element to be migrated * @return the migration result containing either the migrated element or the original one in case of a failure */ protected MigrationResult<T> executeMigration( MmT migrator, T pipelineElementDescription ) { var extractor = getPropertyExtractor(pipelineElementDescription); try { var result = migrator.migrate(pipelineElementDescription, extractor); if (result.success()) { LOG.info("Migration successfully finished."); // Since adapter migration was successful, version can be adapted to the target version. // this step is explicitly performed here and not left to the migration itself to // prevent leaving this step out var migratedProcessor = result.element(); migratedProcessor.setVersion(migrator.config().toVersion()); return MigrationResult.success(migratedProcessor); } else { LOG.error("Migration failed with the following reason: {}", result.message()); // The failed migration is documented in the MigrationResult // The core is expected to handle the response accordingly, so we can safely return a positive status code return result; } } catch (RuntimeException e) { LOG.error("An unexpected exception caused the migration to fail - " + "sending exception report in migration result"); return MigrationResult.failure( pipelineElementDescription, String.format( "Migration failed due to an unexpected exception: %s", StringUtils.join(e.getStackTrace(), "\n") ) ); } }
3.68
flink_HiveParserSemanticAnalyzer_findCTEFromName
/* * We allow CTE definitions in views. So we can end up with a hierarchy of CTE definitions: * - at the top level of a query statement * - where a view is referenced. * - views may refer to other views. * * The scoping rules we use are: to search for a CTE from the current HiveParserQB outwards. In order to * disambiguate between CTEs at different levels we qualify (prefix) them with the id of the HiveParserQB * they appear in when adding them to the <code>aliasToCTEs</code> map. * */ private HiveParserBaseSemanticAnalyzer.CTEClause findCTEFromName( HiveParserQB qb, String cteName) { StringBuilder qId = new StringBuilder(); if (qb.getId() != null) { qId.append(qb.getId()); } while (qId.length() > 0) { String nm = qId + ":" + cteName; HiveParserBaseSemanticAnalyzer.CTEClause cte = aliasToCTEs.get(nm); if (cte != null) { return cte; } int lastIndex = qId.lastIndexOf(":"); lastIndex = Math.max(lastIndex, 0); qId.setLength(lastIndex); } return aliasToCTEs.get(cteName); }
3.68
framework_VaadinSession_storeInSession
/** * Stores this VaadinSession in the HTTP session. * * @param service * The service this session is associated with * @param session * The HTTP session this VaadinSession should be stored in * @deprecated as of 7.6, call * {@link VaadinService#storeSession(VaadinSession, WrappedSession)} * instead */ @Deprecated public void storeInSession(VaadinService service, WrappedSession session) { service.storeSession(this, session); }
3.68
hbase_Subprocedure_cancel
/** * Method to cancel the Subprocedure by injecting an exception from an external source. */ public void cancel(String msg, Throwable cause) { LOG.error(msg, cause); complete = true; if (cause instanceof ForeignException) { monitor.receive((ForeignException) cause); } else { monitor.receive(new ForeignException(getMemberName(), cause)); } }
3.68
hmily_AggregateBinder_wasSupplied
/** * Was supplied boolean. * * @return the boolean */ public boolean wasSupplied() { return this.supplied != null; }
3.68
hadoop_ExponentialRetryPolicy_shouldRetry
/** * Returns whether a request should be retried based on the retry count, current response, * and the current strategy. The valid http status code lies in the range of 1xx-5xx. * But an invalid status code might be set due to network or timeout issues. * Such invalid status codes also qualify for retry. * * @param retryCount The current retry attempt count. * @param statusCode The status code of the response, or -1 for socket error. * @return true if the request should be retried; false otherwise. */ public boolean shouldRetry(final int retryCount, final int statusCode) { return retryCount < this.retryCount && (statusCode < HTTP_CONTINUE || statusCode == HttpURLConnection.HTTP_CLIENT_TIMEOUT || (statusCode >= HttpURLConnection.HTTP_INTERNAL_ERROR && statusCode != HttpURLConnection.HTTP_NOT_IMPLEMENTED && statusCode != HttpURLConnection.HTTP_VERSION)); }
3.68
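A standalone sketch of the predicate with representative status codes; the retry limit of 3 is illustrative:

import java.net.HttpURLConnection;

public class RetryPolicyDemo {
    static final int HTTP_CONTINUE = 100;
    static final int MAX_RETRIES = 3; // illustrative limit

    static boolean shouldRetry(int retryCount, int statusCode) {
        return retryCount < MAX_RETRIES
            && (statusCode < HTTP_CONTINUE                                  // e.g. -1 for socket errors
                || statusCode == HttpURLConnection.HTTP_CLIENT_TIMEOUT      // 408
                || (statusCode >= HttpURLConnection.HTTP_INTERNAL_ERROR     // 5xx...
                    && statusCode != HttpURLConnection.HTTP_NOT_IMPLEMENTED // ...except 501
                    && statusCode != HttpURLConnection.HTTP_VERSION));      // ...and 505
    }

    public static void main(String[] args) {
        System.out.println(shouldRetry(0, -1));  // true: network error
        System.out.println(shouldRetry(0, 503)); // true: retryable server error
        System.out.println(shouldRetry(0, 404)); // false: client error, retrying won't help
        System.out.println(shouldRetry(3, 503)); // false: retries exhausted
    }
}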
framework_JsonCodec_decodeInternalType
/** * Decodes a value that is of an internal type. * <p> * Ensures the encoded value is of the same type as target type. * </p> * <p> * Allows restricting collections so that they must be declared using * generics. If this is used then all objects in the collection are encoded * using the declared type. Otherwise only internal types are allowed in * collections. * </p> * * @param targetType * The type that should be returned by this method * @param restrictToInternalTypes * @param encodedJsonValue * @param connectorTracker * @return */ public static Object decodeInternalType(Type targetType, boolean restrictToInternalTypes, JsonValue encodedJsonValue, ConnectorTracker connectorTracker) { if (!isInternalType(targetType)) { throw new JsonException("Type " + targetType + " is not a supported internal type."); } String transportType = getInternalTransportType(targetType); if (encodedJsonValue.getType() == JsonType.NULL) { return null; } else if (targetType == Void.class) { throw new JsonException( "Something other than null was encoded for a null type"); } // UidlValue if (targetType == UidlValue.class) { return decodeUidlValue((JsonArray) encodedJsonValue, connectorTracker); } // Collections if (JsonConstants.VTYPE_LIST.equals(transportType)) { return decodeList(targetType, restrictToInternalTypes, (JsonArray) encodedJsonValue, connectorTracker); } else if (JsonConstants.VTYPE_SET.equals(transportType)) { return decodeSet(targetType, restrictToInternalTypes, (JsonArray) encodedJsonValue, connectorTracker); } else if (JsonConstants.VTYPE_MAP.equals(transportType)) { return decodeMap(targetType, restrictToInternalTypes, encodedJsonValue, connectorTracker); } // Arrays if (JsonConstants.VTYPE_ARRAY.equals(transportType)) { return decodeObjectArray((JsonArray) encodedJsonValue, connectorTracker); } else if (JsonConstants.VTYPE_STRINGARRAY.equals(transportType)) { return decodeArray(String.class, (JsonArray) encodedJsonValue, null); } // Special Vaadin types if (JsonConstants.VTYPE_CONNECTOR.equals(transportType)) { return connectorTracker.getConnector(encodedJsonValue.asString()); } // Legacy types if (JsonConstants.VTYPE_STRING.equals(transportType)) { return encodedJsonValue.asString(); } else if (JsonConstants.VTYPE_INTEGER.equals(transportType)) { return (int) encodedJsonValue.asNumber(); } else if (JsonConstants.VTYPE_LONG.equals(transportType)) { return (long) encodedJsonValue.asNumber(); } else if (JsonConstants.VTYPE_FLOAT.equals(transportType)) { return (float) encodedJsonValue.asNumber(); } else if (JsonConstants.VTYPE_DOUBLE.equals(transportType)) { return encodedJsonValue.asNumber(); } else if (JsonConstants.VTYPE_BOOLEAN.equals(transportType)) { return encodedJsonValue.asBoolean(); } throw new JsonException("Unknown type " + transportType); }
3.68
dubbo_ReflectUtils_findParameterizedTypes
/** * Find the {@link Set} of {@link ParameterizedType} * * @param sourceClass the source {@link Class class} * @return non-null read-only {@link Set} * @since 2.7.5 */ public static Set<ParameterizedType> findParameterizedTypes(Class<?> sourceClass) { // Add Generic Interfaces List<Type> genericTypes = new LinkedList<>(asList(sourceClass.getGenericInterfaces())); // Add Generic Super Class genericTypes.add(sourceClass.getGenericSuperclass()); Set<ParameterizedType> parameterizedTypes = genericTypes.stream() .filter(type -> type instanceof ParameterizedType) // filter ParameterizedType .map(ParameterizedType.class::cast) // cast to ParameterizedType .collect(Collectors.toSet()); if (parameterizedTypes.isEmpty()) { // If not found, try to search super types recursively genericTypes.stream() .filter(type -> type instanceof Class) .map(Class.class::cast) .forEach(superClass -> parameterizedTypes.addAll(findParameterizedTypes(superClass))); } return unmodifiableSet(parameterizedTypes); // build as a Set }
3.68
hibernate-validator_SizeValidatorForArraysOfByte_isValid
/** * Checks the number of entries in an array. * * @param array The array to validate. * @param constraintValidatorContext context in which the constraint is evaluated. * * @return Returns {@code true} if the array is {@code null} or the number of entries in * {@code array} is between the specified {@code min} and {@code max} values (inclusive), * {@code false} otherwise. */ @Override public boolean isValid(byte[] array, ConstraintValidatorContext constraintValidatorContext) { if ( array == null ) { return true; } return array.length >= min && array.length <= max; }
3.68
pulsar_ClientConfiguration_setTlsHostnameVerificationEnable
/** * Enables hostname verification when the client connects to the broker over TLS. It validates the incoming x509 * certificate and matches the provided hostname (CN/SAN) against the expected broker host name. It follows RFC 2818, 3.1. * Server Identity hostname verification. * * @see <a href="https://tools.ietf.org/html/rfc2818">rfc2818</a> * * @param tlsHostnameVerificationEnable */ public void setTlsHostnameVerificationEnable(boolean tlsHostnameVerificationEnable) { confData.setTlsHostnameVerificationEnable(tlsHostnameVerificationEnable); }
3.68
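A minimal configuration sketch, assuming the (deprecated) ClientConfiguration-based client API; the broker URL is a placeholder:

ClientConfiguration conf = new ClientConfiguration();
conf.setUseTls(true); // hostname verification only applies over TLS
conf.setTlsHostnameVerificationEnable(true);
PulsarClient client = PulsarClient.create("pulsar+ssl://broker.example.com:6651", conf);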
hbase_MajorCompactionTTLRequest_getColFamilyCutoffTime
// If the CF has no TTL, return -1, else return the current time - TTL.
private long getColFamilyCutoffTime(ColumnFamilyDescriptor colDesc) {
  if (colDesc.getTimeToLive() == HConstants.FOREVER) {
    return -1;
  }
  return EnvironmentEdgeManager.currentTime() - (colDesc.getTimeToLive() * 1000L);
}
3.68
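As a worked example (values invented): getTimeToLive() is in seconds, so the 1000L multiplier converts to milliseconds in long arithmetic, avoiding int overflow for large TTLs.

// TTL of one day = 86400 seconds
long ttlSeconds = 86400L;
long cutoff = EnvironmentEdgeManager.currentTime() - ttlSeconds * 1000L;
// Cells with timestamps before 'cutoff' have outlived the TTL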
hbase_AbstractFSWALProvider_requestLogRoll
/**
 * Request a log roll, but don't actually do it.
 */
static void requestLogRoll(final WAL wal) {
  ((AbstractFSWAL<?>) wal).requestLogRoll();
}
3.68
hbase_RegionNormalizerWorkQueue_putAll
/**
 * Inserts the specified elements at the tail of the queue. Any elements already present in the
 * queue are ignored.
 * @param c the elements to add
 */
public void putAll(Collection<? extends E> c) {
  if (c == null) {
    throw new NullPointerException();
  }
  lock.writeLock().lock();
  try {
    delegate.addAll(c);
    if (!delegate.isEmpty()) {
      notEmpty.signal();
    }
  } finally {
    lock.writeLock().unlock();
  }
}
3.68
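A hedged usage sketch; the element type, construction, and the tableA/tableB instances are assumptions for illustration:

RegionNormalizerWorkQueue<TableName> queue = new RegionNormalizerWorkQueue<>();
// The repeated tableA is ignored, per the putAll contract above
queue.putAll(Arrays.asList(tableA, tableB, tableA));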
hadoop_S3AReadOpContext_getIOStatisticsAggregator
/**
 * Return the IOStatistics aggregator.
 *
 * @return instance of IOStatisticsAggregator.
 */
public IOStatisticsAggregator getIOStatisticsAggregator() {
  return ioStatisticsAggregator;
}
3.68
hbase_ActiveMasterManager_blockUntilBecomingActiveMaster
/**
 * Block until becoming the active master. Method blocks until there is not another active master
 * and our attempt to become the new active master is successful. This also makes sure that we
 * are watching the master znode so we will be notified if another master dies.
 * @param checkInterval the interval to check if the master is stopped
 * @param startupTaskGroup the task group for master startup to track the progress
 * @return True if no issue becoming active master else false if another master was running or if
 *         some other problem (zookeeper, stop flag has been set on this Master)
 */
boolean blockUntilBecomingActiveMaster(int checkInterval, TaskGroup startupTaskGroup) {
  MonitoredTask blockUntilActive =
    startupTaskGroup.addTask("Blocking until becoming active master");
  String backupZNode = ZNodePaths
    .joinZNode(this.watcher.getZNodePaths().backupMasterAddressesZNode, this.sn.toString());
  while (!(master.isAborted() || master.isStopped())) {
    blockUntilActive.setStatus("Trying to register in ZK as active master");
    // Try to become the active master, watch if there is another master.
    // Write out our ServerName as versioned bytes.
    try {
      if (
        MasterAddressTracker.setMasterAddress(this.watcher,
          this.watcher.getZNodePaths().masterAddressZNode, this.sn, infoPort)
      ) {
        // If we were a backup master before, delete our ZNode from the backup
        // master directory since we are the active one now
        if (ZKUtil.checkExists(this.watcher, backupZNode) != -1) {
          LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory");
          ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode);
        }
        // Save the znode in a file, this will allow to check if we crash in the launch scripts
        ZNodeClearer.writeMyEphemeralNodeOnDisk(this.sn.toString());

        // We are the master, return
        blockUntilActive.setStatus("Successfully registered as active master.");
        this.clusterHasActiveMaster.set(true);
        activeMasterServerName = sn;
        LOG.info("Registered as active master=" + this.sn);
        return true;
      }

      // Invalidate the active master name so that subsequent requests do not get any stale
      // master information. Will be re-fetched if needed.
      activeMasterServerName = null;
      // There is another active master running elsewhere or this is a restart
      // and the master ephemeral node has not expired yet.
      this.clusterHasActiveMaster.set(true);

      String msg;
      byte[] bytes =
        ZKUtil.getDataAndWatch(this.watcher, this.watcher.getZNodePaths().masterAddressZNode);
      if (bytes == null) {
        msg = ("A master was detected, but went down before its address "
          + "could be read. Attempting to become the next active master");
      } else {
        ServerName currentMaster;
        try {
          currentMaster = ProtobufUtil.parseServerNameFrom(bytes);
        } catch (DeserializationException e) {
          LOG.warn("Failed parse", e);
          // Hopefully next time around we won't fail the parse. Dangerous.
          continue;
        }
        if (ServerName.isSameAddress(currentMaster, this.sn)) {
          msg = ("Current master has this master's address, " + currentMaster
            + "; master was restarted? Deleting node.");
          // Hurry along the expiration of the znode.
          ZKUtil.deleteNode(this.watcher, this.watcher.getZNodePaths().masterAddressZNode);

          // We may have failed to delete the znode at the previous step, but
          // we delete the file anyway: a second attempt to delete the znode is likely to fail
          // again.
          ZNodeClearer.deleteMyEphemeralNodeOnDisk();
        } else {
          msg = "Another master is the active master, " + currentMaster
            + "; waiting to become the next active master";
        }
      }
      LOG.info(msg);
      blockUntilActive.setStatus(msg);
    } catch (KeeperException ke) {
      master.abort("Received an unexpected KeeperException, aborting", ke);
      return false;
    }
    synchronized (this.clusterHasActiveMaster) {
      while (clusterHasActiveMaster.get() && !master.isStopped()) {
        try {
          clusterHasActiveMaster.wait(checkInterval);
        } catch (InterruptedException e) {
          // We expect to be interrupted when a master dies,
          // will fall out if so
          LOG.debug("Interrupted waiting for master to die", e);
        }
      }
      if (clusterShutDown.get()) {
        this.master.stop("Cluster went down before this master became active");
      }
    }
  }
  return false;
}
3.68
hbase_MobUtils_parseDate
/**
 * Parses the string to a date.
 * @param dateString The string representation of a date, in the format yyyymmdd.
 * @return A date.
 */
public static Date parseDate(String dateString) throws ParseException {
  return LOCAL_FORMAT.get().parse(dateString);
}
3.68
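Usage sketch; the input encodes January 15, 2024 in the yyyymmdd layout the javadoc describes:

Date day = MobUtils.parseDate("20240115"); // throws ParseException on malformed input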
framework_VTree_setState
/** For internal use only. May be removed or replaced in the future. */
public void setState(boolean state, boolean notifyServer) {
    if (open == state) {
        return;
    }
    if (state) {
        if (!childrenLoaded && notifyServer) {
            client.updateVariable(paintableId, "requestChildTree", true,
                    false);
        }
        if (notifyServer) {
            client.updateVariable(paintableId, "expand",
                    new String[] { key }, true);
        }
        addStyleName(CLASSNAME + "-expanded");
        Roles.getTreeitemRole().setAriaExpandedState(getElement(),
                ExpandedValue.TRUE);
        childNodeContainer.setVisible(true);
    } else {
        removeStyleName(CLASSNAME + "-expanded");
        Roles.getTreeitemRole().setAriaExpandedState(getElement(),
                ExpandedValue.FALSE);
        childNodeContainer.setVisible(false);
        if (notifyServer) {
            client.updateVariable(paintableId, "collapse",
                    new String[] { key }, true);
        }
    }
    open = state;

    if (!rendering) {
        doLayout();
    }
}
3.68
flink_S3TestCredentials_getTestBucketUri
/**
 * Gets the URI for the path under which all tests should put their data.
 *
 * <p>This method throws an exception if the bucket was not configured. Tests should use {@link
 * #assumeCredentialsAvailable()} to skip tests when credentials are not available.
 */
public static String getTestBucketUri() {
    return getTestBucketUriWithScheme("s3");
}
3.68
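A sketch of the intended test pattern; the test class layout and method names are invented:

@BeforeClass
public static void checkCredentials() {
    // Skips the whole test class when S3 credentials are not configured
    S3TestCredentials.assumeCredentialsAvailable();
}

@Test
public void testWrite() throws Exception {
    String base = S3TestCredentials.getTestBucketUri(); // e.g. "s3://<bucket>/..."
    // ... write test data under 'base' ...
}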