Columns: name (string, 12 to 178 chars), code_snippet (string, 8 to 36.5k chars), score (float64, 3.26 to 3.68)
flink_Configuration_getClass
/** * Returns the class associated with the given key, resolving it from its string value. * * @param <T> The type of the class to return. * @param key The key pointing to the associated value * @param defaultValue The optional default value returned if no entry exists * @param classLoader The class loader used to resolve the class. * @return The value associated with the given key, or the default value, if no entry for the * key exists. */ @SuppressWarnings("unchecked") public <T> Class<T> getClass( String key, Class<? extends T> defaultValue, ClassLoader classLoader) throws ClassNotFoundException { Optional<Object> o = getRawValue(key); if (!o.isPresent()) { return (Class<T>) defaultValue; } if (o.get().getClass() == String.class) { return (Class<T>) Class.forName((String) o.get(), true, classLoader); } throw new IllegalArgumentException( "Configuration cannot evaluate object of class " + o.get().getClass() + " as a class name"); }
3.68
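A minimal usage sketch for the getClass lookup above; the key name and stored class are illustrative assumptions, not taken from the snippet:

import org.apache.flink.configuration.Configuration;

public class GetClassExample {
    public static void main(String[] args) throws ClassNotFoundException {
        Configuration conf = new Configuration();
        // Hypothetical key; the stored value must be a fully qualified class name.
        conf.setString("serializer.class", "java.util.ArrayList");
        Class<Object> clazz = conf.getClass(
                "serializer.class", Object.class, GetClassExample.class.getClassLoader());
        System.out.println(clazz); // prints: class java.util.ArrayList
    }
}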
framework_NullValidator_getErrorMessage
/** * Gets the error message that is displayed in case the value is invalid. * * @return the Error Message. */ public String getErrorMessage() { return errorMessage; }
3.68
morf_AbstractSqlDialectTest_testAlterColumnFromNotNullableToNotNullable
/** * Test changing a non-nullable column to a non-nullable column (i.e. an alter column statement that leaves nullability set to <code>false</code>). */ @Test public void testAlterColumnFromNotNullableToNotNullable() { testAlterTableColumn(TEST_TABLE, AlterationType.ALTER, getColumn(TEST_TABLE, FLOAT_FIELD), column(FLOAT_FIELD, DataType.DECIMAL, 20, 3), expectedAlterTableAlterColumnFromNotNullableToNotNullableStatement()); }
3.68
flink_JoinOperator_projectTuple9
/** * Projects a pair of joined elements to a {@link Tuple} with the previously selected * fields. Requires the classes of the fields of the resulting tuples. * * @return The projected data set. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7, T8> ProjectJoin<I1, I2, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> projectTuple9() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes); TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>> tType = new TupleTypeInfo<Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>(fTypes); return new ProjectJoin<I1, I2, Tuple9<T0, T1, T2, T3, T4, T5, T6, T7, T8>>( this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this); }
3.68
hbase_MetricsTableRequests_updatePut
/** * Update the Put time histogram * @param t time it took */ public void updatePut(long t) { if (isEnableTableLatenciesMetrics()) { putTimeHistogram.update(t); } }
3.68
hbase_ColumnSchemaModel___getCompression
/** Returns the value of the COMPRESSION attribute or its default if unset */ public String __getCompression() { Object o = attrs.get(COMPRESSION); return o != null ? o.toString() : ColumnFamilyDescriptorBuilder.DEFAULT_COMPRESSION.name(); }
3.68
graphhopper_WayToEdgeConverter_getNodes
/** * All the intermediate nodes, i.e. for an edge chain like this: * <pre> * a b c d * 0---1---2---3---4 * </pre> * where 'a' is the from-edge and 'd' is the to-edge this will be [1,2,3] */ public IntArrayList getNodes() { return nodes; }
3.68
open-banking-gateway_EncryptionKeySerde_asString
/** * Convert symmetric key with initialization vector to string. * @param secretKeyWithIv Symmetric Key + IV * @return Serialized key */ @SneakyThrows public String asString(SecretKeyWithIv secretKeyWithIv) { return mapper.writeValueAsString(new SecretKeyWithIvContainer(secretKeyWithIv)); }
3.68
hbase_SnapshotScannerHDFSAclHelper_removeNamespaceAccessAcl
/** * Remove table access acl from the namespace dir when deleting a table * @param tableName the table * @param removeUsers the users whose access acl will be removed * @param operation the operation name, used for logging only * @return false if an error occurred, otherwise true */ public boolean removeNamespaceAccessAcl(TableName tableName, Set<String> removeUsers, String operation) { try { long start = EnvironmentEdgeManager.currentTime(); if (removeUsers.size() > 0) { handleNamespaceAccessAcl(tableName.getNamespaceAsString(), removeUsers, HDFSAclOperation.OperationType.REMOVE); } LOG.info("Remove HDFS acl when {} table {}, cost {} ms", operation, tableName, EnvironmentEdgeManager.currentTime() - start); return true; } catch (Exception e) { LOG.error("Remove HDFS acl error when {} table {}", operation, tableName, e); return false; } }
3.68
querydsl_StringExpressions_lpad
/** * Create a {@code lpad(in, length, c)} expression * * <p>Returns in left-padded to length characters with c</p> * * @param in string to be padded * @param length target length * @param c padding char * @return lpad(in, length, c) */ public static StringExpression lpad(Expression<String> in, int length, char c) { return Expressions.stringOperation(Ops.StringOps.LPAD2, in, ConstantImpl.create(length), ConstantImpl.create(c)); }
3.68
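A hedged usage sketch for the lpad factory above; the column path is an assumption, and the exact package of StringExpressions depends on the querydsl module in use:

import com.querydsl.core.types.dsl.Expressions;
import com.querydsl.core.types.dsl.StringExpression;
import com.querydsl.core.types.dsl.StringPath;

// Builds lpad(code, 8, '0'); e.g. "42" evaluates to "00000042" on databases with LPAD support.
StringPath code = Expressions.stringPath("code");
StringExpression padded = StringExpressions.lpad(code, 8, '0');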
hadoop_ZKClient_getServiceData
/** * Get data published by the service at the registration address. * * @param path the path where the service is registered * @return the data of the registered service * @throws IOException if there are I/O errors. * @throws InterruptedException if the current thread is interrupted. */ public String getServiceData(String path) throws IOException, InterruptedException { String data; try { Stat stat = new Stat(); byte[] byteData = zkClient.getData(path, false, stat); data = new String(byteData, StandardCharsets.UTF_8); } catch(KeeperException ke) { throw new IOException(ke); } return data; }
3.68
framework_FieldGroup_wrapInTransactionalProperty
/** * Wraps a property in a transactional property. */ protected <T> Property.Transactional<T> wrapInTransactionalProperty( Property<T> itemProperty) { return new TransactionalPropertyWrapper<T>(itemProperty); }
3.68
hadoop_MawoConfiguration_getZKRetriesNum
/** * Get ZooKeeper retries number. * @return value of ZooKeeper.retries.num */ public int getZKRetriesNum() { return Integer.parseInt(configsMap.get(ZK_RETRIES_NUM)); }
3.68
framework_CalendarComponentEvent_getComponent
/* * (non-Javadoc) * * @see com.vaadin.ui.Component.Event#getComponent() */ @Override public Calendar getComponent() { return (Calendar) super.getComponent(); }
3.68
pulsar_ResourceGroupService_getRgUsageAggregationLatency
// Visibility for testing. protected static Summary.Child.Value getRgUsageAggregationLatency() { return rgUsageAggregationLatency.get(); }
3.68
hbase_MetaTableMetrics_registerAndMarkMeter
// Helper function to register and mark meter if not present private void registerAndMarkMeter(String requestMeter) { if (requestMeter.isEmpty()) { return; } if (!registry.get(requestMeter).isPresent()) { metrics.add(requestMeter); } registry.meter(requestMeter).mark(); }
3.68
hadoop_S3ListResult_objectKeys
/** * Get the list of keys in the list result. * @return a possibly empty list */ private List<String> objectKeys() { return getS3Objects().stream() .map(S3Object::key) .collect(Collectors.toList()); }
3.68
framework_ScrollbarBundle_getScrollPos
/** * Gets the scroll position of the scrollbar in the axis the scrollbar is * representing. * * @return the current scroll position in pixels */ public final double getScrollPos() { int internalScrollPos = internalGetScrollPos(); assert Math.abs(internalScrollPos - toInt32(scrollPos)) <= 1 : "calculated scroll position (" + scrollPos + ") did not match the DOM element scroll position (" + internalScrollPos + ")"; return scrollPos; }
3.68
dubbo_TriHttp2RemoteFlowController_decrementFlowControlWindow
/** * Decrement the per stream and connection flow control window by {@code bytes}. */ private void decrementFlowControlWindow(int bytes) { try { int negativeBytes = -bytes; connectionState.incrementStreamWindow(negativeBytes); incrementStreamWindow(negativeBytes); } catch (Http2Exception e) { // Should never get here since we're decrementing. throw new IllegalStateException("Invalid window state when writing frame: " + e.getMessage(), e); } }
3.68
flink_PythonEnvUtils_startPythonProcess
/** * Starts the python process. * * @param pythonEnv the python environment the process will run in. * @param commands the commands that the python process will execute. * @param redirectToPipe whether to redirect the process output to a pipe rather than inheriting the parent's output. * @return the Process representing the python process. * @throws IOException Thrown if an error occurred when starting the python process. */ static Process startPythonProcess( PythonEnvironment pythonEnv, List<String> commands, boolean redirectToPipe) throws IOException { ProcessBuilder pythonProcessBuilder = new ProcessBuilder(); Map<String, String> env = pythonProcessBuilder.environment(); if (pythonEnv.pythonPath != null) { String defaultPythonPath = env.get("PYTHONPATH"); if (Strings.isNullOrEmpty(defaultPythonPath)) { env.put("PYTHONPATH", pythonEnv.pythonPath); } else { env.put( "PYTHONPATH", String.join(File.pathSeparator, pythonEnv.pythonPath, defaultPythonPath)); } } if (pythonEnv.archivesDirectory != null) { pythonProcessBuilder.directory(new File(pythonEnv.archivesDirectory)); } pythonEnv.systemEnv.forEach(env::put); commands.add(0, pythonEnv.pythonExec); pythonProcessBuilder.command(commands); // redirect the stderr to stdout pythonProcessBuilder.redirectErrorStream(true); if (redirectToPipe) { pythonProcessBuilder.redirectOutput(ProcessBuilder.Redirect.PIPE); } else { // make the child process's output the same as the parent process's. pythonProcessBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT); } LOG.info( "Starting Python process with environment variables: {{}}, command: {}", env.entrySet().stream() .map(e -> e.getKey() + "=" + e.getValue()) .collect(Collectors.joining(", ")), String.join(" ", commands)); Process process = pythonProcessBuilder.start(); if (!process.isAlive()) { throw new RuntimeException("Failed to start Python process. "); } return process; }
3.68
flink_MemoryManager_getExternalSharedMemoryResource
/** * Acquires a shared resource, identified by a type string. If the resource already exists, this * returns a descriptor to the resource. If the resource does not yet exist, the method * initializes a new resource using the initializer function and given size. * * <p>The resource is opaque, meaning the memory manager does not understand its structure. * * <p>The OpaqueMemoryResource object returned from this method must be closed once it is no longer * used. Once all acquisitions have closed the object, the resource itself is closed. */ public <T extends AutoCloseable> OpaqueMemoryResource<T> getExternalSharedMemoryResource( String type, LongFunctionWithException<T, Exception> initializer, long numBytes) throws Exception { // This object identifies the lease in this request. It is used only to identify the release // operation. // Using the object to represent the lease is a bit safer than just using a reference // counter. final Object leaseHolder = new Object(); final SharedResources.ResourceAndSize<T> resource = sharedResources.getOrAllocateSharedResource( type, leaseHolder, initializer, numBytes); final ThrowingRunnable<Exception> disposer = () -> sharedResources.release(type, leaseHolder); return new OpaqueMemoryResource<>(resource.resourceHandle(), resource.size(), disposer); }
3.68
hbase_DefaultOperationQuota_updateEstimateConsumeQuota
/** * Update the estimated quota (read/write size/capacity units) that will be consumed * @param numWrites the number of write requests * @param numReads the number of read requests * @param numScans the number of scan requests */ protected void updateEstimateConsumeQuota(int numWrites, int numReads, int numScans) { writeConsumed = estimateConsume(OperationType.MUTATE, numWrites, 100); readConsumed = estimateConsume(OperationType.GET, numReads, 100); readConsumed += estimateConsume(OperationType.SCAN, numScans, 1000); writeCapacityUnitConsumed = calculateWriteCapacityUnit(writeConsumed); readCapacityUnitConsumed = calculateReadCapacityUnit(readConsumed); }
3.68
flink_TaskManagerServices_fromConfiguration
/** * Creates and returns the task manager services. * * @param taskManagerServicesConfiguration task manager configuration * @param permanentBlobService permanentBlobService used by the services * @param taskManagerMetricGroup metric group of the task manager * @param ioExecutor executor for async IO operations * @param scheduledExecutor scheduled executor in rpc service * @param fatalErrorHandler to handle class loading OOMs * @param workingDirectory the working directory of the process * @return task manager components * @throws Exception */ public static TaskManagerServices fromConfiguration( TaskManagerServicesConfiguration taskManagerServicesConfiguration, PermanentBlobService permanentBlobService, MetricGroup taskManagerMetricGroup, ExecutorService ioExecutor, ScheduledExecutor scheduledExecutor, FatalErrorHandler fatalErrorHandler, WorkingDirectory workingDirectory) throws Exception { // pre-start checks checkTempDirs(taskManagerServicesConfiguration.getTmpDirPaths()); final TaskEventDispatcher taskEventDispatcher = new TaskEventDispatcher(); // start the I/O manager, it will create some temp directories. final IOManager ioManager = new IOManagerAsync(taskManagerServicesConfiguration.getTmpDirPaths()); final ShuffleEnvironment<?, ?> shuffleEnvironment = createShuffleEnvironment( taskManagerServicesConfiguration, taskEventDispatcher, taskManagerMetricGroup, ioExecutor, scheduledExecutor); final int listeningDataPort = shuffleEnvironment.start(); LOG.info( "TaskManager data connection initialized successfully; listening internally on port: {}", listeningDataPort); final KvStateService kvStateService = KvStateService.fromConfiguration(taskManagerServicesConfiguration); kvStateService.start(); final UnresolvedTaskManagerLocation unresolvedTaskManagerLocation = new UnresolvedTaskManagerLocation( taskManagerServicesConfiguration.getResourceID(), taskManagerServicesConfiguration.getExternalAddress(), // we expose the task manager location with the listening port // iff the external data port is not explicitly defined taskManagerServicesConfiguration.getExternalDataPort() > 0 ? 
taskManagerServicesConfiguration.getExternalDataPort() : listeningDataPort, taskManagerServicesConfiguration.getNodeId()); final BroadcastVariableManager broadcastVariableManager = new BroadcastVariableManager(); final TaskSlotTable<Task> taskSlotTable = createTaskSlotTable( taskManagerServicesConfiguration.getNumberOfSlots(), taskManagerServicesConfiguration.getTaskExecutorResourceSpec(), taskManagerServicesConfiguration.getTimerServiceShutdownTimeout(), taskManagerServicesConfiguration.getPageSize(), ioExecutor); final JobTable jobTable = DefaultJobTable.create(); final JobLeaderService jobLeaderService = new DefaultJobLeaderService( unresolvedTaskManagerLocation, taskManagerServicesConfiguration.getRetryingRegistrationConfiguration()); final TaskExecutorLocalStateStoresManager taskStateManager = new TaskExecutorLocalStateStoresManager( taskManagerServicesConfiguration.isLocalRecoveryEnabled(), taskManagerServicesConfiguration.getLocalRecoveryStateDirectories(), ioExecutor); final TaskExecutorStateChangelogStoragesManager changelogStoragesManager = new TaskExecutorStateChangelogStoragesManager(); final TaskExecutorChannelStateExecutorFactoryManager channelStateExecutorFactoryManager = new TaskExecutorChannelStateExecutorFactoryManager(); final TaskExecutorFileMergingManager fileMergingManager = new TaskExecutorFileMergingManager(); final boolean failOnJvmMetaspaceOomError = taskManagerServicesConfiguration .getConfiguration() .getBoolean(CoreOptions.FAIL_ON_USER_CLASS_LOADING_METASPACE_OOM); final boolean checkClassLoaderLeak = taskManagerServicesConfiguration .getConfiguration() .getBoolean(CoreOptions.CHECK_LEAKED_CLASSLOADER); final LibraryCacheManager libraryCacheManager = new BlobLibraryCacheManager( permanentBlobService, BlobLibraryCacheManager.defaultClassLoaderFactory( taskManagerServicesConfiguration.getClassLoaderResolveOrder(), taskManagerServicesConfiguration .getAlwaysParentFirstLoaderPatterns(), failOnJvmMetaspaceOomError ? fatalErrorHandler : null, checkClassLoaderLeak), false); final SlotAllocationSnapshotPersistenceService slotAllocationSnapshotPersistenceService; if (taskManagerServicesConfiguration.isLocalRecoveryEnabled()) { slotAllocationSnapshotPersistenceService = new FileSlotAllocationSnapshotPersistenceService( workingDirectory.getSlotAllocationSnapshotDirectory()); } else { slotAllocationSnapshotPersistenceService = NoOpSlotAllocationSnapshotPersistenceService.INSTANCE; } final GroupCache<JobID, PermanentBlobKey, JobInformation> jobInformationCache = new DefaultGroupCache.Factory<JobID, PermanentBlobKey, JobInformation>().create(); final GroupCache<JobID, PermanentBlobKey, TaskInformation> taskInformationCache = new DefaultGroupCache.Factory<JobID, PermanentBlobKey, TaskInformation>().create(); final GroupCache<JobID, PermanentBlobKey, ShuffleDescriptorGroup> shuffleDescriptorsCache = new DefaultGroupCache.Factory<JobID, PermanentBlobKey, ShuffleDescriptorGroup>() .create(); return new TaskManagerServices( unresolvedTaskManagerLocation, taskManagerServicesConfiguration.getManagedMemorySize().getBytes(), ioManager, shuffleEnvironment, kvStateService, broadcastVariableManager, taskSlotTable, jobTable, jobLeaderService, taskStateManager, fileMergingManager, changelogStoragesManager, channelStateExecutorFactoryManager, taskEventDispatcher, ioExecutor, libraryCacheManager, slotAllocationSnapshotPersistenceService, new SharedResources(), jobInformationCache, taskInformationCache, shuffleDescriptorsCache); }
3.68
dubbo_ServiceAnnotationResolver_resolveInterfaceClassName
/** * Resolve the class name of the interface * * @return if not found, return <code>null</code> */ public String resolveInterfaceClassName() { Class interfaceClass; // first, try to get the value from "interfaceName" attribute String interfaceName = resolveAttribute("interfaceName"); if (isEmpty(interfaceName)) { // If not found, try "interfaceClass" interfaceClass = resolveAttribute("interfaceClass"); } else { interfaceClass = resolveClass(interfaceName, getClass().getClassLoader()); } if (isGenericClass(interfaceClass)) { interfaceName = interfaceClass.getName(); } else { interfaceName = null; } if (isEmpty(interfaceName)) { // If not found, try to get the first interface from the service type Class[] interfaces = serviceType.getInterfaces(); if (isNotEmpty(interfaces)) { interfaceName = interfaces[0].getName(); } } return interfaceName; }
3.68
hadoop_CommitContext_getInnerSubmitter
/** * Return a submitter. As this pool is used less often, * create it on demand. * If created with 0 threads, this returns null so * TaskPool knows to run it in the current thread. * @return a submitter or null */ public synchronized TaskPool.Submitter getInnerSubmitter() { if (innerSubmitter == null && committerThreads > 0) { innerSubmitter = new PoolSubmitter(buildThreadPool(committerThreads)); } return innerSubmitter; }
3.68
framework_VComboBox_getDataReceivedHandler
/** * Returns a handler receiving notifications from the connector about * communications. * * @return the dataReceivedHandler */ public DataReceivedHandler getDataReceivedHandler() { return dataReceivedHandler; }
3.68
querydsl_CurveExpression_isClosed
/** * Returns 1 (TRUE) if this Curve is closed [StartPoint ( ) = EndPoint ( )]. * * @return closed */ public BooleanExpression isClosed() { if (closed == null) { closed = Expressions.booleanOperation(SpatialOps.IS_CLOSED, mixin); } return closed; }
3.68
hbase_HRegionLocation_getHostnamePort
/** * Returns String made of hostname and port formatted as per * {@link Addressing#createHostAndPortStr(String, int)} */ public String getHostnamePort() { return Addressing.createHostAndPortStr(this.getHostname(), this.getPort()); }
3.68
hbase_BulkLoadHFilesTool_createTable
/** * If the table is created for the first time, then "completebulkload" reads the files twice. More * modifications necessary if we want to avoid doing it. */ private void createTable(TableName tableName, Path hfofDir, AsyncAdmin admin) throws IOException { final FileSystem fs = hfofDir.getFileSystem(getConf()); // Add column families // Build a set of keys List<ColumnFamilyDescriptorBuilder> familyBuilders = new ArrayList<>(); SortedMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor<ColumnFamilyDescriptorBuilder>() { @Override public ColumnFamilyDescriptorBuilder bulkFamily(byte[] familyName) { ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(familyName); familyBuilders.add(builder); return builder; } @Override public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus) throws IOException { Path hfile = hfileStatus.getPath(); try (HFile.Reader reader = HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) { if (builder.getCompressionType() != reader.getFileContext().getCompression()) { builder.setCompressionType(reader.getFileContext().getCompression()); LOG.info("Setting compression " + reader.getFileContext().getCompression().name() + " for family " + builder.getNameAsString()); } byte[] first = reader.getFirstRowKey().get(); byte[] last = reader.getLastRowKey().get(); LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); // To eventually infer start key-end key boundaries Integer value = map.getOrDefault(first, 0); map.put(first, value + 1); value = map.containsKey(last) ? map.get(last) : 0; map.put(last, value - 1); } } }, true); byte[][] keys = inferBoundaries(map); TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName); familyBuilders.stream().map(ColumnFamilyDescriptorBuilder::build) .forEachOrdered(tdBuilder::setColumnFamily); FutureUtils.get(admin.createTable(tdBuilder.build(), keys)); LOG.info("Table " + tableName + " is available!!"); }
3.68
morf_ChangeColumn_apply
/** * @see org.alfasoftware.morf.upgrade.SchemaChange#apply(org.alfasoftware.morf.metadata.Schema) */ @Override public Schema apply(Schema schema) { return applyChange(schema, fromColumn, toColumn); }
3.68
AreaShop_SignsFeature_locationToString
/** * Convert a location to a string to use as map key. * @param location The location to get the key for * @return A string to use in a map for a location */ public static String locationToString(Location location) { return location.getWorld().getName() + ";" + location.getBlockX() + ";" + location.getBlockY() + ";" + location.getBlockZ(); }
3.68
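A small sketch of the map-key format produced above; the world name and coordinates are made up, and getBlockX/Y/Z floor the doubles:

import org.bukkit.Bukkit;
import org.bukkit.Location;

// Illustrative only: Bukkit.getWorld(...) needs a running server.
Location loc = new Location(Bukkit.getWorld("world"), 12.7, 64.2, -3.9);
String key = SignsFeature.locationToString(loc);
// Block coordinates are used, so key is "world;12;64;-4".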
flink_UnsortedGrouping_reduce
/** * Applies a Reduce transformation on a grouped {@link DataSet}. * * <p>For each group, the transformation consecutively calls a {@link * org.apache.flink.api.common.functions.RichReduceFunction} until only a single element for * each group remains. A ReduceFunction combines two elements into one new element of the same * type. * * @param reducer The ReduceFunction that is applied on each group of the DataSet. * @return A ReduceOperator that represents the reduced DataSet. * @see org.apache.flink.api.common.functions.RichReduceFunction * @see ReduceOperator * @see DataSet */ public ReduceOperator<T> reduce(ReduceFunction<T> reducer) { if (reducer == null) { throw new NullPointerException("Reduce function must not be null."); } return new ReduceOperator<T>( this, inputDataSet.clean(reducer), Utils.getCallLocationName()); }
3.68
pulsar_ResourceUnitRanking_addPreAllocatedServiceUnit
/** * Pre-allocate a ServiceUnit to this ResourceUnit. */ public void addPreAllocatedServiceUnit(String suName, ResourceQuota quota) { this.preAllocatedBundles.add(suName); this.preAllocatedQuota.add(quota); estimateLoadPercentage(); }
3.68
framework_ReverseConverter_getModelType
/* * (non-Javadoc) * * @see com.vaadin.data.util.converter.Converter#getSourceType() */ @Override public Class<MODEL> getModelType() { return realConverter.getPresentationType(); }
3.68
hadoop_HeaderProcessing_retrieveHeaders
/** * Query the store, get all the headers into a map. Each Header * has the "header." prefix. * Caller must have read access. * The value of each header is the string value of the object * UTF-8 encoded. * @param path path of object. * @param statistic statistic to use for duration tracking. * @return the headers * @throws IOException failure, including file not found. */ private Map<String, byte[]> retrieveHeaders( final Path path, final Statistic statistic) throws IOException { StoreContext context = getStoreContext(); String objectKey = context.pathToKey(path); String symbol = statistic.getSymbol(); S3AStatisticsContext instrumentation = context.getInstrumentation(); Map<String, byte[]> headers = new TreeMap<>(); HeadObjectResponse md; // Attempting to get metadata for the root, so use head bucket. if (objectKey.isEmpty()) { HeadBucketResponse headBucketResponse = trackDuration(instrumentation, symbol, () -> callbacks.getBucketMetadata()); if (headBucketResponse.sdkHttpResponse() != null && headBucketResponse.sdkHttpResponse().headers() != null && headBucketResponse.sdkHttpResponse().headers().get(AWSHeaders.CONTENT_TYPE) != null) { maybeSetHeader(headers, XA_CONTENT_TYPE, headBucketResponse.sdkHttpResponse().headers().get(AWSHeaders.CONTENT_TYPE).get(0)); } maybeSetHeader(headers, XA_CONTENT_LENGTH, 0); return headers; } try { md = trackDuration(instrumentation, symbol, () -> callbacks.getObjectMetadata(objectKey)); } catch (FileNotFoundException e) { // no entry. It could be a directory, so try again. md = trackDuration(instrumentation, symbol, () -> callbacks.getObjectMetadata(objectKey + "/")); } // all user metadata Map<String, String> rawHeaders = md.metadata(); rawHeaders.forEach((key, value) -> headers.put(XA_HEADER_PREFIX + key, encodeBytes(value))); // and add the usual content length &c, if set maybeSetHeader(headers, XA_CACHE_CONTROL, md.cacheControl()); maybeSetHeader(headers, XA_CONTENT_DISPOSITION, md.contentDisposition()); maybeSetHeader(headers, XA_CONTENT_ENCODING, md.contentEncoding()); maybeSetHeader(headers, XA_CONTENT_LANGUAGE, md.contentLanguage()); // If CSE is enabled, use the unencrypted content length. // TODO: CSE is not supported yet, add these headers in during CSE work. // if (md.getUserMetaDataOf(Headers.CRYPTO_CEK_ALGORITHM) != null // && md.getUserMetaDataOf(Headers.UNENCRYPTED_CONTENT_LENGTH) != null) { // maybeSetHeader(headers, XA_CONTENT_LENGTH, // md.getUserMetaDataOf(Headers.UNENCRYPTED_CONTENT_LENGTH)); // } else { // maybeSetHeader(headers, XA_CONTENT_LENGTH, // md.contentLength()); // } // maybeSetHeader(headers, XA_CONTENT_MD5, // md.getContentMD5()); // TODO: Add back in else block during CSE work. 
maybeSetHeader(headers, XA_CONTENT_LENGTH, md.contentLength()); if (md.sdkHttpResponse() != null && md.sdkHttpResponse().headers() != null && md.sdkHttpResponse().headers().get("Content-Range") != null) { maybeSetHeader(headers, XA_CONTENT_RANGE, md.sdkHttpResponse().headers().get("Content-Range").get(0)); } maybeSetHeader(headers, XA_CONTENT_TYPE, md.contentType()); maybeSetHeader(headers, XA_ETAG, md.eTag()); maybeSetHeader(headers, XA_LAST_MODIFIED, Date.from(md.lastModified())); // AWS custom headers maybeSetHeader(headers, XA_ARCHIVE_STATUS, md.archiveStatus()); maybeSetHeader(headers, XA_OBJECT_LOCK_LEGAL_HOLD_STATUS, md.objectLockLegalHoldStatus()); maybeSetHeader(headers, XA_OBJECT_LOCK_MODE, md.objectLockMode()); maybeSetHeader(headers, XA_OBJECT_LOCK_RETAIN_UNTIL_DATE, md.objectLockRetainUntilDate()); maybeSetHeader(headers, XA_OBJECT_REPLICATION_STATUS, md.replicationStatus()); maybeSetHeader(headers, XA_S3_VERSION_ID, md.versionId()); maybeSetHeader(headers, XA_SERVER_SIDE_ENCRYPTION, md.serverSideEncryptionAsString()); maybeSetHeader(headers, XA_STORAGE_CLASS, md.storageClassAsString()); return headers; }
3.68
flink_StreamConfig_triggerSerializationAndReturnFuture
/** Trigger the object config serialization and return the completable future. */ public CompletableFuture<StreamConfig> triggerSerializationAndReturnFuture( Executor ioExecutor) { FutureUtils.combineAll(chainedTaskFutures.values()) .thenAcceptAsync( chainedConfigs -> { try { // Serialize all the objects to config. serializeAllConfigs(); InstantiationUtil.writeObjectToConfig( chainedConfigs.stream() .collect( Collectors.toMap( StreamConfig::getVertexID, Function.identity())), this.config, CHAINED_TASK_CONFIG); serializationFuture.complete(this); } catch (Throwable throwable) { serializationFuture.completeExceptionally(throwable); } }, ioExecutor); return serializationFuture; }
3.68
framework_ContainerOrderedWrapper_removeItemSetChangeListener
/* * Removes an Item set change listener from the object. Don't add a JavaDoc * comment here, we use the default documentation from implemented * interface. */ @Override public void removeItemSetChangeListener( Container.ItemSetChangeListener listener) { if (container instanceof Container.ItemSetChangeNotifier) { ((Container.ItemSetChangeNotifier) container) .removeItemSetChangeListener( new PiggybackListener(listener)); } }
3.68
flink_TableConfigUtils_getMaxIdleStateRetentionTime
/** * Similar to {@link TableConfig#getMaxIdleStateRetentionTime()}. * * @see TableConfig#getMaxIdleStateRetentionTime() */ @Deprecated public static long getMaxIdleStateRetentionTime(ReadableConfig tableConfig) { return tableConfig.get(ExecutionConfigOptions.IDLE_STATE_RETENTION).toMillis() * 3 / 2; }
3.68
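The 3/2 factor appears to preserve the legacy TableConfig behaviour where the max idle-state retention defaulted to 1.5 times the min; a quick arithmetic check with an assumed 10-minute retention:

import java.time.Duration;

public class RetentionCheck {
    public static void main(String[] args) {
        Duration idleStateRetention = Duration.ofMinutes(10); // assumed configured value
        long maxRetentionMillis = idleStateRetention.toMillis() * 3 / 2;
        System.out.println(maxRetentionMillis); // 900000 ms, i.e. 15 minutes
    }
}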
framework_AbstractComponent_getLocale
/* * Don't add a JavaDoc comment here, we use the default documentation from * implemented interface. */ @Override public Locale getLocale() { if (locale != null) { return locale; } HasComponents parent = getParent(); if (parent != null) { return parent.getLocale(); } final VaadinSession session = getSession(); if (session != null) { return session.getLocale(); } return null; }
3.68
flink_AvroParquetReaders_forReflectRecord
/** * Creates a new {@link AvroParquetRecordFormat} that reads the parquet file into Avro records * via reflection. * * <p>To read into Avro {@link GenericRecord GenericRecords}, use the {@link * #forGenericRecord(Schema)} method. * * <p>To read into Avro {@link org.apache.avro.specific.SpecificRecord SpecificRecords}, use the * {@link #forSpecificRecord(Class)} method. * * @see #forGenericRecord(Schema) * @see #forSpecificRecord(Class) */ public static <T> StreamFormat<T> forReflectRecord(final Class<T> typeClass) { if (SpecificRecordBase.class.isAssignableFrom(typeClass)) { throw new IllegalArgumentException( "Please use AvroParquetReaders.forSpecificRecord(Class<T>) for SpecificRecord."); } else if (GenericRecord.class.isAssignableFrom(typeClass)) { throw new IllegalArgumentException( "Please use AvroParquetReaders.forGenericRecord(Class<T>) for GenericRecord. " + "Cannot read and create Avro GenericRecord without specifying the Avro Schema. " + "This is because Flink needs to be able to serialize the results in its data flow, which is " + "very inefficient without the schema. And while the Schema is stored in the Avro file header, " + "Flink needs this schema during 'pre-flight' time when the data flow is set up and wired, " + "which is before there is access to the files."); } // this is a POJO that Avro will read via reflect deserialization // for Flink, this is just a plain POJO type return new AvroParquetRecordFormat<>( TypeExtractor.createTypeInfo(typeClass), () -> ReflectData.get()); }
3.68
hadoop_RegexMountPointInterceptorFactory_create
/** * The interceptorSettingsString should be like ${type}:${string}, * e.g. replaceresolveddstpath:word1,word2. * * @param interceptorSettingsString the interceptor settings string to parse * @return Return interceptor based on setting or null on bad/unknown config. */ public static RegexMountPointInterceptor create( String interceptorSettingsString) { int typeTagIndex = interceptorSettingsString .indexOf(RegexMountPoint.INTERCEPTOR_INTERNAL_SEP); if (typeTagIndex == -1 || (typeTagIndex == ( interceptorSettingsString.length() - 1))) { return null; } String typeTag = interceptorSettingsString.substring(0, typeTagIndex).trim() .toLowerCase(); RegexMountPointInterceptorType interceptorType = RegexMountPointInterceptorType.get(typeTag); if (interceptorType == null) { return null; } switch (interceptorType) { case REPLACE_RESOLVED_DST_PATH: RegexMountPointInterceptor interceptor = RegexMountPointResolvedDstPathReplaceInterceptor .deserializeFromString(interceptorSettingsString); return interceptor; default: // impossible now return null; } }
3.68
querydsl_PathMetadataFactory_forDelegate
/** * Create a new PathMetadata instance for delegate access * * @param delegate delegate path * @return wrapped path */ public static <T> PathMetadata forDelegate(Path<T> delegate) { return new PathMetadata(delegate, delegate, PathType.DELEGATE); }
3.68
framework_VColorPickerArea_getColor
/** * Gets the color. * * @since 8.4 * @return the color */ public String getColor() { return color; }
3.68
framework_VScrollTable_fireHeaderClickedEvent
/** * Fires a header click event after the user has clicked a column header * cell * * @param event * The click event */ private void fireHeaderClickedEvent(Event event) { if (client.hasEventListeners(VScrollTable.this, TableConstants.HEADER_CLICK_EVENT_ID)) { MouseEventDetails details = MouseEventDetailsBuilder .buildMouseEventDetails(event); client.updateVariable(paintableId, "headerClickEvent", details.toString(), false); client.updateVariable(paintableId, "headerClickCID", cid, true); } }
3.68
flink_JobVertexInputInfo_getExecutionVertexInputInfos
/** The input information of subtasks of this job vertex. */ public List<ExecutionVertexInputInfo> getExecutionVertexInputInfos() { return executionVertexInputInfos; }
3.68
framework_VTabsheet_renderContent
/** * Renders the widget content for a tab sheet. * * @param newWidget * the content widget or {@code null} if there is none */ public void renderContent(Widget newWidget) { assert tabPanel.getWidgetCount() <= 1; if (null == newWidget) { newWidget = new SimplePanel(); } if (tabPanel.getWidgetCount() == 0) { tabPanel.add(newWidget); } else if (tabPanel.getWidget(0) != newWidget) { tabPanel.remove(0); tabPanel.add(newWidget); } assert tabPanel.getWidgetCount() <= 1; // There's never any other index than 0, but maintaining API for now tabPanel.showWidget(0); iLayout(); updateOpenTabSize(); removeStyleDependentName("loading"); }
3.68
hadoop_FutureIOSupport_awaitFuture
/** * Given a future, evaluate it. Raised exceptions are * extracted and handled. * See {@link FutureIO#awaitFuture(Future, long, TimeUnit)}. * @param future future to evaluate * @param <T> type of the result. * @param timeout timeout. * @param unit unit. * @return the result, if all went well. * @throws InterruptedIOException future was interrupted * @throws IOException if something went wrong * @throws RuntimeException any nested RTE thrown * @throws TimeoutException the future timed out. */ @Deprecated public static <T> T awaitFuture(final Future<T> future, final long timeout, final TimeUnit unit) throws InterruptedIOException, IOException, RuntimeException, TimeoutException { return FutureIO.awaitFuture(future, timeout, unit); }
3.68
hbase_Segment_heapSizeChange
/** * @return The increase in heap size because of this cell addition. This includes this cell POJO's * heap size itself and additional overhead because of addition on to CSLM. */ protected long heapSizeChange(Cell cell, boolean allocated) { long res = 0; if (allocated) { boolean onHeap = true; MemStoreLAB memStoreLAB = getMemStoreLAB(); if (memStoreLAB != null) { onHeap = memStoreLAB.isOnHeap(); } res += indexEntryOnHeapSize(onHeap); if (onHeap) { res += cell.heapSize(); } res = ClassSize.align(res); } return res; }
3.68
flink_RestServerEndpointConfiguration_getRestBindPortRange
/** * Returns the port range that the REST server endpoint should listen on. * * @return port range that the REST server endpoint should listen on */ public String getRestBindPortRange() { return restBindPortRange; }
3.68
flink_StatsSummary_getAverage
/** * Calculates the average over all seen values. * * @return Average over all seen values. */ public long getAverage() { if (count == 0) { return 0; } else { return sum / count; } }
3.68
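Note that getAverage above uses long division, so fractional averages are truncated; a tiny check under that assumption:

public class AverageCheck {
    public static void main(String[] args) {
        long sum = 3;
        long count = 2;
        System.out.println(sum / count); // prints 1, not 1.5: long division truncates
    }
}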
morf_FieldFromSelectFirst_toString
/** * @see java.lang.Object#toString() */ @Override public String toString() { return selectFirstStatement.toString() + super.toString(); }
3.68
hadoop_CounterGroupFactory_isFrameworkGroup
/** * Check whether a group name is a name of a framework group (including * the filesystem group). * * @param name to check * @return true for framework group names */ public static synchronized boolean isFrameworkGroup(String name) { return s2i.get(name) != null || name.equals(FS_GROUP_NAME); }
3.68
rocketmq-connect_JsonConverter_asJsonSchema
/** * convert ConnectRecord schema to json schema * * @param schema * @return */ public JSONObject asJsonSchema(Schema schema) { if (schema == null) { return null; } // from cached JSONObject cached = fromConnectSchemaCache.get(schema); if (cached != null) { return cached.clone(); } JSONObject jsonSchema; // convert field type name switch (schema.getFieldType()) { case BOOLEAN: jsonSchema = JsonSchema.BOOLEAN_SCHEMA(); break; case BYTES: jsonSchema = JsonSchema.BYTES_SCHEMA(); break; case FLOAT64: jsonSchema = JsonSchema.DOUBLE_SCHEMA(); break; case FLOAT32: jsonSchema = JsonSchema.FLOAT_SCHEMA(); break; case INT8: jsonSchema = JsonSchema.INT8_SCHEMA(); break; case INT16: jsonSchema = JsonSchema.INT16_SCHEMA(); break; case INT32: jsonSchema = JsonSchema.INT32_SCHEMA(); break; case INT64: jsonSchema = JsonSchema.INT64_SCHEMA(); break; case STRING: jsonSchema = JsonSchema.STRING_SCHEMA(); break; case ARRAY: jsonSchema = new JSONObject(); jsonSchema.put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.ARRAY_TYPE_NAME); jsonSchema.put(JsonSchema.ARRAY_ITEMS_FIELD_NAME, asJsonSchema(schema.getValueSchema())); break; case MAP: jsonSchema = new JSONObject(); jsonSchema.put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.MAP_TYPE_NAME); jsonSchema.put(JsonSchema.MAP_KEY_FIELD_NAME, asJsonSchema(schema.getKeySchema())); jsonSchema.put(JsonSchema.MAP_VALUE_FIELD_NAME, asJsonSchema(schema.getValueSchema())); break; case STRUCT: jsonSchema = new JSONObject(new ConcurrentHashMap<>()); jsonSchema.put(JsonSchema.SCHEMA_TYPE_FIELD_NAME, JsonSchema.STRUCT_TYPE_NAME); // field list JSONArray fields = new JSONArray(); for (Field field : schema.getFields()) { String fieldName = field.getName(); JSONObject fieldJsonSchema = asJsonSchema(field.getSchema()); fieldJsonSchema.put(JsonSchema.STRUCT_FIELD_NAME_FIELD_NAME, fieldName); fields.add(fieldJsonSchema); } jsonSchema.put(JsonSchema.STRUCT_FIELDS_FIELD_NAME, fields); break; default: throw new ConnectException("Couldn't translate unsupported schema type " + schema + "."); } // optional jsonSchema.put(JsonSchema.SCHEMA_OPTIONAL_FIELD_NAME, schema.isOptional()); // name if (schema.getName() != null) { jsonSchema.put(JsonSchema.SCHEMA_NAME_FIELD_NAME, schema.getName()); } // version if (schema.getVersion() != null) { jsonSchema.put(JsonSchema.SCHEMA_VERSION_FIELD_NAME, schema.getVersion()); } // doc if (schema.getDoc() != null) { jsonSchema.put(JsonSchema.SCHEMA_DOC_FIELD_NAME, schema.getDoc()); } // parameters if (schema.getParameters() != null) { JSONObject jsonSchemaParams = new JSONObject(); for (Map.Entry<String, String> prop : schema.getParameters().entrySet()) { jsonSchemaParams.put(prop.getKey(), prop.getValue()); } jsonSchema.put(JsonSchema.SCHEMA_PARAMETERS_FIELD_NAME, jsonSchemaParams); } // default value if (schema.getDefaultValue() != null) { jsonSchema.put(JsonSchema.SCHEMA_DEFAULT_FIELD_NAME, convertToJson(schema, schema.getDefaultValue())); } // add cache fromConnectSchemaCache.put(schema, jsonSchema); return jsonSchema; }
3.68
hudi_HoodieGlobalSimpleIndex_getAllBaseFilesInTable
/** * Load all files for all partitions as <Partition, filename> pair data. */ private List<Pair<String, HoodieBaseFile>> getAllBaseFilesInTable( final HoodieEngineContext context, final HoodieTable hoodieTable) { HoodieTableMetaClient metaClient = hoodieTable.getMetaClient(); List<String> allPartitionPaths = FSUtils.getAllPartitionPaths(context, config.getMetadataConfig(), metaClient.getBasePath()); // Obtain the latest data files from all the partitions. return getLatestBaseFilesForAllPartitions(allPartitionPaths, context, hoodieTable); }
3.68
hadoop_AbfsRestOperation_incrementCounter
/** * Incrementing Abfs counters with a long value. * * @param statistic the Abfs statistic that needs to be incremented. * @param value the value to be incremented by. */ private void incrementCounter(AbfsStatistic statistic, long value) { if (abfsCounters != null) { abfsCounters.incrementCounter(statistic, value); } }
3.68
hbase_TableRecordReader_close
/** * Closes the split. * @see org.apache.hadoop.mapreduce.RecordReader#close() */ @Override public void close() { this.recordReaderImpl.close(); }
3.68
hadoop_RMDelegatedNodeLabelsUpdater_serviceStop
/** * Terminate the timer. * * @throws Exception if an error occurs while stopping the service. */ @Override protected void serviceStop() throws Exception { if (nodeLabelsScheduler != null) { nodeLabelsScheduler.cancel(); } super.serviceStop(); }
3.68
hbase_LocalHBaseCluster_getLiveRegionServers
/** * @return List of running servers (Some servers may have been killed or aborted during lifetime * of cluster; these servers are not included in this list). */ public List<JVMClusterUtil.RegionServerThread> getLiveRegionServers() { List<JVMClusterUtil.RegionServerThread> liveServers = new ArrayList<>(); List<RegionServerThread> list = getRegionServers(); for (JVMClusterUtil.RegionServerThread rst : list) { if (rst.isAlive()) liveServers.add(rst); else LOG.info("Not alive " + rst.getName()); } return liveServers; }
3.68
hbase_SegmentScanner_shouldUseScanner
/** * This check should be resolved at a higher level (MemStoreScanner); for now it returns true by * default. It doesn't throw IllegalStateException in order not to change the signature of the * overridden method. */ @Override public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) { return getSegment().shouldSeek(scan.getColumnFamilyTimeRange().getOrDefault( store.getColumnFamilyDescriptor().getName(), scan.getTimeRange()), oldestUnexpiredTS); }
3.68
flink_FlinkRelUtil_mergeable
/** The internal reusable method for filter, project and calc. */ private static boolean mergeable( int[] topInputRefCounter, List<RexNode> topProjects, List<RexNode> bottomProjects) { RexUtil.apply(new InputRefCounter(true, topInputRefCounter), topProjects, null); boolean mergeable = true; for (int idx = 0; idx < bottomProjects.size(); idx++) { RexNode node = bottomProjects.get(idx); if (!RexUtil.isDeterministic(node)) { assert idx < topInputRefCounter.length; if (topInputRefCounter[idx] > 1) { mergeable = false; break; } } } return mergeable; }
3.68
flink_ExternalSerializer_of
/** * Creates an instance of a {@link ExternalSerializer} defined by the given {@link DataType}. */ public static <I, E> ExternalSerializer<I, E> of(DataType dataType, boolean isInternalInput) { return new ExternalSerializer<>( dataType, InternalSerializers.create(dataType.getLogicalType()), isInternalInput); }
3.68
pulsar_ManagedLedgerConfig_setMaxUnackedRangesToPersist
/** * @param maxUnackedRangesToPersist * max unacked message ranges that will be persisted and recovered. */ public ManagedLedgerConfig setMaxUnackedRangesToPersist(int maxUnackedRangesToPersist) { this.maxUnackedRangesToPersist = maxUnackedRangesToPersist; return this; }
3.68
framework_AbstractMultiSelectConnector_getCaption
/** * Returns the caption for the given item. * * @param item * the item, not {@code null} * @return caption of the item */ static String getCaption(JsonObject item) { return item.getString(ListingJsonConstants.JSONKEY_ITEM_VALUE); }
3.68
hbase_StoreFileWriter_withFavoredNodes
/** * @param favoredNodes an array of favored nodes or possibly null * @return this (for chained invocation) */ public Builder withFavoredNodes(InetSocketAddress[] favoredNodes) { this.favoredNodes = favoredNodes; return this; }
3.68
flink_Tuple12_toString
/** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8, * f9, f10, f11), where the individual fields are the value returned by calling {@link * Object#toString} on that field. * * @return The string representation of the tuple. */ @Override public String toString() { return "(" + StringUtils.arrayAwareToString(this.f0) + "," + StringUtils.arrayAwareToString(this.f1) + "," + StringUtils.arrayAwareToString(this.f2) + "," + StringUtils.arrayAwareToString(this.f3) + "," + StringUtils.arrayAwareToString(this.f4) + "," + StringUtils.arrayAwareToString(this.f5) + "," + StringUtils.arrayAwareToString(this.f6) + "," + StringUtils.arrayAwareToString(this.f7) + "," + StringUtils.arrayAwareToString(this.f8) + "," + StringUtils.arrayAwareToString(this.f9) + "," + StringUtils.arrayAwareToString(this.f10) + "," + StringUtils.arrayAwareToString(this.f11) + ")"; }
3.68
hbase_UserProvider_login
/** * Login with the given keytab and principal. This can be used for both SPN (Service Principal * Name) and UPN (User Principal Name), whose format should be clientname@REALM. * @param fileConfKey config name for client keytab * @param principalConfKey config name for client principal * @throws IOException underlying exception from UserGroupInformation.loginUserFromKeytab */ public void login(String fileConfKey, String principalConfKey) throws IOException { User.login(getConf().get(fileConfKey), getConf().get(principalConfKey)); }
3.68
hbase_FsDelegationToken_acquireDelegationToken
/** * Acquire the delegation token for the specified filesystem and token kind. Before requesting a * new delegation token, tries to find one already available. * @param tokenKind non-null token kind to get delegation token from the {@link UserProvider} * @param fs the filesystem that requires the delegation token * @throws IOException on fs.getDelegationToken() failure */ public void acquireDelegationToken(final String tokenKind, final FileSystem fs) throws IOException { Objects.requireNonNull(tokenKind, "tokenKind:null"); if (userProvider.isHadoopSecurityEnabled()) { this.fs = fs; userToken = userProvider.getCurrent().getToken(tokenKind, fs.getCanonicalServiceName()); if (userToken == null) { hasForwardedToken = false; userToken = fs.getDelegationToken(renewer); } else { hasForwardedToken = true; LOG.info("Use the existing token: " + userToken); } } }
3.68
hadoop_NamenodeStatusReport_getTotalSpace
/** * Get the total space. * * @return The total space. */ public long getTotalSpace() { return this.totalSpace; }
3.68
flink_LogicalTypeChecks_getPrecision
/** Returns the precision of all types that define a precision implicitly or explicitly. */ public static int getPrecision(LogicalType logicalType) { return logicalType.accept(PRECISION_EXTRACTOR); }
3.68
flink_ExecNodeContext_getTypeAsString
/** * Returns the {@link #name} and {@link #version}, to be serialized into the JSON plan as one * string, which in turn will be parsed by {@link ExecNodeContext#ExecNodeContext(String)} when * deserialized from a JSON plan or when needed by {@link * ExecNodeTypeIdResolver#typeFromId(DatabindContext, String)}. */ @JsonValue public String getTypeAsString() { if (name == null || version == null) { throw new TableException( String.format( "Can not serialize ExecNode with id: %d. Missing type, this is a bug," + " please file a ticket.", getId())); } return name + "_" + version; }
3.68
framework_VaadinFinderLocatorStrategy_getPropertyValue
/** * Helper method to get the string-form value of a named property of a * component connector * * @since 7.2 * @param c * any ComponentConnector instance * @param propertyName * property name to test for * @return a string, if the property is found, or null, if the property does * not exist on the object (or some other error is encountered). */ private String getPropertyValue(ComponentConnector c, String propertyName) { Property prop = AbstractConnector.getStateType(c) .getProperty(propertyName); try { return prop.getValue(c.getState()).toString(); } catch (Exception e) { return null; } }
3.68
dubbo_NacosServiceName_isConcrete
/** * Whether this is a concrete service name or not * * @return <code>true</code> if concrete, otherwise <code>false</code> */ public boolean isConcrete() { return isConcrete(serviceInterface) && isConcrete(version) && isConcrete(group); }
3.68
hadoop_BatchedRequests_getSchedulingRequests
/** * Get Collection of SchedulingRequests in this batch. * @return Collection of Scheduling Requests. */ @Override public Collection<SchedulingRequest> getSchedulingRequests() { return requests; }
3.68
hbase_HRegionFileSystem_getStoreFilesLocatedStatus
/** * Returns the store files' LocatedFileStatus which are available for the family. This method * performs the filtering based on the valid store files. * @param familyName Column Family Name * @return a list of store files' LocatedFileStatus for the specified family. */ public static List<LocatedFileStatus> getStoreFilesLocatedStatus(final HRegionFileSystem regionfs, final String familyName, final boolean validate) throws IOException { Path familyDir = regionfs.getStoreDir(familyName); List<LocatedFileStatus> locatedFileStatuses = CommonFSUtils.listLocatedStatus(regionfs.getFileSystem(), familyDir); if (locatedFileStatuses == null) { if (LOG.isTraceEnabled()) { LOG.trace("No StoreFiles for: " + familyDir); } return null; } List<LocatedFileStatus> validStoreFiles = Lists.newArrayList(); for (LocatedFileStatus status : locatedFileStatuses) { if (validate && !StoreFileInfo.isValid(status)) { // recovered.hfiles directory is expected inside CF path when hbase.wal.split.to.hfile is // true, refer HBASE-23740 if (!HConstants.RECOVERED_HFILES_DIR.equals(status.getPath().getName())) { LOG.warn("Invalid StoreFile: {}", status.getPath()); } } else { validStoreFiles.add(status); } } return validStoreFiles; }
3.68
flink_GlobalConfiguration_loadYAMLResource
/** * Loads a YAML-file of key-value pairs. * * <p>Colon and whitespace ": " separate key and value (one per line). The hash tag "#" starts a * single-line comment. * * <p>Example: * * <pre> * jobmanager.rpc.address: localhost # network address for communication with the job manager * jobmanager.rpc.port : 6123 # network port to connect to for communication with the job manager * taskmanager.rpc.port : 6122 # network port the task manager expects incoming IPC connections * </pre> * * <p>This does not span the whole YAML specification, but only the *syntax* of simple YAML * key-value pairs (see issue #113 on GitHub). If at any point there is a need to go * beyond simple key-value pair syntax, compatibility will allow introducing a YAML parser * library. * * @param file the YAML file to read from * @return the configuration parsed from the file * @see <a href="http://www.yaml.org/spec/1.2/spec.html">YAML 1.2 specification</a> */ private static Configuration loadYAMLResource(File file) { final Configuration config = new Configuration(); try (BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(file)))) { String line; int lineNo = 0; while ((line = reader.readLine()) != null) { lineNo++; // 1. check for comments String[] comments = line.split("#", 2); String conf = comments[0].trim(); // 2. get key and value if (conf.length() > 0) { String[] kv = conf.split(": ", 2); // skip line with no valid key-value pair if (kv.length == 1) { LOG.warn( "Error while trying to split key and value in configuration file " + file + ":" + lineNo + ": Line is not a key-value pair (missing space after ':'?)"); continue; } String key = kv[0].trim(); String value = kv[1].trim(); // sanity check if (key.length() == 0 || value.length() == 0) { LOG.warn( "Error after splitting key and value in configuration file " + file + ":" + lineNo + ": Key or value was empty"); continue; } config.setString(key, value); } } } catch (IOException e) { throw new RuntimeException("Error parsing YAML configuration.", e); } return config; }
3.68
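A short self-contained sketch of how the splitting logic above treats one line; the input is illustrative:

public class YamlLineSplit {
    public static void main(String[] args) {
        String line = "jobmanager.rpc.port: 6123  # port for JM communication";
        String conf = line.split("#", 2)[0].trim(); // "jobmanager.rpc.port: 6123"
        String[] kv = conf.split(": ", 2);          // ["jobmanager.rpc.port", "6123"]
        System.out.println(kv[0].trim() + " -> " + kv[1].trim());
        // A line like "key:6123" (no space after ':') yields kv.length == 1
        // and is skipped with a warning by the loader above.
    }
}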
flink_NettyShuffleUtils_getMinMaxFloatingBuffersPerInputGate
/** * Calculates and returns the floating network buffer pool size used by the input gate. The * left/right value of the returned pair represent the min/max buffers required by the pool. */ public static Pair<Integer, Integer> getMinMaxFloatingBuffersPerInputGate( final int numFloatingBuffersPerGate) { // We should guarantee at least one floating buffer for local channel state recovery. return Pair.of(1, numFloatingBuffersPerGate); }
3.68
hmily_EtcdClient_addListener
/** * Add listener. * * @param context the context * @param passiveHandler the passive handler * @param config the config * @throws InterruptedException if interrupted while watching */ void addListener(final Supplier<ConfigLoader.Context> context, final ConfigLoader.PassiveHandler<EtcdPassiveConfig> passiveHandler, final EtcdConfig config) throws InterruptedException { if (!config.isPassive()) { return; } if (client == null) { LOGGER.warn("Etcd client is null..."); return; } new Thread(() -> { while (true) { try { client.getWatchClient().watch(ByteSequence.fromString(config.getKey())).listen().getEvents().stream().forEach(watchEvent -> { KeyValue keyValue = watchEvent.getKeyValue(); EtcdPassiveConfig etcdPassiveConfig = new EtcdPassiveConfig(); etcdPassiveConfig.setKey(config.getKey()); etcdPassiveConfig.setFileExtension(config.getFileExtension()); etcdPassiveConfig.setValue(keyValue.getValue() != null ? keyValue.getValue().toStringUtf8() : null); passiveHandler.passive(context, etcdPassiveConfig); }); } catch (InterruptedException e) { LOGGER.error("", e); } } }).start(); LOGGER.info("passive Etcd remote started...."); }
3.68
framework_AbstractInMemoryContainer_setFilteredItemIds
/** * Internal helper method to set the internal list of filtered item * identifiers. Should not be used outside this class except for * implementing clone(), may disappear from future versions. * * @param filteredItemIds */ @Deprecated protected void setFilteredItemIds(List<ITEMIDTYPE> filteredItemIds) { this.filteredItemIds = filteredItemIds; }
3.68
hbase_KeyValueHeap_getHeap
/** Returns the current Heap */ public PriorityQueue<KeyValueScanner> getHeap() { return this.heap; }
3.68
flink_CheckpointProperties_discardOnJobSuspended
/** * Returns whether the checkpoint should be discarded when the owning job reaches the {@link * JobStatus#SUSPENDED} state. * * @return <code>true</code> if the checkpoint should be discarded when the owning job reaches * the {@link JobStatus#SUSPENDED} state; <code>false</code> otherwise. * @see CompletedCheckpointStore */ boolean discardOnJobSuspended() { return discardSuspended; }
3.68
hbase_MobFileName_create
/** * Creates an instance of MobFileName. * @param fileName The string format of a file name. * @return An instance of a MobFileName. */ public static MobFileName create(String fileName) { // The format of a file name is md5HexString(0-31bytes) + date(32-39bytes) + UUID // + "_" + region // The date format is yyyyMMdd String startKey = fileName.substring(0, STARTKEY_END_INDEX); String date = fileName.substring(STARTKEY_END_INDEX, DATE_END_INDEX); String uuid = fileName.substring(DATE_END_INDEX, UUID_END_INDEX); String regionName = fileName.substring(UUID_END_INDEX + 1); return new MobFileName(startKey, date, uuid, regionName); }
3.68
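A hypothetical file name assembled per the layout in the comment above (32-char md5 start key, 8-char yyyyMMdd date, then a uuid and "_" + region); every value here is made up and the uuid length is an assumption:

// Illustrative only; the real index constants live in MobFileName.
String startKey = "0123456789abcdef0123456789abcdef"; // 32 hex chars
String date = "20240115";                             // yyyyMMdd
String uuid = "aaaabbbbccccddddeeeeffff00001111";     // assumed 32 hex chars, dashes stripped
String fileName = startKey + date + uuid + "_" + "myEncodedRegionName";
MobFileName parsed = MobFileName.create(fileName);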
morf_HumanReadableStatementHelper_generateColumnDefinitionString
/** * Generates a column definition string of the format "TYPE(LENGTH,PRECISION)". * * @param definition the column definition * @return the column definition as a string */ private static String generateColumnDefinitionString(final Column definition) { if (definition.getType().hasScale()) { return String.format("%s(%d,%d)", definition.getType(), definition.getWidth(), definition.getScale()); } if (definition.getType().hasWidth()) { return String.format("%s(%d)", definition.getType(), definition.getWidth()); } return String.format("%s", definition.getType()); }
3.68
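Worked examples of the three format branches above, using the same format strings:

public class ColumnDefFormats {
    public static void main(String[] args) {
        System.out.println(String.format("%s(%d,%d)", "DECIMAL", 20, 3)); // DECIMAL(20,3)
        System.out.println(String.format("%s(%d)", "VARCHAR", 50));       // VARCHAR(50)
        System.out.println(String.format("%s", "DATE"));                  // DATE
    }
}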
framework_VColorPickerArea_getHTML
/** * Gets the caption's contents as HTML. * * @return the caption's HTML */ @Override public String getHTML() { return caption.getHTML(); }
3.68
hbase_StoreScanner_next
/** * Get the next row of values from this Store. * @return true if there are more rows, false if the scanner is done */ @Override public boolean next(List<Cell> outResult, ScannerContext scannerContext) throws IOException { if (scannerContext == null) { throw new IllegalArgumentException("Scanner context cannot be null"); } if (checkFlushed() && reopenAfterFlush()) { return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); } // if the heap was left null, then the scanners had previously run out anyway; close and // return. if (this.heap == null) { // By this time a partial close should have happened because the heap is already null close(false);// Do all cleanup except heap.close() return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } Cell cell = this.heap.peek(); if (cell == null) { close(false);// Do all cleanup except heap.close() return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } // only call setRow if the row changes; avoids confusing the query matcher // if scanning intra-row // If no limits exist in the scope LimitScope.BETWEEN_CELLS then we are sure we are changing // rows. Otherwise it is possible we are still traversing the same row, so we must perform the row // comparison. if (!scannerContext.hasAnyLimit(LimitScope.BETWEEN_CELLS) || matcher.currentRow() == null) { this.countPerRow = 0; matcher.setToNewRow(cell); } // Clear progress away unless the invoker has indicated it should be kept. if (!scannerContext.getKeepProgress() && !scannerContext.getSkippingRow()) { scannerContext.clearProgress(); } Optional<RpcCall> rpcCall = matcher.isUserScan() ? RpcServer.getCurrentCall() : Optional.empty(); int count = 0; long totalBytesRead = 0; // track the cells for metrics only if it is a user read request. boolean onlyFromMemstore = matcher.isUserScan(); try { LOOP: do { // Update and check the time limit based on the configured value of cellsPerHeartbeatCheck, // or if preadMaxBytes is reached and we may want to return so we can switch to stream in // the shipped method below. if ( kvsScanned % cellsPerHeartbeatCheck == 0 || (scanUsePread && readType == Scan.ReadType.DEFAULT && bytesRead > preadMaxBytes) ) { if (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) { return scannerContext.setScannerState(NextState.TIME_LIMIT_REACHED).hasMoreValues(); } } // Do an object compare - we set prevCell from the same heap. if (prevCell != cell) { ++kvsScanned; } checkScanOrder(prevCell, cell, comparator); int cellSize = PrivateCellUtil.estimatedSerializedSizeOf(cell); bytesRead += cellSize; if (scanUsePread && readType == Scan.ReadType.DEFAULT && bytesRead > preadMaxBytes) { // return immediately if we want to switch from pread to stream. We need this because we can // only switch in the shipped method; if the user uses a filter to filter out everything and the // rpc timeout is very large, then the shipped method will never be called until the whole scan // is finished, but by that time we have already scanned all the data... // See HBASE-20457 for more details. // And there is still a scenario that cannot be handled: if we have a very large row with // millions of qualifiers, and filter.filterRow is used, then even if we set the flag // here, we still need to scan all the qualifiers before returning... scannerContext.returnImmediately(); } heap.recordBlockSize(blockSize -> { if (rpcCall.isPresent()) { rpcCall.get().incrementBlockBytesScanned(blockSize); } scannerContext.incrementBlockProgress(blockSize); }); prevCell = cell; scannerContext.setLastPeekedCell(cell); topChanged = false; ScanQueryMatcher.MatchCode qcode = matcher.match(cell); switch (qcode) { case INCLUDE: case INCLUDE_AND_SEEK_NEXT_ROW: case INCLUDE_AND_SEEK_NEXT_COL: Filter f = matcher.getFilter(); if (f != null) { cell = f.transformCell(cell); } this.countPerRow++; // add to results only if we have skipped #storeOffset kvs // also update metric accordingly if (this.countPerRow > storeOffset) { outResult.add(cell); // Update local tracking information count++; totalBytesRead += cellSize; /** * Increment the metric if all the cells are from the memstore. If not, we will account it * as a mixed read. */ onlyFromMemstore = onlyFromMemstore && heap.isLatestCellFromMemstore(); // Update the progress of the scanner context scannerContext.incrementSizeProgress(cellSize, cell.heapSize()); scannerContext.incrementBatchProgress(1); if (matcher.isUserScan() && totalBytesRead > maxRowSize) { String message = "Max row size allowed: " + maxRowSize + ", but the row is bigger than that, the row info: " + CellUtil.toString(cell, false) + ", already have process row cells = " + outResult.size() + ", it belong to region = " + store.getHRegion().getRegionInfo().getRegionNameAsString(); LOG.warn(message); throw new RowTooBigException(message); } if (storeLimit > -1 && this.countPerRow >= (storeLimit + storeOffset)) { // do what SEEK_NEXT_ROW does. if (!matcher.moreRowsMayExistAfter(cell)) { close(false);// Do all cleanup except heap.close() return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } matcher.clearCurrentRow(); seekToNextRow(cell); break LOOP; } } if (qcode == ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) { if (!matcher.moreRowsMayExistAfter(cell)) { close(false);// Do all cleanup except heap.close() return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } matcher.clearCurrentRow(); seekOrSkipToNextRow(cell); } else if (qcode == ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL) { seekOrSkipToNextColumn(cell); } else { this.heap.next(); } if (scannerContext.checkBatchLimit(LimitScope.BETWEEN_CELLS)) { break LOOP; } if (scannerContext.checkSizeLimit(LimitScope.BETWEEN_CELLS)) { break LOOP; } continue; case DONE: // Optimization for Gets! If DONE, no more to get on this row, early exit! if (get) { // Then no more to this row... exit. close(false);// Do all cleanup except heap.close() // update metric return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } matcher.clearCurrentRow(); return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); case DONE_SCAN: close(false);// Do all cleanup except heap.close() return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); case SEEK_NEXT_ROW: // This is just a relatively simple end-of-scan fix, to short-circuit // us if there is an endKey in the scan. if (!matcher.moreRowsMayExistAfter(cell)) { close(false);// Do all cleanup except heap.close() return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } matcher.clearCurrentRow(); seekOrSkipToNextRow(cell); NextState stateAfterSeekNextRow = needToReturn(outResult); if (stateAfterSeekNextRow != null) { return scannerContext.setScannerState(stateAfterSeekNextRow).hasMoreValues(); } break; case SEEK_NEXT_COL: seekOrSkipToNextColumn(cell); NextState stateAfterSeekNextColumn = needToReturn(outResult); if (stateAfterSeekNextColumn != null) { return scannerContext.setScannerState(stateAfterSeekNextColumn).hasMoreValues(); } break; case SKIP: this.heap.next(); break; case SEEK_NEXT_USING_HINT: Cell nextKV = matcher.getNextKeyHint(cell); if (nextKV != null) { int difference = comparator.compare(nextKV, cell); if ( ((!scan.isReversed() && difference > 0) || (scan.isReversed() && difference < 0)) ) { seekAsDirection(nextKV); NextState stateAfterSeekByHint = needToReturn(outResult); if (stateAfterSeekByHint != null) { return scannerContext.setScannerState(stateAfterSeekByHint).hasMoreValues(); } break; } } heap.next(); break; default: throw new RuntimeException("UNEXPECTED"); } // One last chance to break due to the size limit. The INCLUDE* cases above already check the // limit and continue. For the various filtered cases, we need to check because the block // size limit may have been exceeded even if we don't add cells to the result list. if (scannerContext.checkSizeLimit(LimitScope.BETWEEN_CELLS)) { return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); } } while ((cell = this.heap.peek()) != null); if (count > 0) { return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); } // No more keys close(false);// Do all cleanup except heap.close() return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } finally { // increment only if we have some results if (count > 0 && matcher.isUserScan()) { // if true, increment the memstore metric; if not, the mixed one updateMetricsStore(onlyFromMemstore); } } }
3.68
morf_ChangeColumn_getToColumn
/** * Gets the column definition after the change * * @return the column definition after the change */ public Column getToColumn() { return toColumn; }
3.68
hbase_NamespacesModel_getNamespaces
/** Returns all namespaces */ public List<String> getNamespaces() { return namespaces; }
3.68
pulsar_ConsumerConfiguration_setCryptoFailureAction
/** * Sets the ConsumerCryptoFailureAction to the value specified. * * @param action * consumer action */ public void setCryptoFailureAction(ConsumerCryptoFailureAction action) { conf.setCryptoFailureAction(action); }
3.68
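A hedged usage sketch: ConsumerCryptoFailureAction is a Pulsar client enum (FAIL, DISCARD, CONSUME), and the wiring below assumes the old-style ConsumerConfiguration API shown above:

import org.apache.pulsar.client.api.ConsumerConfiguration;
import org.apache.pulsar.client.api.ConsumerCryptoFailureAction;

final class CryptoFailureActionExample {
    // Returns a configuration that silently acknowledges and drops messages
    // the consumer fails to decrypt, instead of failing delivery.
    static ConsumerConfiguration discardOnDecryptionFailure() {
        ConsumerConfiguration conf = new ConsumerConfiguration();
        conf.setCryptoFailureAction(ConsumerCryptoFailureAction.DISCARD);
        return conf;
    }
}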
morf_UnionSetOperator_toString
/** * @see java.lang.Object#toString() */ @Override public String toString() { return "UNION " + unionStrategy + " " + selectStatement; }
3.68
hbase_CellArrayImmutableSegment_initializeCellSet
///////////////////// PRIVATE METHODS ///////////////////// /*------------------------------------------------------------------------*/ // Create a CellSet based on a CellArrayMap from the compacting iterator private void initializeCellSet(int numOfCells, MemStoreSegmentsIterator iterator, MemStoreCompactionStrategy.Action action) { boolean merge = (action == MemStoreCompactionStrategy.Action.MERGE || action == MemStoreCompactionStrategy.Action.MERGE_COUNT_UNIQUE_KEYS); Cell[] cells = new Cell[numOfCells]; // build the Cell Array int i = 0; int numUniqueKeys = 0; Cell prev = null; while (iterator.hasNext()) { Cell c = iterator.next(); // The scanner behind the iterator is doing all the elimination logic if (merge) { // if this is a merge we just move the Cell object without copying the MSLAB; // the sizes still need to be updated in the new segment cells[i] = c; } else { // now we just copy it to the new segment (also an MSLAB copy) cells[i] = maybeCloneWithAllocator(c, false); } // second parameter true, because in compaction/merge the addition of the cell to the new segment // is always successful updateMetaInfo(cells[i], true, null); // updates the size per cell if (action == MemStoreCompactionStrategy.Action.MERGE_COUNT_UNIQUE_KEYS) { // counting the number of unique keys if (prev != null) { if (!CellUtil.matchingRowColumnBytes(prev, c)) { numUniqueKeys++; } } else { numUniqueKeys++; } } prev = c; i++; } if (action == MemStoreCompactionStrategy.Action.COMPACT) { numUniqueKeys = numOfCells; } else if (action != MemStoreCompactionStrategy.Action.MERGE_COUNT_UNIQUE_KEYS) { numUniqueKeys = CellSet.UNKNOWN_NUM_UNIQUES; } // build the immutable CellSet CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, i, false); this.setCellSet(null, new CellSet(cam, numUniqueKeys)); // update the CellSet of this Segment }
3.68
hadoop_ConnectionPool_getClientIndex
/** * Get the clientIndex used to calculate index for lookup. * @return Client index. */ @VisibleForTesting public AtomicInteger getClientIndex() { return this.clientIndex; }
3.68
flink_DependencyParser_parseDependencyTreeOutput
/** * Parses the output of a Maven build where {@code dependency:tree} was used, and returns a set * of dependencies for each module. */ public static Map<String, DependencyTree> parseDependencyTreeOutput(Path buildOutput) throws IOException { return processLines(buildOutput, DependencyParser::parseDependencyTreeOutput); }
3.68
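A usage sketch under stated assumptions: the log path is hypothetical, and DependencyTree is the parser's companion type from the same Flink ci-tools module:

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Map;

final class DependencyTreeUsage {
    static void listModules() throws IOException {
        // Hypothetical path to output captured from a `mvn dependency:tree` run.
        Path buildOutput = Paths.get("/tmp/mvn-build.log");
        Map<String, DependencyTree> treesByModule =
                DependencyParser.parseDependencyTreeOutput(buildOutput);
        // One entry per Maven module found in the build output.
        treesByModule.keySet().forEach(System.out::println);
    }
}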
pulsar_ProducerConfiguration_setBatchingMaxPublishDelay
/** * Set the time period within which the messages sent will be batched (<i>default: 1ms</i>) if batch messages are * enabled. If set to a non-zero value, messages will be queued until this time interval elapses or until the * {@link ProducerConfiguration#setBatchingMaxMessages} threshold is reached; all messages will then be published as a * single batch message. Consumers will receive the individual messages of the batch in the same order they were * enqueued. * * @param batchDelay * the batch delay * @param timeUnit * the time unit of the {@code batchDelay} * @return the producer configuration * @since 1.0.36 <br> * Make sure all the consumer applications have been updated to use this client version before starting to * batch messages. */ public ProducerConfiguration setBatchingMaxPublishDelay(long batchDelay, TimeUnit timeUnit) { conf.setBatchingMaxPublishDelayMicros(batchDelay, timeUnit); return this; }
3.68
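A hedged configuration sketch; setBatchingEnabled is assumed from the same ProducerConfiguration API, and the 5 ms delay is an arbitrary illustration:

import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.ProducerConfiguration;

final class BatchingDelayExample {
    static ProducerConfiguration smallBatches() {
        ProducerConfiguration conf = new ProducerConfiguration();
        conf.setBatchingEnabled(true); // the delay only applies when batching is on
        // Publish a batch after at most 5 ms, or earlier if the
        // batchingMaxMessages threshold fills the batch first.
        conf.setBatchingMaxPublishDelay(5, TimeUnit.MILLISECONDS);
        return conf;
    }
}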
flink_KeyedStateCheckpointOutputStream_getKeyGroupList
/** Returns a list of all key-groups which can be written to this stream. */ public KeyGroupsList getKeyGroupList() { return keyGroupRangeOffsets.getKeyGroupRange(); }
3.68
hudi_BaseSparkUpdateStrategy_getGroupIdsWithUpdate
/** * Get the file group ids matched by the given records. * @param inputRecords the records to write, tagged with target file id * @return the matched file group ids */ protected List<HoodieFileGroupId> getGroupIdsWithUpdate(HoodieData<HoodieRecord<T>> inputRecords) { return inputRecords .filter(record -> record.getCurrentLocation() != null) .map(record -> new HoodieFileGroupId(record.getPartitionPath(), record.getCurrentLocation().getFileId())).distinct().collectAsList(); }
3.68
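The same filter/map/distinct pipeline sketched with plain Java streams; TaggedRecord is a hypothetical stand-in for a HoodieRecord tagged with its current location, and the string key stands in for HoodieFileGroupId:

import java.util.List;
import java.util.stream.Collectors;

final class GroupIdsSketch {
    // Hypothetical stand-in: a record tagged with the file group it would update.
    record TaggedRecord(String partitionPath, String fileId) {}

    static List<String> groupIdsWithUpdate(List<TaggedRecord> records) {
        return records.stream()
                .filter(r -> r.fileId() != null)                 // keep only tagged (update) records
                .map(r -> r.partitionPath() + "/" + r.fileId())  // partition + file id identifies the group
                .distinct()
                .collect(Collectors.toList());
    }
}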
hudi_HoodieMetaSyncOperations_updateTableSchema
/** * Update schema for the table in the metastore. */ default void updateTableSchema(String tableName, MessageType newSchema) { }
3.68
framework_VScrollTable_getContentAreaBorderHeight
/** * @return border top + border bottom of the scrollable area of the table */ private int getContentAreaBorderHeight() { if (contentAreaBorderHeight < 0) { scrollBodyPanel.getElement().getStyle() .setOverflow(Overflow.HIDDEN); int oh = scrollBodyPanel.getOffsetHeight(); int ch = scrollBodyPanel.getElement() .getPropertyInt("clientHeight"); contentAreaBorderHeight = oh - ch; scrollBodyPanel.getElement().getStyle().setOverflow(Overflow.AUTO); } return contentAreaBorderHeight; }
3.68
shardingsphere-elasticjob_GuaranteeService_isAllCompleted
/** * Judge whether job's sharding items are all completed. * * @return job's sharding items are all completed or not */ public boolean isAllCompleted() { return jobNodeStorage.isJobNodeExisted(GuaranteeNode.COMPLETED_ROOT) && configService.load(false).getShardingTotalCount() <= jobNodeStorage.getJobNodeChildrenKeys(GuaranteeNode.COMPLETED_ROOT).size(); }
3.68
flink_KeyGroupRange_getNumberOfKeyGroups
/** @return The number of key-groups in the range */ @Override public int getNumberOfKeyGroups() { return 1 + endKeyGroup - startKeyGroup; }
3.68
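A quick worked check of the inclusive-range arithmetic, assuming the KeyGroupRange.of factory from Flink's runtime state package is accessible:

import org.apache.flink.runtime.state.KeyGroupRange;

final class KeyGroupRangeCheck {
    public static void main(String[] args) {
        // [2, 5] is inclusive on both ends, so it contains key-groups 2, 3, 4 and 5.
        KeyGroupRange range = KeyGroupRange.of(2, 5);
        System.out.println(range.getNumberOfKeyGroups()); // 1 + 5 - 2 == 4
    }
}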
zxing_GeoParsedResult_getQuery
/** * @return query string associated with geo URI or null if none exists */ public String getQuery() { return query; }
3.68
hadoop_WeakReferenceThreadMap_removeForCurrentThread
/** * Remove the reference for the current thread. * @return any reference value which existed. */ public V removeForCurrentThread() { return remove(currentThreadId()); }
3.68