Columns: name (string, 12–178 chars) · code_snippet (string, 8–36.5k chars) · score (float64, range 3.26–3.68)
hadoop_FederationUtil_getCompileInfo
/** * Fetch the build/compile information for this jar. * * @return String Compilation info. */ public static String getCompileInfo() { return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch(); }
3.68
hbase_ClientIdGenerator_getPid
/** Returns PID of the current process, if it can be extracted from JVM name, or null. */ public static Long getPid() { String name = ManagementFactory.getRuntimeMXBean().getName(); List<String> nameParts = Splitter.on('@').splitToList(name); if (nameParts.size() == 2) { // 12345@somewhere try { return Long.parseLong(Iterators.get(nameParts.iterator(), 0)); } catch (NumberFormatException ex) { LOG.warn("Failed to get PID from [" + name + "]", ex); } } else { LOG.warn("Don't know how to get PID from [" + name + "]"); } return null; }
3.68
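A minimal self-contained sketch of the same extraction (it assumes the HotSpot convention that the runtime MXBean name looks like "12345@somewhere"; plain String.split stands in for the snippet's Guava Splitter):

import java.lang.management.ManagementFactory;

public class PidDemo {
    public static void main(String[] args) {
        // conventionally "pid@hostname" on HotSpot JVMs; not guaranteed by the spec
        String name = ManagementFactory.getRuntimeMXBean().getName();
        String[] parts = name.split("@");
        if (parts.length == 2) {
            try {
                System.out.println("PID: " + Long.parseLong(parts[0]));
            } catch (NumberFormatException ex) {
                System.out.println("Unparseable JVM name: " + name);
            }
        } else {
            System.out.println("Unexpected JVM name format: " + name);
        }
    }
}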
flink_HiveParserUtils_getFunctionInfo
// Get FunctionInfo and always look for it in metastore when FunctionRegistry returns null. public static FunctionInfo getFunctionInfo(String funcName) throws SemanticException { FunctionInfo res = FunctionRegistry.getFunctionInfo(funcName); if (res == null) { SessionState sessionState = SessionState.get(); HiveConf hiveConf = sessionState != null ? sessionState.getConf() : null; if (hiveConf != null) { // TODO: need to support overriding hive version try (HiveMetastoreClientWrapper hmsClient = new HiveMetastoreClientWrapper(hiveConf, HiveShimLoader.getHiveVersion())) { String[] parts = FunctionUtils.getQualifiedFunctionNameParts(funcName); Function function = hmsClient.getFunction(parts[0], parts[1]); getSessionHiveShim() .registerTemporaryFunction( FunctionUtils.qualifyFunctionName(parts[1], parts[0]), Thread.currentThread() .getContextClassLoader() .loadClass(function.getClassName())); res = FunctionRegistry.getFunctionInfo(funcName); } catch (NoSuchObjectException e) { LOG.warn("Function {} doesn't exist in metastore", funcName); } catch (Exception e) { LOG.warn("Failed to look up function in metastore", e); } } } return res; }
3.68
flink_AbstractPagedInputView_clear
/** * Clears the internal state of the view. After this call, all read attempts will fail, until * the {@link #advance()} or {@link #seekInput(MemorySegment, int, int)} method has been * invoked. */ protected void clear() { this.currentSegment = null; this.positionInSegment = this.headerLength; this.limitInSegment = headerLength; }
3.68
framework_VRichTextArea_setValue
/** * Sets the value of the text area. * * @param value * The text value, as HTML */ public void setValue(String value) { if (rta.isAttached()) { rta.setHTML(value); } else { html.setHTML(value); } }
3.68
flink_SlotProfile_getTaskResourceProfile
/** Returns the desired resource profile for the task slot. */ public ResourceProfile getTaskResourceProfile() { return taskResourceProfile; }
3.68
flink_RecordProcessorUtils_getRecordProcessor1
/** * Gets the record processor for the first input of a {@link TwoInputStreamOperator}, omitting the * call to {@link StreamOperator#setKeyContextElement1} if the operator has no key context. * * @param operator the {@link TwoInputStreamOperator} * @return the record processor */ public static <T> ThrowingConsumer<StreamRecord<T>, Exception> getRecordProcessor1( TwoInputStreamOperator<T, ?, ?> operator) { boolean canOmitSetKeyContext; if (operator instanceof AbstractStreamOperator) { canOmitSetKeyContext = canOmitSetKeyContext((AbstractStreamOperator<?>) operator, 0); } else { canOmitSetKeyContext = operator instanceof KeyContextHandler && !((KeyContextHandler) operator).hasKeyContext1(); } if (canOmitSetKeyContext) { return operator::processElement1; } else { return record -> { operator.setKeyContextElement1(record); operator.processElement1(record); }; } }
3.68
pulsar_SaslAuthenticationDataProvider_authenticate
// Creates a token that will be evaluated by the client and sent to the server. @Override public AuthData authenticate(AuthData commandData) throws AuthenticationException { // init if (Arrays.equals(commandData.getBytes(), AuthData.INIT_AUTH_DATA_BYTES)) { if (pulsarSaslClient.hasInitialResponse()) { return pulsarSaslClient.evaluateChallenge(AuthData.of(new byte[0])); } return AuthData.of(new byte[0]); } return pulsarSaslClient.evaluateChallenge(commandData); }
3.68
Activiti_DelegateHelper_getExtensionElements
/** * Returns the {@link Map} of {@link ExtensionElement} instances for the * activityId of the passed {@link DelegateExecution}. These represent the * extension elements defined in the BPMN 2.0 XML as part of that particular * activity. * <p> * If the execution is currently being used for executing an * {@link ExecutionListener}, the extension elements of the listener will be * used. Use {@link #getFlowElementExtensionElements(DelegateExecution)} * or {@link #getListenerExtensionElements(DelegateExecution)} instead to * specifically get the extension elements of either the flow element or the * listener. */ public static Map<String, List<ExtensionElement>> getExtensionElements(DelegateExecution execution) { if (isExecutingExecutionListener(execution)) { return getListenerExtensionElements(execution); } else { return getFlowElementExtensionElements(execution); } }
3.68
framework_VAbstractCalendarPanel_isReadonly
/** * Checks whether the widget is not editable (read-only). * * @return {@code true} if the widget is read-only */ protected boolean isReadonly() { return parent.isReadonly(); }
3.68
flink_AbstractStreamingJoinOperator_getRecords
/** * Gets the iterable of records. This is usually called when the {@link * AssociatedRecords} is from the inner side. */ public Iterable<RowData> getRecords() { return new RecordsIterable(records); }
3.68
hbase_MasterObserver_postCompletedSnapshotAction
/** * Called after the snapshot operation has been completed. * @param ctx the environment to interact with the framework and master * @param snapshot the SnapshotDescriptor for the snapshot * @param tableDescriptor the TableDescriptor of the table to snapshot */ default void postCompletedSnapshotAction(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws IOException { }
3.68
hadoop_EntityIdentifier_getType
/** * Get the entity type. * @return The entity type. */ public String getType() { return type; }
3.68
hadoop_RegexMountPoint_replaceRegexCaptureGroupInPath
/** * Use the capture group named regexGroupNameOrIndexStr in the matcher to replace * parsedDestPath. * E.g. link: ^/user/(?<username>\\w+) => s3://$user.apache.com/_${user} * srcMatcher is from /user/hadoop. * Then the params will be like the following. * parsedDestPath: s3://$user.apache.com/_${user}, * regexGroupNameOrIndexStr: user * groupRepresentationStrSetInDest: {user:$user; user:${user}} * return value will be s3://hadoop.apache.com/_hadoop * @param parsedDestPath * @param srcMatcher * @param regexGroupNameOrIndexStr * @param groupRepresentationStrSetInDest * @return parsedDestPath with ${var}/$var replaced, or the original * parsedDestPath if no group value was found. */ private String replaceRegexCaptureGroupInPath( String parsedDestPath, Matcher srcMatcher, String regexGroupNameOrIndexStr, Set<String> groupRepresentationStrSetInDest) { String groupValue = getRegexGroupValueFromMather( srcMatcher, regexGroupNameOrIndexStr); if (groupValue == null) { return parsedDestPath; } for (String varName : groupRepresentationStrSetInDest) { parsedDestPath = parsedDestPath.replace(varName, groupValue); LOGGER.debug("parsedDestPath value is:" + parsedDestPath); } return parsedDestPath; }
3.68
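To make the substitution concrete, here is a hypothetical standalone sketch mirroring the javadoc's own example, with java.util.regex in place of the class's private helpers:

import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RegexMountDemo {
    public static void main(String[] args) {
        Matcher m = Pattern.compile("^/user/(?<username>\\w+)").matcher("/user/hadoop");
        if (m.find()) {
            String groupValue = m.group("username");           // "hadoop"
            String dest = "s3://$user.apache.com/_${user}";
            // both representations of the group may appear in the destination
            for (String varName : Set.of("$user", "${user}")) {
                dest = dest.replace(varName, groupValue);
            }
            System.out.println(dest);                          // s3://hadoop.apache.com/_hadoop
        }
    }
}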
framework_LegacyWindow_setContent
/** * Set the content of the window. For a {@link LegacyWindow}, the content * must be a {@link ComponentContainer}. * * @param content */ @Override public void setContent(Component content) { if (!(content instanceof ComponentContainer)) { throw new IllegalArgumentException( "The content of a LegacyWindow must be a ComponentContainer"); } super.setContent(content); }
3.68
flink_FunctionUtils_uncheckedConsumer
/** * Converts a {@link ThrowingConsumer} into a {@link Consumer} which throws checked exceptions * as unchecked. * * @param throwingConsumer to convert into a {@link Consumer} * @param <A> input type * @return {@link Consumer} which throws all checked exceptions as unchecked */ public static <A> Consumer<A> uncheckedConsumer(ThrowingConsumer<A, ?> throwingConsumer) { return (A value) -> { try { throwingConsumer.accept(value); } catch (Throwable t) { ExceptionUtils.rethrow(t); } }; }
3.68
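Usage sketch; since the Flink ThrowingConsumer and ExceptionUtils types are not assumed to be on the classpath here, both the functional interface and the sneaky rethrow are re-declared locally:

import java.util.function.Consumer;

public class UncheckedConsumerDemo {
    @FunctionalInterface
    interface ThrowingConsumer<A, E extends Throwable> {
        void accept(A value) throws E;
    }

    // the classic "sneaky throw": the compiler infers E as RuntimeException at the call site
    @SuppressWarnings("unchecked")
    static <E extends Throwable> void sneakyThrow(Throwable t) throws E {
        throw (E) t;
    }

    static <A> Consumer<A> unchecked(ThrowingConsumer<A, ?> throwing) {
        return value -> {
            try {
                throwing.accept(value);
            } catch (Throwable t) {
                sneakyThrow(t);
            }
        };
    }

    public static void main(String[] args) {
        // the checked IOException no longer has to be declared by the lambda's user
        Consumer<String> c = unchecked(s -> {
            if (s.isEmpty()) throw new java.io.IOException("empty");
            System.out.println(s);
        });
        c.accept("hello");
    }
}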
flink_Router_maxLength
/** Helper for toString. */ private static int maxLength(List<String> coll) { int max = 0; for (String e : coll) { int length = e.length(); if (length > max) { max = length; } } return max; }
3.68
hbase_SnapshotInfo_getCorruptedStoreFilesCount
/** Returns the number of corrupted store files */ public int getCorruptedStoreFilesCount() { return hfilesCorrupted.get(); }
3.68
hbase_HbckReport_getCheckingEndTimestamp
/** * Used by the web UI to show when the HBCK checking report was generated. */ public Instant getCheckingEndTimestamp() { return checkingEndTimestamp; }
3.68
hadoop_Server_getTempDir
/** * Returns the server temp dir. * * @return the server temp dir. */ public String getTempDir() { return tempDir; }
3.68
flink_CompressWriterFactory_withHadoopCompression
/** * Compresses the data using the provided Hadoop {@link CompressionCodec} and {@link * Configuration}. * * @param codecName Simple/complete name or alias of the CompressionCodec * @param hadoopConfig Hadoop Configuration * @return the instance of CompressionWriterFactory * @throws IOException if the codec extension cannot be determined from the given configuration */ public CompressWriterFactory<IN> withHadoopCompression( String codecName, Configuration hadoopConfig) throws IOException { this.codecExtension = getHadoopCodecExtension(codecName, hadoopConfig); this.hadoopCodecName = codecName; for (Map.Entry<String, String> entry : hadoopConfig) { hadoopConfigMap.put(entry.getKey(), entry.getValue()); } return this; }
3.68
flink_DeclarativeSlotManager_checkResourceRequirementsWithDelay
/** * Depending on the implementation of {@link ResourceAllocationStrategy}, checking resource * requirements and potentially making a re-allocation can be heavy. In order to cover more * changes with each check, and thus reduce the frequency of unnecessary re-allocations, the checks * are performed with a slight delay. */ private void checkResourceRequirementsWithDelay() { if (requirementsCheckDelay.toMillis() <= 0) { checkResourceRequirements(); } else { if (requirementsCheckFuture == null || requirementsCheckFuture.isDone()) { requirementsCheckFuture = new CompletableFuture<>(); scheduledExecutor.schedule( () -> mainThreadExecutor.execute( () -> { checkResourceRequirements(); Preconditions.checkNotNull(requirementsCheckFuture) .complete(null); }), requirementsCheckDelay.toMillis(), TimeUnit.MILLISECONDS); } } }
3.68
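A generic sketch of this debounce pattern under simplified assumptions (a single-threaded ScheduledExecutorService standing in for Flink's scheduledExecutor/mainThreadExecutor pair):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DebouncedCheckDemo {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final long delayMillis = 50;
    private CompletableFuture<Void> pendingCheck;

    // many triggers inside the delay window collapse into one scheduled check
    synchronized void triggerCheck() {
        if (pendingCheck == null || pendingCheck.isDone()) {
            CompletableFuture<Void> check = new CompletableFuture<>();
            pendingCheck = check;
            scheduler.schedule(() -> {
                System.out.println("performing one combined check");
                check.complete(null);
            }, delayMillis, TimeUnit.MILLISECONDS);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        DebouncedCheckDemo demo = new DebouncedCheckDemo();
        for (int i = 0; i < 10; i++) {
            demo.triggerCheck(); // schedules exactly one check
        }
        Thread.sleep(200);
        demo.scheduler.shutdown();
    }
}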
framework_ScrollbarBundle_addScrollHandler
/** * Adds a scroll handler to the scrollbar bundle. * * @param handler * the handler to add * @return the registration object for the handler registration */ public HandlerRegistration addScrollHandler(final ScrollHandler handler) { return getHandlerManager().addHandler(ScrollEvent.TYPE, handler); }
3.68
pulsar_BrokerInterceptorUtils_load
/** * Load the broker interceptors according to the interceptor definition. * * @param metadata the broker interceptors definition. */ BrokerInterceptorWithClassLoader load(BrokerInterceptorMetadata metadata, String narExtractionDirectory) throws IOException { final File narFile = metadata.getArchivePath().toAbsolutePath().toFile(); NarClassLoader ncl = NarClassLoaderBuilder.builder() .narFile(narFile) .parentClassLoader(BrokerInterceptorUtils.class.getClassLoader()) .extractionDirectory(narExtractionDirectory) .build(); BrokerInterceptorDefinition def = getBrokerInterceptorDefinition(ncl); if (StringUtils.isBlank(def.getInterceptorClass())) { throw new IOException("Broker interceptors `" + def.getName() + "` does NOT provide a broker" + " interceptors implementation"); } try { Class interceptorClass = ncl.loadClass(def.getInterceptorClass()); Object interceptor = interceptorClass.getDeclaredConstructor().newInstance(); if (!(interceptor instanceof BrokerInterceptor)) { throw new IOException("Class " + def.getInterceptorClass() + " does not implement broker interceptor interface"); } BrokerInterceptor pi = (BrokerInterceptor) interceptor; return new BrokerInterceptorWithClassLoader(pi, ncl); } catch (Throwable t) { rethrowIOException(t); return null; } }
3.68
hadoop_QuotaUsage_getQuota
/** * Return the directory quota. * * @return quota. */ public long getQuota() { return quota; }
3.68
framework_DesignAttributeHandler_clearElement
/** * Clears the children and attributes of the given element. * * @param design * the element to be cleared */ public static void clearElement(Element design) { Attributes attr = design.attributes(); for (Attribute a : attr.asList()) { attr.remove(a.getKey()); } List<Node> children = new ArrayList<>(); children.addAll(design.childNodes()); for (Node node : children) { node.remove(); } }
3.68
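A runnable sketch of the same clearing logic, assuming the Element/Attribute/Node types come from jsoup (consistent with the API the snippet uses); copying the child list first avoids mutating it while iterating:

import java.util.ArrayList;
import java.util.List;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Attribute;
import org.jsoup.nodes.Element;
import org.jsoup.nodes.Node;

public class ClearElementDemo {
    public static void main(String[] args) {
        Element div = Jsoup.parse("<div id='x' class='y'><span>a</span>b</div>").selectFirst("div");
        // remove every attribute by key
        for (Attribute a : div.attributes().asList()) {
            div.attributes().remove(a.getKey());
        }
        // detach every child node, iterating over a defensive copy
        List<Node> children = new ArrayList<>(div.childNodes());
        for (Node node : children) {
            node.remove();
        }
        System.out.println(div.outerHtml()); // <div></div>
    }
}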
flink_SpillingThread_go
/** Entry point of the thread. */ @Override public void go() throws IOException, InterruptedException { // ------------------- In-Memory Cache ------------------------ final Queue<CircularElement<E>> cache = new ArrayDeque<>(); boolean cacheOnly = readCache(cache); // check whether the thread was canceled if (!isRunning()) { return; } MutableObjectIterator<E> largeRecords = null; // check if we can stay in memory with the large record handler if (cacheOnly && largeRecordHandler != null && largeRecordHandler.hasData()) { List<MemorySegment> memoryForLargeRecordSorting = new ArrayList<>(); CircularElement<E> circElement; while ((circElement = this.dispatcher.poll(SortStage.READ)) != null) { circElement.getBuffer().dispose(); memoryForLargeRecordSorting.addAll(circElement.getMemory()); } if (memoryForLargeRecordSorting.isEmpty()) { cacheOnly = false; LOG.debug("Going to disk-based merge because of large records."); } else { LOG.debug("Sorting large records, to add them to in-memory merge."); largeRecords = largeRecordHandler.finishWriteAndSortKeys(memoryForLargeRecordSorting); } } // ------------------- In-Memory Merge ------------------------ if (cacheOnly) { mergeInMemory(cache, largeRecords); return; } // ------------------- Spilling Phase ------------------------ List<ChannelWithBlockCount> channelIDs = startSpilling(cache); // ------------------- Merging Phase ------------------------ mergeOnDisk(channelIDs); }
3.68
flink_LogicalTypeChecks_hasLegacyTypes
/** * Checks whether a (possibly nested) logical type contains {@link LegacyTypeInformationType} or * {@link TypeInformationRawType}. */ public static boolean hasLegacyTypes(LogicalType logicalType) { return hasNested(logicalType, t -> t instanceof LegacyTypeInformationType); }
3.68
hbase_RegionMetrics_getNameAsString
/** Returns the region name as a string */ default String getNameAsString() { return Bytes.toStringBinary(getRegionName()); }
3.68
hadoop_EntityIdentifier_getId
/** * Get the entity Id. * @return The entity Id. */ public String getId() { return id; }
3.68
Activiti_ObjectValueExpression_setValue
/** * Throw an exception. */ @Override public void setValue(ELContext context, Object value) { throw new ELException( LocalMessages.get( "error.value.set.rvalue", "<object value expression>" ) ); }
3.68
hudi_BaseHoodieWriteClient_savepoint
/** * Savepoint a specific commit instant time. Latest version of data files as of the passed in instantTime * will be referenced in the savepoint and will never be cleaned. The savepointed commit will never be rolledback or archived. * <p> * This gives an option to rollback the state to the savepoint anytime. Savepoint needs to be manually created and * deleted. * <p> * Savepoint should be on a commit that could not have been cleaned. * * @param instantTime Commit that should be savepointed * @param user User creating the savepoint * @param comment Comment for the savepoint */ public void savepoint(String instantTime, String user, String comment) { HoodieTable<T, I, K, O> table = createTable(config, hadoopConf); table.savepoint(context, instantTime, user, comment); }
3.68
flink_TopNBuffer_getSortKeyComparator
/** * Gets sort key comparator used by buffer. * * @return sort key comparator used by buffer */ public Comparator<RowData> getSortKeyComparator() { return sortKeyComparator; }
3.68
hbase_ByteArrayOutputStream_getBuffer
/** Returns the underlying array where the data gets accumulated */ public byte[] getBuffer() { return this.buf; }
3.68
graphhopper_CarAverageSpeedParser_applyBadSurfaceSpeed
/** * @param way needed to retrieve tags * @param speed speed guessed e.g. from the road type or other tags * @return The assumed speed */ protected double applyBadSurfaceSpeed(ReaderWay way, double speed) { // limit speed if bad surface if (badSurfaceSpeed > 0 && isValidSpeed(speed) && speed > badSurfaceSpeed) { String surface = way.getTag("surface", ""); int colonIndex = surface.indexOf(":"); if (colonIndex != -1) surface = surface.substring(0, colonIndex); if (badSurfaceSpeedMap.contains(surface)) speed = badSurfaceSpeed; } return speed; }
3.68
hbase_HBaseServerBase_installShutdownHook
/** * In order to register ShutdownHook, this method is called when HMaster and HRegionServer are * started. For details, please refer to HBASE-26951 */ protected final void installShutdownHook() { ShutdownHook.install(conf, dataFs, this, Thread.currentThread()); isShutdownHookInstalled = true; }
3.68
hadoop_DatanodeLocalInfo_getConfigVersion
/** Get the config version. */ public String getConfigVersion() { return this.configVersion; }
3.68
graphhopper_VectorTile_hasFloatValue
/** * <code>optional float float_value = 2;</code> */ public boolean hasFloatValue() { return ((bitField0_ & 0x00000002) == 0x00000002); }
3.68
flink_Tuple2_of
/** * Creates a new tuple and assigns the given values to the tuple's fields. This is more * convenient than using the constructor, because the compiler can infer the generic type * arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new * Tuple3<Integer, Double, String>(n, x, s)} */ public static <T0, T1> Tuple2<T0, T1> of(T0 f0, T1 f1) { return new Tuple2<>(f0, f1); }
3.68
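Usage sketch (assumes flink-core on the classpath), showing the inference benefit the javadoc describes:

import org.apache.flink.api.java.tuple.Tuple2;

public class TupleOfDemo {
    public static void main(String[] args) {
        // the factory infers <Integer, String>; the constructor form spells it out
        Tuple2<Integer, String> inferred = Tuple2.of(1, "one");
        Tuple2<Integer, String> explicit = new Tuple2<Integer, String>(1, "one");
        System.out.println(inferred.equals(explicit)); // true
    }
}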
flink_ThroughputCalculator_calculateThroughput
/** @return Calculated throughput based on the collected data for the last period. */ public long calculateThroughput() { if (measurementStartTime != NOT_TRACKED) { long absoluteTimeMillis = clock.absoluteTimeMillis(); currentMeasurementTime += absoluteTimeMillis - measurementStartTime; measurementStartTime = absoluteTimeMillis; } long throughput = calculateThroughput(currentAccumulatedDataSize, currentMeasurementTime); currentAccumulatedDataSize = currentMeasurementTime = 0; return throughput; }
3.68
framework_FieldGroup_getField
/** * Returns the field that is bound to the given property id. * * @param propertyId * The property id to use to lookup the field * @return The field that is bound to the property id or null if no field is * bound to that property id */ public Field<?> getField(Object propertyId) { return propertyIdToField.get(propertyId); }
3.68
hbase_MasterSnapshotVerifier_verifyTableInfo
/** * Check that the table descriptor for the snapshot is a valid table descriptor * @param manifest snapshot manifest to inspect */ private void verifyTableInfo(final SnapshotManifest manifest) throws IOException { TableDescriptor htd = manifest.getTableDescriptor(); if (htd == null) { throw new CorruptedSnapshotException("Missing Table Descriptor", ProtobufUtil.createSnapshotDesc(snapshot)); } if (!htd.getTableName().getNameAsString().equals(snapshot.getTable())) { throw new CorruptedSnapshotException("Invalid Table Descriptor. Expected " + snapshot.getTable() + " name, got " + htd.getTableName().getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot)); } }
3.68
morf_JdbcUrlElements_build
/** * @return The {@link JdbcUrlElements}. */ public JdbcUrlElements build() { return new JdbcUrlElements(databaseTypeIdentifier, host, port, instanceName, databaseName, schemaName); }
3.68
flink_HiveParserQBParseInfo_getInsertOverwriteTables
// See also {@link #isInsertIntoTable(String)} public Map<String, HiveParserASTNode> getInsertOverwriteTables() { return insertOverwriteTables; }
3.68
hbase_HbckTableInfo_handleHoleInRegionChain
/** * There is a hole in the hdfs regions that violates the table integrity rules. Create a new * empty region that patches the hole. */ @Override public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey) throws IOException { errors.reportError(HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN, "There is a hole in the region chain between " + Bytes.toStringBinary(holeStartKey) + " and " + Bytes.toStringBinary(holeStopKey) + ". Creating a new regioninfo and region " + "dir in hdfs to plug the hole."); TableDescriptor htd = getTableInfo().getTableDescriptor(); RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName()) .setStartKey(holeStartKey).setEndKey(holeStopKey).build(); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); LOG.info("Plugged hole by creating new empty region: " + newRegion + " " + region); hbck.fixes++; }
3.68
framework_VerticalScrollbarVisibilityChangeHandler_dispatch
/* * (non-Javadoc) * * @see com.google.gwt.event.shared.GwtEvent#dispatch(com.google.gwt.event.shared.EventHandler) */ @Override protected void dispatch( VerticalScrollbarVisibilityChangeHandler handler) { handler.onVisibilityChange(this); }
3.68
framework_Footer_join
/** * Merges column cells in the row. The original cells are hidden, and a new * merged cell is shown instead. The new cell has the combined width of all merged * cells, inherits the styles of the first merged cell, but has an * empty caption. * * @param cellsToMerge * the cells which should be merged. The cells should not be * merged to any other cell set. * @return the remaining visible cell after the merge * * @see #join(Set) * @see com.vaadin.ui.AbstractComponent#setCaption(String) setCaption */ @Override public FooterCell join(FooterCell... cellsToMerge) { return join(Stream.of(cellsToMerge)); }
3.68
hadoop_Chain_interruptAllThreads
// interrupt all threads private synchronized void interruptAllThreads() { for (Thread th : threads) { th.interrupt(); } for (ChainBlockingQueue<?> queue : blockingQueues) { queue.interrupt(); } }
3.68
hbase_HttpServer_addEndpoint
/** * Add an endpoint that the HTTP server should listen to. The scheme specifies the protocol * (i.e. HTTP / HTTPS), the host specifies the binding address, and the port specifies the * listening port. An unspecified or zero port means that the server can listen to any port. */ public Builder addEndpoint(URI endpoint) { endpoints.add(endpoint); return this; }
3.68
hbase_PermissionStorage_getUserNamespacePermissions
/** * Returns the currently granted permissions for a given namespace as the specified user plus * associated permissions. */ public static List<UserPermission> getUserNamespacePermissions(Configuration conf, String namespace, String user, boolean hasFilterUser) throws IOException { return getUserPermissions(conf, Bytes.toBytes(toNamespaceEntry(namespace)), null, null, user, hasFilterUser); }
3.68
hadoop_HttpReferrerAuditHeader_withEvaluated
/** * Add an evaluated attribute to the current map, replacing any entry * with the same key. The value is produced lazily by the supplier. * @param key key * @param value new value * @return the builder */ public Builder withEvaluated(String key, Supplier<String> value) { evaluated.put(key, value); return this; }
3.68
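A generic sketch of the supplier-valued map idea: the value is captured now but only computed when the attribute is finally rendered:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

public class LazyAttributeDemo {
    public static void main(String[] args) {
        Map<String, Supplier<String>> evaluated = new HashMap<>();
        evaluated.put("timestamp", () -> Long.toString(System.currentTimeMillis()));
        // ... later, at render time, each supplier is invoked exactly once:
        for (Map.Entry<String, Supplier<String>> e : evaluated.entrySet()) {
            System.out.println(e.getKey() + "=" + e.getValue().get());
        }
    }
}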
hbase_HMaster_login
/** * For compatibility: if login with the regionserver credentials fails, try the master credentials */ @Override protected void login(UserProvider user, String host) throws IOException { try { user.login(SecurityConstants.REGIONSERVER_KRB_KEYTAB_FILE, SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, host); } catch (IOException ie) { user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE, SecurityConstants.MASTER_KRB_PRINCIPAL, host); } }
3.68
framework_AbstractMedia_getAltText
/** * @return The text/html that is displayed when a browser doesn't support * HTML5. */ public String getAltText() { return getState(false).altText; }
3.68
hbase_RequestConverter_buildDeleteNamespaceRequest
/** * Creates a protocol buffer DeleteNamespaceRequest * @return a DeleteNamespaceRequest */ public static DeleteNamespaceRequest buildDeleteNamespaceRequest(final String name) { DeleteNamespaceRequest.Builder builder = DeleteNamespaceRequest.newBuilder(); builder.setNamespaceName(name); return builder.build(); }
3.68
framework_VMenuBar_showChildMenu
/** * Shows the child menu of an item. The caller must ensure that the item has * a submenu. * * @param item */ public void showChildMenu(CustomMenuItem item) { int left = 0; int top = 0; if (subMenu) { left = item.getParentMenu().getAbsoluteLeft() + item.getParentMenu().getOffsetWidth(); top = item.getAbsoluteTop(); } else { left = item.getAbsoluteLeft(); top = item.getParentMenu().getAbsoluteTop() + item.getParentMenu().getOffsetHeight(); } showChildMenuAt(item, top, left); }
3.68
framework_Upload_fireStarted
/** * Emit the upload started event. * * @param filename the name of the uploaded file * @param mimeType the MIME type of the file */ protected void fireStarted(String filename, String mimeType) { fireEvent(new Upload.StartedEvent(this, filename, mimeType, contentLength)); }
3.68
flink_PythonOperatorChainingOptimizer_buildOutputMap
/** * Constructs a map from each transformation to the set of transformations that consume its * output. */ private static Map<Transformation<?>, Set<Transformation<?>>> buildOutputMap( List<Transformation<?>> transformations) { final Map<Transformation<?>, Set<Transformation<?>>> outputMap = new HashMap<>(); final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque(transformations); final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet(); while (!toTransformQueue.isEmpty()) { Transformation<?> transformation = toTransformQueue.poll(); if (!alreadyTransformed.contains(transformation)) { alreadyTransformed.add(transformation); for (Transformation<?> input : transformation.getInputs()) { Set<Transformation<?>> outputs = outputMap.computeIfAbsent(input, i -> Sets.newHashSet()); outputs.add(transformation); } toTransformQueue.addAll(transformation.getInputs()); } } return outputMap; }
3.68
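A self-contained sketch of the same reverse-edge construction, with a hypothetical Node class standing in for Flink's Transformation:

import java.util.*;

public class OutputMapDemo {
    static class Node {
        final String name;
        final List<Node> inputs;
        Node(String name, Node... inputs) { this.name = name; this.inputs = List.of(inputs); }
        public String toString() { return name; }
    }

    // BFS from the given roots, recording input -> {consumers}
    static Map<Node, Set<Node>> buildOutputMap(List<Node> roots) {
        Map<Node, Set<Node>> outputMap = new HashMap<>();
        Deque<Node> queue = new ArrayDeque<>(roots);
        Set<Node> visited = Collections.newSetFromMap(new IdentityHashMap<>());
        while (!queue.isEmpty()) {
            Node n = queue.poll();
            if (visited.add(n)) {
                for (Node input : n.inputs) {
                    outputMap.computeIfAbsent(input, i -> new HashSet<>()).add(n);
                }
                queue.addAll(n.inputs);
            }
        }
        return outputMap;
    }

    public static void main(String[] args) {
        Node src = new Node("src");
        Node a = new Node("a", src);
        Node b = new Node("b", src);
        System.out.println(buildOutputMap(List.of(a, b))); // {src=[a, b]}
    }
}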
hbase_WALPlayer_createSubmittableJob
/** * Sets up the actual job. * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ public Job createSubmittableJob(String[] args) throws IOException { Configuration conf = getConf(); setupTime(conf, WALInputFormat.START_TIME_KEY); setupTime(conf, WALInputFormat.END_TIME_KEY); String inputDirs = args[0]; String[] tables = args.length == 1 ? new String[] {} : args[1].split(","); String[] tableMap; if (args.length > 2) { tableMap = args[2].split(","); if (tableMap.length != tables.length) { throw new IOException("The same number of tables and mapping must be provided."); } } else { // if no mapping is specified, map each table to itself tableMap = tables; } conf.setStrings(TABLES_KEY, tables); conf.setStrings(TABLE_MAP_KEY, tableMap); conf.set(FileInputFormat.INPUT_DIR, inputDirs); Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime())); job.setJarByClass(WALPlayer.class); job.setInputFormatClass(WALInputFormat.class); job.setMapOutputKeyClass(ImmutableBytesWritable.class); String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); if (hfileOutPath != null) { LOG.debug("add incremental job :" + hfileOutPath + " from " + inputDirs); // WALPlayer needs ExtendedCellSerialization so that sequenceId can be propagated when // sorting cells in CellSortReducer job.getConfiguration().setBoolean(HFileOutputFormat2.EXTENDED_CELL_SERIALIZATION_ENABLED_KEY, true); // the bulk HFile case List<TableName> tableNames = getTableNameList(tables); job.setMapperClass(WALKeyValueMapper.class); job.setReducerClass(CellSortReducer.class); Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); job.setMapOutputValueClass(MapReduceExtendedCell.class); try (Connection conn = ConnectionFactory.createConnection(conf);) { List<TableInfo> tableInfoList = new ArrayList<TableInfo>(); for (TableName tableName : tableNames) { Table table = conn.getTable(tableName); RegionLocator regionLocator = conn.getRegionLocator(tableName); tableInfoList.add(new TableInfo(table.getDescriptor(), regionLocator)); } MultiTableHFileOutputFormat.configureIncrementalLoad(job, tableInfoList); } TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class); } else { // output to live cluster job.setMapperClass(WALMapper.class); job.setOutputFormatClass(MultiTableOutputFormat.class); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); // No reducers. job.setNumReduceTasks(0); } String codecCls = WALCellCodec.getWALCellCodecClass(conf).getName(); try { TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Class.forName(codecCls)); } catch (Exception e) { throw new IOException("Cannot determine wal codec class " + codecCls, e); } return job; }
3.68
framework_LayoutManager_reportOuterWidth
/** * Registers the outer width (including margins, borders and paddings) of a * component. This can be used as an optimization by ManagedLayouts; by * informing the LayoutManager about what size a component will have, the * layout propagation can continue directly without first measuring the * potentially resized elements. * * @param component * the component for which the size is reported * @param outerWidth * the new outer width (including margins, borders and paddings) * of the component in pixels */ public void reportOuterWidth(ComponentConnector component, int outerWidth) { Element element = component.getWidget().getElement(); MeasuredSize measuredSize = getMeasuredSize(element); if (isLayoutRunning()) { boolean widthChanged = measuredSize.setOuterWidth(outerWidth); if (widthChanged) { onConnectorChange(component, true, false); notifyListenersAndDepdendents(element, true, false); } currentDependencyTree.setNeedsHorizontalMeasure(component, false); } else if (measuredSize.getOuterWidth() != outerWidth) { setNeedsMeasure(component); } }
3.68
hadoop_LocalityMulticastAMRMProxyPolicy_getLocalityBasedWeighting
/** * Compute the weight to assign to a subcluster based on how many local * requests a subcluster is target of. */ private float getLocalityBasedWeighting(long reqId, SubClusterId targetId, AllocationBookkeeper allocationBookkeeper) { float totWeight = allocationBookkeeper.getTotNumLocalizedContainers(reqId); float localWeight = allocationBookkeeper.getNumLocalizedContainers(reqId, targetId); return totWeight > 0 ? localWeight / totWeight : 0; }
3.68
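A worked sketch of the weighting arithmetic, with the bookkeeping reduced to two plain counts:

public class LocalityWeightDemo {
    // a subcluster targeted by `localized` of `total` localized requests
    static float weight(long localized, long total) {
        return total > 0 ? (float) localized / total : 0f;
    }

    public static void main(String[] args) {
        System.out.println(weight(3, 4)); // 0.75
        System.out.println(weight(0, 0)); // 0.0 (guarded division, no NaN)
    }
}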
morf_NamedParameterPreparedStatement_clearBatch
/** * @see PreparedStatement#clearBatch() * @exception SQLException if a database access error occurs, * this method is called on a closed <code>Statement</code> or the * driver does not support batch updates */ public void clearBatch() throws SQLException { statement.clearBatch(); }
3.68
druid_MySqlStatementParser_parseBlock
/** * Parse a labeled block statement (BEGIN ... END label). */ public SQLBlockStatement parseBlock(String label) { SQLBlockStatement block = new SQLBlockStatement(); block.setLabelName(label); accept(Token.BEGIN); this.parseStatementList(block.getStatementList(), -1, block); accept(Token.END); acceptIdentifier(label); return block; }
3.68
framework_LayoutDependencyTree_markHeightAsChanged
/** * Marks the component's height as changed. Iterates through all components * whose vertical size depends on this component's size. If the dependent is * a managed layout, triggers the need for vertical layouting; otherwise triggers * the need for vertical measuring for any dependent components of that * component in turn. Finally triggers horizontal measuring for the * scrolling boundary, in case a vertical scrollbar has appeared or * disappeared due to the height change. * * @param connector * the connector of the component whose height has changed, * should not be {@code null} */ public void markHeightAsChanged(ComponentConnector connector) { LayoutDependency dependency = getDependency(connector.getConnectorId(), VERTICAL); dependency.markSizeAsChanged(); }
3.68
hadoop_ServerWebApp_getDir
/** * Convenience method that looks for a Java System property defining a * directory and if not present defaults to the specified directory. * * @param name server name, used as prefix of the Java System property. * @param dirType dir type, used as postfix of the Java System property. * @param defaultDir the default directory to return if the Java System * property <code>name + dirType</code> is not defined. * * @return the directory defined in the Java System property or * the default directory if the Java System property is not defined. */ static String getDir(String name, String dirType, String defaultDir) { String sysProp = name + dirType; return System.getProperty(sysProp, defaultDir); }
3.68
druid_SQLUtils_sort
/** * Re-orders CREATE TABLE statements so that dependencies between them are resolved. * * @param sql * @param dbType */ public static String sort(String sql, DbType dbType) { // note: the statements are parsed with the Oracle dialect before sorting List stmtList = SQLUtils.parseStatements(sql, DbType.oracle); SQLCreateTableStatement.sort(stmtList); return SQLUtils.toSQLString(stmtList, dbType); }
3.68
hudi_InternalBloomFilter_add
/** * Adds a key to <i>this</i> filter. * * @param key The key to add. */ @Override public void add(Key key) { if (key == null) { throw new NullPointerException("key cannot be null"); } int[] h = hash.hash(key); hash.clear(); for (int i = 0; i < nbHash; i++) { bits.set(h[i]); } }
3.68
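A minimal single-class sketch of the add() path under simplified assumptions (a fixed-size BitSet and a toy seeded hash in place of the filter's configured hash family):

import java.nio.charset.StandardCharsets;
import java.util.BitSet;

public class BloomAddDemo {
    private final BitSet bits = new BitSet(1 << 16);
    private final int nbHash = 3;

    // set nbHash independent positions for the key
    void add(byte[] key) {
        if (key == null) throw new NullPointerException("key cannot be null");
        for (int i = 0; i < nbHash; i++) {
            bits.set(hash(key, i));
        }
    }

    // a lookup tests the same positions: no false negatives, possible false positives
    boolean mightContain(byte[] key) {
        for (int i = 0; i < nbHash; i++) {
            if (!bits.get(hash(key, i))) return false;
        }
        return true;
    }

    private int hash(byte[] key, int seed) {
        int h = seed * 0x9E3779B9;
        for (byte b : key) h = h * 31 + b;
        return (h & 0x7fffffff) % (1 << 16);
    }

    public static void main(String[] args) {
        BloomAddDemo f = new BloomAddDemo();
        f.add("hudi".getBytes(StandardCharsets.UTF_8));
        System.out.println(f.mightContain("hudi".getBytes(StandardCharsets.UTF_8))); // true
    }
}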
flink_SplitAssignmentTracker_onCheckpointComplete
/** * When a checkpoint has been successfully taken, this method is invoked to clean up the * assignment history up to and including that checkpoint. * * @param checkpointId the id of the successful checkpoint. */ public void onCheckpointComplete(long checkpointId) { assignmentsByCheckpointId.entrySet().removeIf(entry -> entry.getKey() <= checkpointId); }
3.68
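A tiny standalone sketch of the same pruning; note the completed checkpoint's own entry is removed too, since the predicate is <=:

import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;

public class CheckpointPruneDemo {
    public static void main(String[] args) {
        SortedMap<Long, List<String>> assignmentsByCheckpointId = new TreeMap<>();
        assignmentsByCheckpointId.put(1L, List.of("split-a"));
        assignmentsByCheckpointId.put(2L, List.of("split-b"));
        assignmentsByCheckpointId.put(3L, List.of("split-c"));

        long completedCheckpointId = 2L;
        assignmentsByCheckpointId.entrySet().removeIf(e -> e.getKey() <= completedCheckpointId);

        System.out.println(assignmentsByCheckpointId); // {3=[split-c]}
    }
}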
hbase_StorageClusterVersionModel_toString
/* * (non-Javadoc) * @see java.lang.Object#toString() */ @Override public String toString() { return version; }
3.68
querydsl_BeanMap_firePropertyChange
/** * Called during a successful {@link #put(Object,Object)} operation. * Default implementation does nothing. Override to be notified of * property changes in the bean caused by this map. * * @param key the name of the property that changed * @param oldValue the old value for that property * @param newValue the new value for that property */ protected void firePropertyChange(String key, Object oldValue, Object newValue) { }
3.68
flink_DateTimeUtils_ymd
/** Appends year-month-day to a buffer; assumes they are valid. */ private static StringBuilder ymd(StringBuilder b, int year, int month, int day) { int4(b, year); b.append('-'); int2(b, month); b.append('-'); int2(b, day); return b; }
3.68
hbase_HFileBlockIndex_ensureSingleLevel
/** * @throws IOException if we happened to write a multi-level index. */ public void ensureSingleLevel() throws IOException { if (numLevels > 1) { throw new IOException( "Wrote a " + numLevels + "-level index with " + rootChunk.getNumEntries() + " root-level entries, but " + "this is expected to be a single-level block index."); } }
3.68
flink_WindowMapState_values
/** * Returns all the values in the state. * * @return An iterable view of all the values in the state. * @throws Exception Thrown if the system cannot access the state. */ public Iterable<UV> values(W window) throws Exception { windowState.setCurrentNamespace(window); return windowState.values(); }
3.68
morf_SqlDialect_indexDropStatements
/** * Generate the SQL to drop an index from a table. * * @param table The table to drop the index from. * @param indexToBeRemoved The index to be dropped. * @return The SQL to drop the specified index. */ public Collection<String> indexDropStatements(@SuppressWarnings("unused") Table table, Index indexToBeRemoved) { return ImmutableList.of("DROP INDEX " + indexToBeRemoved.getName()); }
3.68
pulsar_MultiRolesTokenAuthorizationProvider_canConsumeAsync
/** * Check if the specified role has permission to receive messages from the specified fully qualified topic * name. * * @param topicName the fully qualified topic name associated with the topic. * @param role the app id used to receive messages from the topic. * @param subscription the subscription name defined by the client */ @Override public CompletableFuture<Boolean> canConsumeAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData, String subscription) { return authorize(role, authenticationData, r -> super.canConsumeAsync(topicName, r, authenticationData, subscription)); }
3.68
framework_VLayoutSlot_positionVertically
/** * Position the slot vertically and set the height and the bottom margin. * * @param currentLocation * the top position for this slot * @param allocatedSpace * how much vertical space is available for this slot * @param marginBottom * the bottom margin this slot should have (removed if negative) */ public void positionVertically(double currentLocation, double allocatedSpace, double marginBottom) { Style style = wrapper.getStyle(); double contentHeight = allocatedSpace; int captionHeight; VCaption caption = getCaption(); Style captionStyle = caption == null ? null : caption.getElement().getStyle(); if (caption == null || caption.shouldBePlacedAfterComponent()) { style.clearPaddingTop(); captionHeight = 0; } else { captionHeight = getCaptionHeight(); contentHeight -= captionHeight; if (contentHeight < 0) { contentHeight = 0; } style.setPaddingTop(captionHeight, Unit.PX); } if (marginBottom > 0) { style.setMarginBottom(marginBottom, Unit.PX); } else { style.clearMarginBottom(); } style.setHeight(contentHeight, Unit.PX); double allocatedContentHeight = 0; if (isRelativeHeight()) { String height = getWidget().getElement().getStyle().getHeight(); double percentage = parsePercent(height); allocatedContentHeight = contentHeight * (percentage / 100); reportActualRelativeHeight( Math.round((float) allocatedContentHeight)); } style.setTop(currentLocation, Unit.PX); double padding = 0; AlignmentInfo alignment = getAlignment(); if (!alignment.isTop()) { double usedHeight; if (isRelativeHeight()) { usedHeight = captionHeight + allocatedContentHeight; } else { usedHeight = getUsedHeight(); } if (alignment.isVerticalCenter()) { padding = (allocatedSpace - usedHeight) / 2d; } else { padding = (allocatedSpace - usedHeight); } padding += captionHeight; widget.getElement().getStyle().setTop(padding, Unit.PX); if (captionStyle != null) { captionStyle.setTop(padding - captionHeight, Unit.PX); } } else { // Reset top when changing back to align top widget.getElement().getStyle().clearTop(); if (captionStyle != null) { captionStyle.setTop(0, Unit.PX); } } }
3.68
dubbo_LoggerFactory_getCurrentAdapter
/** * Get the current adapter name * * @return current adapter name */ public static String getCurrentAdapter() { Map<Class<? extends LoggerAdapter>, String> candidates = new HashMap<>(); candidates.put(Log4jLoggerAdapter.class, "log4j"); candidates.put(Slf4jLoggerAdapter.class, "slf4j"); candidates.put(Log4j2LoggerAdapter.class, "log4j2"); candidates.put(JclLoggerAdapter.class, "jcl"); candidates.put(JdkLoggerAdapter.class, "jdk"); String name = candidates.get(loggerAdapter.getClass()); if (name == null) { name = loggerAdapter.getClass().getSimpleName(); } return name; }
3.68
hadoop_RouterFedBalance_setTrashOpt
/** * Specify the trash behaviour of the source path. * @param value the trash option. */ public Builder setTrashOpt(TrashOption value) { this.trashOpt = value; return this; }
3.68
hbase_BinaryPrefixComparator_toByteArray
/** Returns the comparator serialized using pb */ @Override public byte[] toByteArray() { ComparatorProtos.BinaryPrefixComparator.Builder builder = ComparatorProtos.BinaryPrefixComparator.newBuilder(); builder.setComparable(ProtobufUtil.toByteArrayComparable(this.value)); return builder.build().toByteArray(); }
3.68
flink_ExtractionUtils_toClass
/** Converts a {@link Type} to {@link Class} if possible, {@code null} otherwise. */ static @Nullable Class<?> toClass(Type type) { if (type instanceof Class) { return (Class<?>) type; } else if (type instanceof ParameterizedType) { // this is always a class return (Class<?>) ((ParameterizedType) type).getRawType(); } // unsupported: generic arrays, type variables, wildcard types return null; }
3.68
framework_StringToDateConverter_getFormat
/** * Returns the format used by * {@link #convertToPresentation(Date, Class,Locale)} and * {@link #convertToModel(String, Class, Locale)}. * * @param locale * The locale to use * @return A DateFormat instance */ protected DateFormat getFormat(Locale locale) { if (locale == null) { locale = Locale.getDefault(); } DateFormat f = DateFormat.getDateTimeInstance(DateFormat.MEDIUM, DateFormat.MEDIUM, locale); f.setLenient(false); return f; }
3.68
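Usage sketch of the format this method builds, shown round-tripping a date for a fixed locale:

import java.text.DateFormat;
import java.text.ParseException;
import java.util.Date;
import java.util.Locale;

public class DateFormatDemo {
    public static void main(String[] args) throws ParseException {
        DateFormat f = DateFormat.getDateTimeInstance(DateFormat.MEDIUM, DateFormat.MEDIUM, Locale.US);
        f.setLenient(false); // reject inputs like "Feb 30" instead of rolling them over
        String text = f.format(new Date());
        Date parsed = f.parse(text);
        System.out.println(text + " -> " + parsed);
    }
}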
streampipes_InfluxClientProvider_getInfluxDBClient
/** * Create a new InfluxDB client from provided settings * * @param settings Connection settings * @return InfluxDB */ public static InfluxDB getInfluxDBClient(InfluxConnectionSettings settings) { if (settings.getAuthMode() == InfluxAuthMode.TOKEN) { var okHttpClientBuilder = InfluxClientUtils.getHttpClientBuilder(settings.getToken()); return InfluxDBFactory.connect(settings.getConnectionUrl(), okHttpClientBuilder); } else { var okHttpClientBuilder = InfluxClientUtils.getHttpClientBuilder(); return InfluxDBFactory.connect( settings.getConnectionUrl(), settings.getUsername(), settings.getPassword(), okHttpClientBuilder ); } }
3.68
hadoop_Quota_getValidQuotaLocations
/** * Get valid quota remote locations used in {@link #getQuotaUsage(String)}. * Unlike {@link #getQuotaRemoteLocations(String)}, this * method does some additional filtering. * @param path Federation path. * @return List of valid quota remote locations. * @throws IOException */ private List<RemoteLocation> getValidQuotaLocations(String path) throws IOException { final List<RemoteLocation> locations = getQuotaRemoteLocations(path); // NameService -> Locations ListMultimap<String, RemoteLocation> validLocations = ArrayListMultimap.create(); for (RemoteLocation loc : locations) { final String nsId = loc.getNameserviceId(); final Collection<RemoteLocation> dests = validLocations.get(nsId); // Ensure the paths in the same nameservice are different. // Do not include parent-child paths. boolean isChildPath = false; for (RemoteLocation d : dests) { if (isParentEntry(loc.getDest(), d.getDest())) { isChildPath = true; break; } } if (!isChildPath) { validLocations.put(nsId, loc); } } return Collections .unmodifiableList(new ArrayList<>(validLocations.values())); }
3.68
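A generic sketch of the parent/child filtering, with a simplified isParentEntry standing in for the Hadoop helper:

import java.util.ArrayList;
import java.util.List;

public class QuotaLocationFilterDemo {
    // simplified stand-in: true if `parent` equals `path` or is an ancestor directory of it
    static boolean isParentEntry(String path, String parent) {
        return path.equals(parent) || path.startsWith(parent.endsWith("/") ? parent : parent + "/");
    }

    public static void main(String[] args) {
        List<String> kept = new ArrayList<>();
        for (String dest : List.of("/data", "/data/archive", "/logs")) {
            boolean isChildPath = false;
            for (String d : kept) {
                if (isParentEntry(dest, d)) {
                    isChildPath = true;
                    break;
                }
            }
            if (!isChildPath) kept.add(dest);
        }
        System.out.println(kept); // [/data, /logs]
    }
}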
hbase_RegionCoprocessorHost_postScannerClose
/** * @exception IOException Exception */ public void postScannerClose(final InternalScanner s) throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new RegionObserverOperationWithoutResult() { @Override public void call(RegionObserver observer) throws IOException { observer.postScannerClose(this, s); } }); }
3.68
morf_OracleMetaDataProvider_isEmptyDatabase
/** * Reading all the table metadata is slow on Oracle, so we can optimise the empty * database check by just seeing if there are any tables. * * @see org.alfasoftware.morf.jdbc.DatabaseMetaDataProvider#isEmptyDatabase() */ @Override public boolean isEmptyDatabase() { return keyMap().isEmpty(); }
3.68
hadoop_Chain_nextKeyValue
/** * Advance to the next key/value pair, returning false at the end of input. * * @return true if a key/value pair was read, false if there is no more input */ public boolean nextKeyValue() throws IOException, InterruptedException { if (inputQueue != null) { return readFromQueue(); } else if (inputContext.nextKeyValue()) { this.key = inputContext.getCurrentKey(); this.value = inputContext.getCurrentValue(); return true; } else { return false; } }
3.68
druid_IPAddress_parseIPAddress
/** * Convert a decimal-dotted notation representation of an IP address into a 32-bit integer value. * * @param ipAddressStr Decimal-dotted notation (xxx.xxx.xxx.xxx) of the IP address. * @return The 32-bit integer representation of the IP address. * @throws IllegalArgumentException Thrown if the specified IP address does not conform to the * decimal-dotted notation xxx.xxx.xxx.xxx. */ final int parseIPAddress(String ipAddressStr) { int result = 0; if (ipAddressStr == null) { throw new IllegalArgumentException(); } try { String tmp = ipAddressStr; // get the first 3 numbers int offset = 0; for (int i = 0; i < 3; i++) { // get the position of the first dot int index = tmp.indexOf('.'); // if there is no dot then the ip string representation does // not conform to the decimal-dotted notation. if (index != -1) { // get the number before the dot and convert it into // an integer. String numberStr = tmp.substring(0, index); int number = Integer.parseInt(numberStr); if ((number < 0) || (number > 255)) { throw new IllegalArgumentException("Invalid IP Address [" + ipAddressStr + "]"); } result += number << offset; offset += 8; tmp = tmp.substring(index + 1); } else { throw new IllegalArgumentException("Invalid IP Address [" + ipAddressStr + "]"); } } // the remaining part of the string should be the last number. if (tmp.length() > 0) { int number = Integer.parseInt(tmp); if ((number < 0) || (number > 255)) { throw new IllegalArgumentException("Invalid IP Address [" + ipAddressStr + "]"); } result += number << offset; ipAddress = result; } else { throw new IllegalArgumentException("Invalid IP Address [" + ipAddressStr + "]"); } } catch (NoSuchElementException ex) { throw new IllegalArgumentException("Invalid IP Address [" + ipAddressStr + "]", ex); } catch (NumberFormatException ex) { throw new IllegalArgumentException("Invalid IP Address [" + ipAddressStr + "]", ex); } return result; }
3.68
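A compact sketch of the same conversion using split(); it mirrors the snippet's layout, which packs the first octet into the lowest byte:

public class IpParseDemo {
    static int parseIPAddress(String ip) {
        String[] parts = ip.split("\\.", -1);
        if (parts.length != 4) throw new IllegalArgumentException("Invalid IP Address [" + ip + "]");
        int result = 0;
        for (int i = 0; i < 4; i++) {
            int n = Integer.parseInt(parts[i]);
            if (n < 0 || n > 255) throw new IllegalArgumentException("Invalid IP Address [" + ip + "]");
            result += n << (8 * i); // octet i lands in byte i, lowest byte first
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(Integer.toHexString(parseIPAddress("1.2.3.4"))); // 4030201
    }
}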
dubbo_CuratorFrameworkParams_getParameterValue
/** * Get the parameter value from the specified {@link URL} * * @param url the Dubbo registry {@link URL} * @param <T> the type of value * @return the parameter value if present, or return <code>null</code> */ public <T> T getParameterValue(URL url) { String param = url.getParameter(name); Object value = param != null ? converter.apply(param) : defaultValue; return (T) value; }
3.68
framework_SharedUtil_equals
/** * Checks if o1 and o2 are equal using {@link #equals(Object)}. Handles null * values as well. Does not ensure that objects are of the same type. * Assumes that the first object's equals method handles equality properly. * * @param o1 * The first value to compare * @param o2 * The second value to compare * @return true if the objects are equal, false otherwise */ public static boolean equals(Object o1, Object o2) { if (o1 == null) { return o2 == null; } return o1.equals(o2); }
3.68
hadoop_MetricsCache_getMetric
/** * Lookup a metric value * @param key name of the metric * @return the metric value */ public Number getMetric(String key) { AbstractMetric metric = metrics.get(key); return metric != null ? metric.value() : null; }
3.68
hmily_HmilyUndoContextCacheManager_set
/** * Set undo context. * * @param undoContext hmily undo context */ public void set(final HmilyUndoContext undoContext) { CURRENT_LOCAL.get().add(undoContext); }
3.68
morf_SelectStatementBuilder_withCustomHint
/** * Supplies a specified custom hint to the database for a query. * * @param customHint representation of a custom hint * @return this, for method chaining. * @deprecated See {@link org.alfasoftware.morf.sql.CustomHint} */ @Deprecated public org.alfasoftware.morf.sql.SelectStatementBuilder withCustomHint(CustomHint customHint) { this.hints.add(customHint); return this; }
3.68
hadoop_LocalTempDir_tempFile
/** * Create a temp file. * @param conf configuration to use when creating the allocator * @param prefix filename prefix * @param size file size, or -1 if not known * @return the temp file. The file has been created. * @throws IOException IO failure */ public static File tempFile(Configuration conf, String prefix, long size) throws IOException { return getAllocator(conf, BUFFER_DIR).createTmpFileForWrite( prefix, size, conf); }
3.68
hbase_QuotaCache_getQuotaUserName
/** * Applies a request attribute user override if available, otherwise returns the UGI's short * username * @param ugi The request's UserGroupInformation */ private String getQuotaUserName(final UserGroupInformation ugi) { if (userOverrideRequestAttributeKey == null) { return ugi.getShortUserName(); } Optional<RpcCall> rpcCall = RpcServer.getCurrentCall(); if (!rpcCall.isPresent()) { return ugi.getShortUserName(); } byte[] override = rpcCall.get().getRequestAttribute(userOverrideRequestAttributeKey); if (override == null) { return ugi.getShortUserName(); } return Bytes.toString(override); }
3.68
flink_GlobalProperties_reset
/** This method resets the properties to a state where no properties are given. */ public void reset() { this.partitioning = PartitioningProperty.RANDOM_PARTITIONED; this.ordering = null; this.partitioningFields = null; }
3.68
hadoop_AuditingIntegration_exitStage
/** * Remove stage from common audit context. */ public static void exitStage() { currentAuditContext().remove(CONTEXT_ATTR_STAGE); }
3.68
morf_SchemaChangeSequence_renameIndex
/** * @see org.alfasoftware.morf.upgrade.SchemaEditor#renameIndex(java.lang.String, java.lang.String, java.lang.String) */ @Override public void renameIndex(String tableName, String fromIndexName, String toIndexName) { RenameIndex renameIndex = new RenameIndex(tableName, fromIndexName, toIndexName); visitor.visit(renameIndex); schemaAndDataChangeVisitor.visit(renameIndex); }
3.68
flink_CompletedOperationCache_accessOperationResultOrError
/** * Returns the {@link OperationResult} of the asynchronous operation. If the operation is * finished, marks the result as accessed. */ public OperationResult<R> accessOperationResultOrError() { if (operationResult.isFinished()) { markAccessed(); } return operationResult; }
3.68
flink_Tuple19_toString
/** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8, * f9, f10, f11, f12, f13, f14, f15, f16, f17, f18), where the individual fields are the value * returned by calling {@link Object#toString} on that field. * * @return The string representation of the tuple. */ @Override public String toString() { return "(" + StringUtils.arrayAwareToString(this.f0) + "," + StringUtils.arrayAwareToString(this.f1) + "," + StringUtils.arrayAwareToString(this.f2) + "," + StringUtils.arrayAwareToString(this.f3) + "," + StringUtils.arrayAwareToString(this.f4) + "," + StringUtils.arrayAwareToString(this.f5) + "," + StringUtils.arrayAwareToString(this.f6) + "," + StringUtils.arrayAwareToString(this.f7) + "," + StringUtils.arrayAwareToString(this.f8) + "," + StringUtils.arrayAwareToString(this.f9) + "," + StringUtils.arrayAwareToString(this.f10) + "," + StringUtils.arrayAwareToString(this.f11) + "," + StringUtils.arrayAwareToString(this.f12) + "," + StringUtils.arrayAwareToString(this.f13) + "," + StringUtils.arrayAwareToString(this.f14) + "," + StringUtils.arrayAwareToString(this.f15) + "," + StringUtils.arrayAwareToString(this.f16) + "," + StringUtils.arrayAwareToString(this.f17) + "," + StringUtils.arrayAwareToString(this.f18) + ")"; }
3.68
hbase_Encryption_hash256
/** * Return the SHA-256 digest of the concatenation of the supplied arguments. */ public static byte[] hash256(byte[]... args) { return hashWithAlg("SHA-256", args); }
3.68
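A sketch of what such a helper plausibly does underneath (one MessageDigest fed each argument in turn, so the result equals the hash of the concatenation):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class Hash256Demo {
    static byte[] hash256(byte[]... args) throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        for (byte[] arg : args) {
            md.update(arg); // streaming update avoids building the concatenated array
        }
        return md.digest();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        byte[] h = hash256("foo".getBytes(StandardCharsets.UTF_8), "bar".getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : h) hex.append(String.format("%02x", b));
        System.out.println(hex); // same digest as SHA-256("foobar")
    }
}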
hbase_LruCachedBlockQueue_pollLast
/** Returns the last element in this queue, or {@code null} if the queue is empty. */ public LruCachedBlock pollLast() { return queue.pollLast(); }
3.68