Columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, range 3.26 to 3.68).
dubbo_ServiceConfigBase_register
/** * Register a delay-published service to the registry. */ public final void register() { register(false); }
3.68
hadoop_TaskAttemptContextImpl_setStatus
/** * Set the current status of the task to the given string. */ @Override public void setStatus(String status) { String normalizedStatus = Task.normalizeStatus(status, conf); setStatusString(normalizedStatus); reporter.setStatus(normalizedStatus); }
3.68
hadoop_S3AMultipartUploader_abortUploadsUnderPath
/** * Abort all multipart uploads (MPUs) under the path. * @param path path to abort uploads under. * @return a future which eventually returns the number of entries found * @throws IOException submission failure */ @Override public CompletableFuture<Integer> abortUploadsUnderPath(final Path path) throws IOException { statistics.abortUploadsUnderPathInvoked(); return context.submit(new CompletableFuture<>(), () -> writeOperations.abortMultipartUploadsUnderPath( context.pathToKey(path))); }
3.68
framework_TabSheet_setTabCaptionsAsHtml
/** * Sets whether HTML is allowed in the tab captions. * <p> * If set to true, the captions are rendered in the browser as HTML and the * developer is responsible for ensuring no harmful HTML is used. If set to * false, the content is rendered in the browser as plain text. * <p> * The default is false, i.e. render tab captions as plain text * * @param tabCaptionsAsHtml * true if the tab captions are rendered as HTML, false if * rendered as plain text * @since 7.4 */ public void setTabCaptionsAsHtml(boolean tabCaptionsAsHtml) { getState().tabCaptionsAsHtml = tabCaptionsAsHtml; }
3.68
zxing_BitArray_getNextSet
/** * @param from first bit to check * @return index of first bit that is set, starting from the given index, or size if none are set * at or beyond this given index * @see #getNextUnset(int) */ public int getNextSet(int from) { if (from >= size) { return size; } int bitsOffset = from / 32; int currentBits = bits[bitsOffset]; // mask off lesser bits first currentBits &= -(1 << (from & 0x1F)); while (currentBits == 0) { if (++bitsOffset == bits.length) { return size; } currentBits = bits[bitsOffset]; } int result = (bitsOffset * 32) + Integer.numberOfTrailingZeros(currentBits); return Math.min(result, size); }
3.68
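A minimal standalone sketch of the same scan, to make the masking step concrete: the expression -(1 << (from & 0x1F)) zeroes all bits below from in the first word, after which whole 32-bit words are skipped until a non-zero one is found. The class and method names below are hypothetical, not part of ZXing.

public final class BitScanSketch {
    // Returns the index of the first set bit at or after 'from', or 'size' if none.
    static int nextSet(int[] bits, int size, int from) {
        if (from >= size) {
            return size;
        }
        int wordIndex = from / 32;
        // Mask off bits below 'from' within the first word.
        int current = bits[wordIndex] & -(1 << (from & 0x1F));
        while (current == 0) {
            if (++wordIndex == bits.length) {
                return size;
            }
            current = bits[wordIndex];
        }
        return Math.min(wordIndex * 32 + Integer.numberOfTrailingZeros(current), size);
    }

    public static void main(String[] args) {
        int[] bits = {1 << 7}; // only bit 7 is set
        System.out.println(nextSet(bits, 32, 3)); // 7
        System.out.println(nextSet(bits, 32, 8)); // 32 (nothing at or beyond 8)
    }
}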
hadoop_WeightedPolicyInfo_fromByteBuffer
/** * Deserializes a {@link WeightedPolicyInfo} from a byte UTF-8 JSON * representation. * * @param bb the input byte representation. * * @return the {@link WeightedPolicyInfo} represented. * * @throws FederationPolicyInitializationException if a deserialization error * occurs. */ public static WeightedPolicyInfo fromByteBuffer(ByteBuffer bb) throws FederationPolicyInitializationException { if (jsonjaxbContext == null) { throw new FederationPolicyInitializationException( "JSONJAXBContext should" + " not be null."); } try { JSONUnmarshaller unmarshaller = jsonjaxbContext.createJSONUnmarshaller(); final byte[] bytes = new byte[bb.remaining()]; bb.get(bytes); String params = new String(bytes, StandardCharsets.UTF_8); WeightedPolicyInfo weightedPolicyInfo = unmarshaller.unmarshalFromJSON( new StringReader(params), WeightedPolicyInfo.class); return weightedPolicyInfo; } catch (JAXBException j) { throw new FederationPolicyInitializationException(j); } }
3.68
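The ByteBuffer draining pattern used above is worth isolating; a JDK-only sketch of just that step, with the JAXB unmarshalling omitted:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ByteBufferDecode {
    // Drains the buffer's remaining bytes and decodes them as UTF-8.
    static String decodeUtf8(ByteBuffer bb) {
        byte[] bytes = new byte[bb.remaining()];
        bb.get(bytes); // advances position to limit
        return new String(bytes, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        ByteBuffer bb = ByteBuffer.wrap("{\"weights\":{}}".getBytes(StandardCharsets.UTF_8));
        System.out.println(decodeUtf8(bb)); // {"weights":{}}
    }
}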
AreaShop_BuyRegion_getPrice
/** * Get the price of the region. * @return The price of the region */ public double getPrice() { return Math.max(0, Utils.evaluateToDouble(getStringSetting("buy.price"), this)); }
3.68
AreaShop_AddedFriendEvent_getBy
/** * Get the CommandSender that is adding the friend. * @return null if none, a CommandSender if done by someone (likely Player or ConsoleCommandSender) */ public CommandSender getBy() { return by; }
3.68
flink_TableColumn_metadata
/** * Creates a metadata column from metadata of the given column name or from metadata of the * given alias (if not null). * * <p>Allows to specify whether the column is virtual or not. */ public static MetadataColumn metadata( String name, DataType type, @Nullable String metadataAlias, boolean isVirtual) { Preconditions.checkNotNull(name, "Column name can not be null."); Preconditions.checkNotNull(type, "Column type can not be null."); return new MetadataColumn(name, type, metadataAlias, isVirtual); }
3.68
hbase_MasterObserver_postRollBackSplitRegionAction
/** * This will be called after the roll back of the split region is completed * @param ctx the environment to interact with the framework and master */ default void postRollBackSplitRegionAction( final ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException { }
3.68
querydsl_SQLExpressions_cumeDist
/** * As an aggregate function, CUME_DIST calculates, for a hypothetical row r identified by the * arguments of the function and a corresponding sort specification, the relative position of row * r among the rows in the aggregation group. Oracle makes this calculation as if the hypothetical * row r were inserted into the group of rows to be aggregated over. The arguments of the function * identify a single hypothetical row within each aggregate group. Therefore, they must all * evaluate to constant expressions within each aggregate group. The constant argument expressions * and the expressions in the ORDER BY clause of the aggregate match by position. Therefore, * the number of arguments must be the same and their types must be compatible. * * @param args arguments * @return cume_dist(args) */ public static WithinGroup<Double> cumeDist(Expression<?>... args) { return new WithinGroup<Double>(Double.class, SQLOps.CUMEDIST2, args); }
3.68
hadoop_TypedBytesInput_readRawBytes
/** * Reads the raw bytes following a <code>Type.BYTES</code> code. * @return the obtained bytes sequence * @throws IOException */ public byte[] readRawBytes() throws IOException { return readRawBytes(Type.BYTES.code); }
3.68
zilla_HttpServerFactory_teHeader
// 8.1.2.2 TE header MUST NOT contain any value other than "trailers". private void teHeader( DirectBuffer name, DirectBuffer value) { if (!error() && name.equals(TE) && !value.equals(TRAILERS)) { streamError = Http2ErrorCode.PROTOCOL_ERROR; } }
3.68
framework_SimpleStringFilter_isOnlyMatchPrefix
/** * Returns true if the filter only applies to the beginning of the value * string, false for any location in the value. * * Note: this method is intended only for implementations of lazy string * filters and may change in the future. * * @return true if checking for matches at the beginning of the value only, * false if matching any part of value */ public boolean isOnlyMatchPrefix() { return onlyMatchPrefix; }
3.68
dubbo_RegistrySpecListener_getSize
/** * Get the exposed service count for the protocol. */ public static int getSize(MetricsEvent event) { return event.getAttachmentValue(ATTACHMENT_KEY_SIZE); }
3.68
hadoop_HdfsFileStatus_makeQualified
/** * Resolve the short name of the Path given the URI, parent provided. This * FileStatus reference will not contain a valid Path until it is resolved * by this method. * @param defaultUri FileSystem to fully qualify HDFS path. * @param parent Parent path of this element. * @return Reference to this instance. */ default FileStatus makeQualified(URI defaultUri, Path parent) { // fully-qualify path setPath(getFullPath(parent).makeQualified(defaultUri, null)); return (FileStatus) this; // API compatibility }
3.68
open-banking-gateway_HbciConsentInfo_isPasswordPresent
/** * Is the PSU password present in the context. */ public boolean isPasswordPresent(HbciContext ctx) { return null != ctx.getPsuPin(); }
3.68
hbase_MutableRegionInfo_setOffline
/** * The parent of a region split is offline while split daughters hold references to the parent. * Offlined regions are closed. * @param offLine Set online/offline status. */ public MutableRegionInfo setOffline(boolean offLine) { this.offLine = offLine; return this; }
3.68
framework_VCustomLayout_extractBodyAndScriptsFromTemplate
/** * Extract body part and script tags from raw html-template. * * Saves contents of all script-tags to private property: scripts. Returns * contents of the body part for the html without script-tags. Also replaces * all _UID_ tags with a unique id-string. * * @param html * Original HTML-template received from server * @return html that is used to create the HTMLPanel. */ private String extractBodyAndScriptsFromTemplate(String html) { // Replace UIDs html = html.replaceAll("_UID_", pid + "__"); // Extract script-tags scripts = ""; int endOfPrevScript = 0; int nextPosToCheck = 0; String lc = html.toLowerCase(Locale.ROOT); String res = ""; int scriptStart = lc.indexOf("<script", nextPosToCheck); while (scriptStart > 0) { res += html.substring(endOfPrevScript, scriptStart); scriptStart = lc.indexOf(">", scriptStart); final int j = lc.indexOf("</script>", scriptStart); scripts += html.substring(scriptStart + 1, j) + ";"; endOfPrevScript = j + "</script>".length(); nextPosToCheck = endOfPrevScript; scriptStart = lc.indexOf("<script", nextPosToCheck); } res += html.substring(endOfPrevScript); // Extract body html = res; lc = html.toLowerCase(Locale.ROOT); int startOfBody = lc.indexOf("<body"); if (startOfBody < 0) { res = html; } else { res = ""; startOfBody = lc.indexOf(">", startOfBody) + 1; final int endOfBody = lc.indexOf("</body>", startOfBody); if (endOfBody > startOfBody) { res = html.substring(startOfBody, endOfBody); } else { res = html.substring(startOfBody); } } return res; }
3.68
hadoop_OracleDataDrivenDBInputFormat_getSplitter
/** * @return the DBSplitter implementation to use to divide the table/query into InputSplits. */ @Override protected DBSplitter getSplitter(int sqlDataType) { switch (sqlDataType) { case Types.DATE: case Types.TIME: case Types.TIMESTAMP: return new OracleDateSplitter(); default: return super.getSplitter(sqlDataType); } }
3.68
framework_AbstractOrderedLayoutConnector_onUnregister
/* * (non-Javadoc) * * @see com.vaadin.client.ui.AbstractComponentConnector#onUnregister() */ @Override public void onUnregister() { // Cleanup all ElementResizeListeners for (ComponentConnector child : getChildComponents()) { Slot slot = getWidget().getSlot(child.getWidget()); if (slot.hasCaption()) { slot.setCaptionResizeListener(null); } if (slot.getSpacingElement() != null) { slot.setSpacingResizeListener(null); } slot.setWidgetResizeListener(null); } super.onUnregister(); }
3.68
pulsar_AuthorizationService_canConsumeAsync
/** * Check if the specified role has permission to receive messages from the specified fully qualified topic name. * * @param topicName * the fully qualified topic name associated with the topic. * @param role * the app id used to receive messages from the topic. * @param subscription * the subscription name defined by the client */ public CompletableFuture<Boolean> canConsumeAsync(TopicName topicName, String role, AuthenticationDataSource authenticationData, String subscription) { if (!this.conf.isAuthorizationEnabled()) { return CompletableFuture.completedFuture(true); } return provider.isSuperUser(role, authenticationData, conf).thenComposeAsync(isSuperUser -> { if (isSuperUser) { return CompletableFuture.completedFuture(true); } else { return provider.canConsumeAsync(topicName, role, authenticationData, subscription); } }); }
3.68
morf_Criterion_greaterThan
/** * Helper method to create a new "GREATER THAN" expression. * * <blockquote><pre> * Criterion.greaterThan(new Field("startdate"), 20091001);</pre></blockquote> * * @param field the field to evaluate in the expression (the left hand side of the expression) * @param value the value to evaluate in the expression (the right hand side) * @return a new Criterion object */ public static Criterion greaterThan(AliasedField field, Object value) { return new Criterion(Operator.GT, field, value); }
3.68
hbase_LockStatus_isLocked
/** * Return whether this lock has already been held. * <p/> * Notice that holding the exclusive lock or a shared lock are both considered as locked, i.e., this * method is usually equivalent to {@code hasExclusiveLock() || getSharedLockCount() > 0}. */ default boolean isLocked() { return hasExclusiveLock() || getSharedLockCount() > 0; }
3.68
hadoop_AuditSpan_isValidSpan
/** * Is the span valid? False == this is a span to indicate unbonded. * @return true if this span represents a real operation. */ default boolean isValidSpan() { return true; }
3.68
flink_NetUtils_isValidClientPort
/** * Check whether the given port is in the right range when connecting to somewhere. * * @param port the port to check * @return true if the number is in the range 1 to 65535 */ public static boolean isValidClientPort(int port) { return 1 <= port && port <= 65535; }
3.68
morf_AbstractSqlDialectTest_testEmptyStringLiteralIsNull
/** * Test that an empty string literal is converted to {@code NULL} * on all database platforms, following the WEB-9161 harmonisation * of empty-string/null handling across vendors. * * @see #testInsertWithNullDefaults() * @see #testUpdateWithNull() */ @Test public void testEmptyStringLiteralIsNull() { UpdateStatement updateStmt = new UpdateStatement(new TableReference(TEST_TABLE)).set(new FieldLiteral("").as(STRING_FIELD)); assertEquals("Update with literal value", "UPDATE " + tableName(TEST_TABLE) + " SET stringField = NULL", testDialect.convertStatementToSQL(updateStmt)); InsertStatement insertStmt = new InsertStatement().into(new TableReference(TEST_TABLE)).values(new FieldLiteral("").as(STRING_FIELD)); List<String> sql = testDialect.convertStatementToSQL(insertStmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE)); assertEquals("Insert with literal null", expectedEmptyStringInsertStatement(), sql.get(sql.size() - 1)); }
3.68
framework_ConnectorMap_isDragAndDropPaintable
/** * FIXME: What does this even do and why? * * @param pid * @return */ public boolean isDragAndDropPaintable(String pid) { return (pid.startsWith("DD")); }
3.68
flink_BoundedFIFOQueue_size
/** * Returns the number of currently stored elements. * * @return The number of currently stored elements. */ public int size() { return this.elements.size(); }
3.68
morf_AbstractSqlDialectTest_testDeleteWithWhereCriterion
/** * Tests that a delete string with a where criterion is created correctly. */ @Test public void testDeleteWithWhereCriterion() { DeleteStatement stmt = new DeleteStatement(new TableReference(TEST_TABLE)).where(eq(new FieldReference(new TableReference(TEST_TABLE), STRING_FIELD), "A001003657")); String value = varCharCast("'A001003657'"); String expectedSql = "DELETE FROM " + tableName(TEST_TABLE) + " WHERE (Test.stringField = " + stringLiteralPrefix() + value + ")"; assertEquals("Simple delete", expectedSql, testDialect.convertStatementToSQL(stmt)); }
3.68
flink_TableConfigValidation_validateTimeZone
/** Validates user configured time zone. */ public static void validateTimeZone(String zone) { boolean isValid; try { // We enforce a zone string that is compatible with both java.util.TimeZone and // java.time.ZoneId to avoid bugs. // In general, advertising either TZDB ID, GMT+xx:xx, or UTC is the best we can do. isValid = java.util.TimeZone.getTimeZone(zone).toZoneId().equals(ZoneId.of(zone)); } catch (Exception e) { isValid = false; } if (!isValid) { throw new ValidationException( "Invalid time zone. The value should be a Time Zone Database (TZDB) ID " + "such as 'America/Los_Angeles' to include daylight saving time. Fixed " + "offsets are supported using 'GMT-03:00' or 'GMT+03:00'. Or use 'UTC' " + "without time zone and daylight saving time."); } }
3.68
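The reason for validating against both APIs: java.util.TimeZone silently falls back to GMT for unknown IDs, while java.time.ZoneId rejects them. A small JDK-only demonstration:

import java.time.ZoneId;
import java.util.TimeZone;

public class ZoneCheckDemo {
    public static void main(String[] args) {
        // TimeZone silently falls back to GMT for unknown IDs.
        System.out.println(TimeZone.getTimeZone("Not/AZone").getID()); // GMT
        // ZoneId.of rejects the same input with an exception.
        try {
            ZoneId.of("Not/AZone");
        } catch (Exception e) {
            System.out.println(e.getClass().getSimpleName()); // ZoneRulesException
        }
    }
}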
framework_BootstrapHandler_getBootstrapResponse
/** * Gets the bootstrap response object. * * @return the bootstrap response object */ public BootstrapFragmentResponse getBootstrapResponse() { return bootstrapResponse; }
3.68
hbase_Bytes_toDouble
/** Return double made from passed bytes. */ public static double toDouble(final byte[] bytes, final int offset) { return Double.longBitsToDouble(toLong(bytes, offset, SIZEOF_LONG)); }
3.68
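The conversion works because a double round-trips losslessly through its IEEE-754 long bit pattern; a JDK-only sketch of the same idea, without HBase's Bytes helpers:

import java.nio.ByteBuffer;

public class DoubleBytesDemo {
    public static void main(String[] args) {
        double value = 3.14159;
        // Serialize: double -> long bits -> 8 big-endian bytes.
        byte[] bytes = ByteBuffer.allocate(8).putLong(Double.doubleToLongBits(value)).array();
        // Deserialize: 8 bytes -> long bits -> double, as toDouble does.
        double back = Double.longBitsToDouble(ByteBuffer.wrap(bytes).getLong());
        System.out.println(back); // 3.14159
    }
}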
shardingsphere-elasticjob_RDBJobEventRepository_addJobStatusTraceEvent
/** * Add job status trace event. * * @param event job status trace event * @return add success or not */ public boolean addJobStatusTraceEvent(final JobStatusTraceEvent event) { String originalTaskId = event.getOriginalTaskId(); if (State.TASK_STAGING != event.getState()) { originalTaskId = getOriginalTaskId(event.getTaskId()); } boolean result = false; try ( Connection connection = dataSource.getConnection(); PreparedStatement preparedStatement = connection.prepareStatement(sqlMapper.getInsertForJobStatusTraceLog())) { preparedStatement.setString(1, UUID.randomUUID().toString()); preparedStatement.setString(2, event.getJobName()); preparedStatement.setString(3, originalTaskId); preparedStatement.setString(4, event.getTaskId()); preparedStatement.setString(5, event.getSlaveId()); preparedStatement.setString(6, event.getExecutionType().name()); preparedStatement.setString(7, event.getShardingItems()); preparedStatement.setString(8, event.getState().toString()); preparedStatement.setString(9, truncateString(event.getMessage())); preparedStatement.setTimestamp(10, new Timestamp(event.getCreationTime().getTime())); preparedStatement.execute(); result = true; } catch (final SQLException ex) { // TODO log failure directly to output log, consider to be configurable in the future log.error(ex.getMessage()); } return result; }
3.68
dubbo_RpcContextAttachment_set
/** * set value. * * @param key * @param value * @return context */ @Override @Deprecated public RpcContextAttachment set(String key, Object value) { return setAttachment(key, value); }
3.68
framework_WrappedPortletSession_getPortletSession
/** * Gets the wrapped {@link PortletSession}. * * @return the wrapped portlet session */ public PortletSession getPortletSession() { return session; }
3.68
framework_SettingsView_processPendingView
// if there is a pending view change, do it now private void processPendingView() { if (pendingViewAndParameters != null) { navigator.navigateTo(pendingViewAndParameters); pendingViewAndParameters = null; } }
3.68
hbase_TagUtil_fromList
/** * Write a list of tags into a byte array. Note: these are purely internal APIs. It helps in * cases where we have a set of tags and we want to create a cell out of it. Say in Mobs we * create a reference tag to indicate the presence of mob data. Also note that these are not * exposed to CPs. * @param tags The list of tags * @return the serialized tag data as bytes */ public static byte[] fromList(List<Tag> tags) { if (tags == null || tags.isEmpty()) { return HConstants.EMPTY_BYTE_ARRAY; } int length = 0; for (Tag tag : tags) { length += tag.getValueLength() + Tag.INFRASTRUCTURE_SIZE; } byte[] b = new byte[length]; int pos = 0; int tlen; for (Tag tag : tags) { tlen = tag.getValueLength(); pos = Bytes.putAsShort(b, pos, tlen + Tag.TYPE_LENGTH_SIZE); pos = Bytes.putByte(b, pos, tag.getType()); if (tag.hasArray()) { pos = Bytes.putBytes(b, pos, tag.getValueArray(), tag.getValueOffset(), tlen); } else { ByteBufferUtils.copyFromBufferToArray(b, tag.getValueByteBuffer(), tag.getValueOffset(), pos, tlen); pos += tlen; } } return b; }
3.68
flink_CsvReader_setCharset
/** * Sets the charset of the reader. * * @param charset The character set to set. */ @PublicEvolving public void setCharset(String charset) { this.charset = Preconditions.checkNotNull(charset); }
3.68
morf_ResolvedTables_portableSqlStatementUsed
/** * Store information about usage of {@link PortableSqlStatement}. */ public void portableSqlStatementUsed() { portableSqlStatementUsed = true; }
3.68
hadoop_FederationStateStoreFacade_existsReservationHomeSubCluster
/** * Check whether a ReservationHomeSubCluster mapping exists. * * @param reservationId reservationId * @return true if the mapping exists, false otherwise */ public boolean existsReservationHomeSubCluster(ReservationId reservationId) { try { SubClusterId subClusterId = getReservationHomeSubCluster(reservationId); if (subClusterId != null) { return true; } } catch (YarnException e) { LOG.debug("get homeSubCluster by reservationId = {} error.", reservationId, e); } return false; }
3.68
pulsar_ClearTextSecretsProvider_provideSecret
/** * Fetches a secret. * * @return The actual secret */ @Override public String provideSecret(String secretName, Object pathToSecret) { if (pathToSecret != null) { return pathToSecret.toString(); } else { return null; } }
3.68
framework_VAbstractPopupCalendar_onClose
/* * (non-Javadoc) * * @see * com.google.gwt.event.logical.shared.CloseHandler#onClose(com.google.gwt * .event.logical.shared.CloseEvent) */ @Override public void onClose(CloseEvent<PopupPanel> event) { if (event.getSource() == popup) { buildDate(); if (!BrowserInfo.get().isTouchDevice() && textFieldEnabled) { /* * Move focus to textbox, unless on touch device (avoids opening * virtual keyboard) or if textField is disabled. */ focus(); } open = false; if (cursorOverCalendarToggleButton && !toggleButtonClosesWithGuarantee) { preventOpenPopupCalendar = true; } toggleButtonClosesWithGuarantee = false; } }
3.68
hadoop_AMRMClientAsyncImpl_releaseAssignedContainer
/** * Release containers assigned by the Resource Manager. If the app cannot use * the container or wants to give up the container then it can release them. * The app needs to make new requests for the released resource capability if * it still needs it. eg. it released non-local resources * @param containerId */ public void releaseAssignedContainer(ContainerId containerId) { client.releaseAssignedContainer(containerId); }
3.68
flink_InputProperty_keepInputAsIsDistribution
/** * A special distribution which indicates that the data distribution is the same as its input. * * @param inputDistribution the input distribution * @param strict whether the input distribution is strictly guaranteed */ public static KeepInputAsIsDistribution keepInputAsIsDistribution( RequiredDistribution inputDistribution, boolean strict) { return new KeepInputAsIsDistribution(inputDistribution, strict); }
3.68
flink_UploadThrottle_seizeCapacity
/** * Seize <b>bytes</b> capacity. It is the caller's responsibility to ensure at least some capacity * {@link #hasCapacity() is available}. <strong>After</strong> this call, the caller is allowed * to actually use the seized capacity. When the capacity is not needed anymore, the caller is * required to {@link #releaseCapacity(long) release} it. Called by the Task thread. * * @throws IllegalStateException if capacity is unavailable. */ public void seizeCapacity(long bytes) throws IllegalStateException { checkState(hasCapacity()); inFlightBytesCounter += bytes; }
3.68
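A stripped-down sketch of the same check-then-seize contract; names and the limit field are hypothetical, since the row above does not show Flink's actual state fields. Note that a single seize may overshoot the limit: only the precondition that some capacity exists is enforced.

// Minimal single-threaded model of the throttle contract.
public class SimpleThrottle {
    private final long maxBytes;
    private long inFlightBytes;

    SimpleThrottle(long maxBytes) { this.maxBytes = maxBytes; }

    boolean hasCapacity() { return inFlightBytes < maxBytes; }

    void seizeCapacity(long bytes) {
        if (!hasCapacity()) {
            throw new IllegalStateException("capacity unavailable");
        }
        inFlightBytes += bytes; // may overshoot; only the pre-check is enforced
    }

    void releaseCapacity(long bytes) { inFlightBytes -= bytes; }

    public static void main(String[] args) {
        SimpleThrottle t = new SimpleThrottle(100);
        t.seizeCapacity(150); // allowed: capacity existed before the call
        System.out.println(t.hasCapacity()); // false
        t.releaseCapacity(150);
        System.out.println(t.hasCapacity()); // true
    }
}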
hadoop_Paths_tempDirForStaging
/** * Try to come up with a good temp directory for different filesystems. * @param fs filesystem * @param conf configuration * @return a qualified path under which temporary work can go. */ public static Path tempDirForStaging(FileSystem fs, Configuration conf) { String fallbackPath = fs.getScheme().equals("file") ? System.getProperty(JAVA_IO_TMPDIR) : FILESYSTEM_TEMP_PATH; return fs.makeQualified(new Path(conf.getTrimmed( FS_S3A_COMMITTER_STAGING_TMP_PATH, fallbackPath))); }
3.68
flink_IterativeDataSet_registerAggregationConvergenceCriterion
/** * Registers an {@link Aggregator} for the iteration together with a {@link * ConvergenceCriterion}. For a general description of aggregators, see {@link * #registerAggregator(String, Aggregator)} and {@link Aggregator}. At the end of each * iteration, the convergence criterion takes the aggregator's global aggregate value and * decides whether the iteration should terminate. A typical use case is to have an aggregator * that sums up the total error of change in an iteration step and a convergence * criterion that signals termination as soon as the aggregate value is below a certain * threshold. * * @param name The name under which the aggregator is registered. * @param aggregator The aggregator class. * @param convergenceCheck The convergence criterion. * @return The IterativeDataSet itself, to allow chaining function calls. */ @PublicEvolving public <X extends Value> IterativeDataSet<T> registerAggregationConvergenceCriterion( String name, Aggregator<X> aggregator, ConvergenceCriterion<X> convergenceCheck) { this.aggregators.registerAggregationConvergenceCriterion( name, aggregator, convergenceCheck); return this; }
3.68
framework_ExpandingContainer_addContainerProperty
/** * @throws UnsupportedOperationException * always */ @Override public boolean addContainerProperty(Object propertyId, Class<?> type, Object defaultValue) { throw new UnsupportedOperationException(); }
3.68
framework_VComboBox_createSuggestionPopup
/** * This method will create the SuggestionPopup used by the VComboBox * instance. It is invoked during the Constructor and should only be * overridden if a custom SuggestionPopup shall be used. The overriding * method cannot use any instance variables. * * @since 7.1.5 * @return SuggestionPopup instance used by this VComboBox */ protected SuggestionPopup createSuggestionPopup() { return new SuggestionPopup(); }
3.68
framework_BeanPropertySet_get
/** * Gets a {@link BeanPropertySet} for the given bean type. * * @param beanType * the bean type to get a property set for, not <code>null</code> * @param checkNestedDefinitions * whether to scan for nested definitions in beanType * @param filterDefinition * filtering conditions for nested properties * @return the bean property set, not <code>null</code> * @since 8.2 */ @SuppressWarnings("unchecked") public static <T> PropertySet<T> get(Class<? extends T> beanType, boolean checkNestedDefinitions, PropertyFilterDefinition filterDefinition) { Objects.requireNonNull(beanType, "Bean type cannot be null"); InstanceKey key = new InstanceKey(beanType, false, filterDefinition.getMaxNestingDepth(), filterDefinition.getIgnorePackageNamesStartingWith()); return (PropertySet<T>) INSTANCES .computeIfAbsent(key, k -> new BeanPropertySet<>(key, checkNestedDefinitions, filterDefinition)) .copy(); }
3.68
flink_AbstractPagedInputView_getHeaderLength
/** @return header length. */ public int getHeaderLength() { return headerLength; }
3.68
flink_KeyGroupPartitioner_reportAllElementKeyGroups
/** This method iterates over the input data and reports the key-group for each element. */ protected void reportAllElementKeyGroups() { Preconditions.checkState(partitioningSource.length >= numberOfElements); for (int i = 0; i < numberOfElements; ++i) { int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup( keyExtractorFunction.extractKeyFromElement(partitioningSource[i]), totalKeyGroups); reportKeyGroupOfElementAtIndex(i, keyGroup); } }
3.68
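For context, a simplified sketch of the key-group assignment invoked above; Flink's real KeyGroupRangeAssignment additionally murmur-hashes key.hashCode() before the modulo for better spread, and the class below is hypothetical:

public class KeyGroupDemo {
    // Simplified stand-in for KeyGroupRangeAssignment.assignToKeyGroup:
    // hash the key and reduce modulo the total number of key groups.
    static int assignToKeyGroup(Object key, int totalKeyGroups) {
        return Math.abs(key.hashCode() % totalKeyGroups);
    }

    public static void main(String[] args) {
        int totalKeyGroups = 128;
        for (String key : new String[] {"user-1", "user-2", "user-3"}) {
            System.out.println(key + " -> key group " + assignToKeyGroup(key, totalKeyGroups));
        }
    }
}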
hadoop_ApplicationMaster_cleanup
/** * @return True iff the application successfully completed */ private boolean cleanup() { // Join all launched threads // needed for when we time out // and we need to release containers for (Thread launchThread : launchThreads) { try { launchThread.join(10000); } catch (InterruptedException e) { LOG.info("Exception thrown in thread join: " + e.getMessage()); e.printStackTrace(); } } // When the application completes, it should stop all running containers LOG.info("Application completed. Stopping running containers"); nmClientAsync.stop(); // When the application completes, it should send a finish application // signal to the RM LOG.info("Application completed. Signalling finish to RM"); FinalApplicationStatus appStatus; String appMessage = null; boolean success; if (numFailedDataNodeContainers.get() == 0 && numCompletedDataNodeContainers.get() == numTotalDataNodes) { appStatus = FinalApplicationStatus.SUCCEEDED; success = true; } else { appStatus = FinalApplicationStatus.FAILED; appMessage = "Diagnostics: total=" + numTotalDataNodeContainers + ", completed=" + numCompletedDataNodeContainers.get() + ", allocated=" + numAllocatedDataNodeContainers.get() + ", failed=" + numFailedDataNodeContainers.get(); success = false; } try { amRMClient.unregisterApplicationMaster(appStatus, appMessage, null); } catch (YarnException|IOException ex) { LOG.error("Failed to unregister application", ex); } amRMClient.stop(); return success; }
3.68
hbase_TableMapReduceUtil_convertStringToScan
/** * Converts the given Base64 string back into a Scan instance. * @param base64 The scan details. * @return The newly created Scan instance. * @throws IOException When reading the scan instance fails. */ public static Scan convertStringToScan(String base64) throws IOException { byte[] decoded = Base64.getDecoder().decode(base64); return ProtobufUtil.toScan(ClientProtos.Scan.parseFrom(decoded)); }
3.68
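The Base64 handling is plain java.util.Base64; a round-trip sketch of just that step, leaving out the HBase protobuf Scan parsing (the encode side mirrors what a companion convertScanToString would produce):

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class Base64RoundTrip {
    public static void main(String[] args) {
        byte[] payload = "serialized-scan-bytes".getBytes(StandardCharsets.UTF_8);
        // Encode to a transportable string, then decode as convertStringToScan does.
        String base64 = Base64.getEncoder().encodeToString(payload);
        byte[] decoded = Base64.getDecoder().decode(base64);
        System.out.println(new String(decoded, StandardCharsets.UTF_8)); // serialized-scan-bytes
    }
}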
querydsl_StringExpression_startsWith
/** * Create a {@code this.startsWith(str)} expression * * <p>Return true if this starts with str</p> * * @param str string * @return this.startsWith(str) * @see java.lang.String#startsWith(String) */ public BooleanExpression startsWith(String str) { return startsWith(ConstantImpl.create(str)); }
3.68
hbase_BlockType_getId
/** * Use this instead of {@link #ordinal()}. They work exactly the same, except DATA and * ENCODED_DATA get the same id using this method (overridden for {@link #ENCODED_DATA}). * @return block type id from 0 to the number of block types - 1 */ public int getId() { // Default implementation, can be overridden for individual enum members. return ordinal(); }
3.68
hudi_AvroOrcUtils_readFromVector
/** * Read the Column vector at a given position conforming to a given ORC schema. * * @param type ORC schema of the object to read. * @param colVector The column vector to read. * @param avroSchema Avro schema of the object to read. * Only used to check logical types for timestamp unit conversion. * @param vectorPos The position in the vector where the value to read is stored at. * @return The object being read. */ public static Object readFromVector(TypeDescription type, ColumnVector colVector, Schema avroSchema, int vectorPos) { if (colVector.isRepeating) { vectorPos = 0; } if (colVector.isNull[vectorPos]) { return null; } if (avroSchema.getType().equals(Schema.Type.UNION)) { avroSchema = getActualSchemaType(avroSchema); } LogicalType logicalType = avroSchema != null ? avroSchema.getLogicalType() : null; switch (type.getCategory()) { case BOOLEAN: return ((LongColumnVector) colVector).vector[vectorPos] != 0; case BYTE: return (byte) ((LongColumnVector) colVector).vector[vectorPos]; case SHORT: return (short) ((LongColumnVector) colVector).vector[vectorPos]; case INT: return (int) ((LongColumnVector) colVector).vector[vectorPos]; case LONG: return ((LongColumnVector) colVector).vector[vectorPos]; case FLOAT: return (float) ((DoubleColumnVector) colVector).vector[vectorPos]; case DOUBLE: return ((DoubleColumnVector) colVector).vector[vectorPos]; case VARCHAR: case CHAR: int maxLength = type.getMaxLength(); String result = ((BytesColumnVector) colVector).toString(vectorPos); if (result.length() <= maxLength) { return result; } else { throw new HoodieIOException("CHAR/VARCHAR has length " + result.length() + " greater than Max Length allowed"); } case STRING: String stringType = avroSchema.getProp(GenericData.STRING_PROP); if (stringType == null || !stringType.equals(StringType.String.name())) { int stringLength = ((BytesColumnVector) colVector).length[vectorPos]; int stringOffset = ((BytesColumnVector) colVector).start[vectorPos]; byte[] stringBytes = new byte[stringLength]; System.arraycopy(((BytesColumnVector) colVector).vector[vectorPos], stringOffset, stringBytes, 0, stringLength); return new Utf8(stringBytes); } else { return ((BytesColumnVector) colVector).toString(vectorPos); } case DATE: // convert to daysSinceEpoch for LogicalType.Date return (int) ((LongColumnVector) colVector).vector[vectorPos]; case TIMESTAMP: // The unit of time in ORC is millis. Convert (time,nanos) to the desired unit per logicalType long time = ((TimestampColumnVector) colVector).time[vectorPos]; int nanos = ((TimestampColumnVector) colVector).nanos[vectorPos]; if (logicalType instanceof LogicalTypes.TimestampMillis) { return time; } else if (logicalType instanceof LogicalTypes.TimestampMicros) { return time * MICROS_PER_MILLI + nanos / NANOS_PER_MICRO; } else { return ((TimestampColumnVector) colVector).getTimestampAsLong(vectorPos); } case BINARY: int binaryLength = ((BytesColumnVector) colVector).length[vectorPos]; int binaryOffset = ((BytesColumnVector) colVector).start[vectorPos]; byte[] binaryBytes = new byte[binaryLength]; System.arraycopy(((BytesColumnVector) colVector).vector[vectorPos], binaryOffset, binaryBytes, 0, binaryLength); // return a ByteBuffer to be consistent with AvroRecordConverter return ByteBuffer.wrap(binaryBytes); case DECIMAL: // HiveDecimal always ignores trailing zeros, thus modifies the scale implicitly, // therefore, the scale must be enforced here. BigDecimal bigDecimal = ((DecimalColumnVector) colVector).vector[vectorPos] .getHiveDecimal().bigDecimalValue() .setScale(((LogicalTypes.Decimal) logicalType).getScale()); Schema.Type baseType = avroSchema.getType(); if (baseType.equals(Schema.Type.FIXED)) { return new Conversions.DecimalConversion().toFixed(bigDecimal, avroSchema, logicalType); } else if (baseType.equals(Schema.Type.BYTES)) { return bigDecimal.unscaledValue().toByteArray(); } else { throw new HoodieIOException(baseType.getName() + " is not a valid type for LogicalTypes.DECIMAL."); } case LIST: ArrayList<Object> list = new ArrayList<>(); ListColumnVector listVector = (ListColumnVector) colVector; int listLength = (int) listVector.lengths[vectorPos]; int listOffset = (int) listVector.offsets[vectorPos]; list.ensureCapacity(listLength); TypeDescription childType = type.getChildren().get(0); for (int i = 0; i < listLength; i++) { list.add(readFromVector(childType, listVector.child, avroSchema.getElementType(), listOffset + i)); } return list; case MAP: Map<String, Object> map = new HashMap<String, Object>(); MapColumnVector mapVector = (MapColumnVector) colVector; int mapLength = (int) mapVector.lengths[vectorPos]; int mapOffset = (int) mapVector.offsets[vectorPos]; // keys are always strings for maps in Avro Schema keySchema = Schema.create(Schema.Type.STRING); for (int i = 0; i < mapLength; i++) { map.put( readFromVector(type.getChildren().get(0), mapVector.keys, keySchema, i + mapOffset).toString(), readFromVector(type.getChildren().get(1), mapVector.values, avroSchema.getValueType(), i + mapOffset)); } return map; case STRUCT: StructColumnVector structVector = (StructColumnVector) colVector; List<TypeDescription> children = type.getChildren(); GenericData.Record record = new GenericData.Record(avroSchema); for (int i = 0; i < children.size(); i++) { record.put(i, readFromVector(children.get(i), structVector.fields[i], avroSchema.getFields().get(i).schema(), vectorPos)); } return record; case UNION: UnionColumnVector unionVector = (UnionColumnVector) colVector; int tag = unionVector.tags[vectorPos]; ColumnVector fieldVector = unionVector.fields[tag]; return readFromVector(type.getChildren().get(tag), fieldVector, avroSchema.getTypes().get(tag), vectorPos); default: throw new HoodieIOException("Unrecognized TypeDescription " + type.toString()); } }
3.68
flink_HyperLogLogPlusPlus_query
/** * Compute the HyperLogLog estimate. * * <p>Variable names in the HLL++ paper match variable names in the code. */ public long query(HllBuffer buffer) { // Compute the inverse of indicator value 'z' and count the number of zeros 'V'. double zInverse = 0.0d; double v = 0.0d; int idx = 0; int wordOffset = 0; while (wordOffset < numWords) { long word = buffer.array[wordOffset]; int i = 0; int shift = 0; while (idx < m && i < REGISTERS_PER_WORD) { long mIdx = (word >>> shift) & REGISTER_WORD_MASK; zInverse += 1.0 / (1 << mIdx); if (mIdx == 0) { v += 1.0d; } shift += REGISTER_SIZE; i += 1; idx += 1; } wordOffset += 1; } // We integrate two steps from the paper: // val Z = 1.0d / zInverse // val E = alphaM2 * Z double e = alphaM2 / zInverse; double eBiasCorrected = p < 19 && e < 5.0d * m ? e - estimateBias(e) : e; double estimate; // Estimate the cardinality. if (v > 0) { // Use linear counting for small cardinality estimates. double h = m * Math.log(m / v); // HLL++ is defined only when p < 19, otherwise we need to fallback to HLL. // The threshold `2.5 * m` is from the original HLL algorithm. if ((p < 19 && h <= THRESHOLDS[p - 4]) || e <= 2.5 * m) { estimate = h; } else { estimate = eBiasCorrected; } } else { estimate = eBiasCorrected; } // Round to the nearest long value. return Math.round(estimate); }
3.68
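To make the register unpacking concrete, a small sketch under the assumption of 6-bit registers packed ten to a 64-bit word, which is what REGISTER_SIZE, REGISTERS_PER_WORD, and REGISTER_WORD_MASK appear to encode above:

public class RegisterUnpackDemo {
    static final int REGISTER_SIZE = 6;                        // bits per register (assumed)
    static final int REGISTERS_PER_WORD = 64 / REGISTER_SIZE;  // 10 registers per long
    static final long REGISTER_WORD_MASK = (1L << REGISTER_SIZE) - 1;

    public static void main(String[] args) {
        // Pack register values 5 and 9 into one word, then unpack them again.
        long word = 0L;
        word |= 5L;                  // register 0
        word |= 9L << REGISTER_SIZE; // register 1
        for (int i = 0; i < 2; i++) {
            long mIdx = (word >>> (i * REGISTER_SIZE)) & REGISTER_WORD_MASK;
            System.out.println("register " + i + " = " + mIdx
                    + ", contributes 1/2^m = " + (1.0 / (1 << mIdx)));
        }
    }
}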
flink_AbstractInvokable_getTaskConfiguration
/** * Returns the task configuration object which was attached to the original {@link * org.apache.flink.runtime.jobgraph.JobVertex}. * * @return the task configuration object which was attached to the original {@link * org.apache.flink.runtime.jobgraph.JobVertex} */ public final Configuration getTaskConfiguration() { return this.environment.getTaskConfiguration(); }
3.68
flink_SqlLikeUtils_like
/** SQL {@code LIKE} function with escape. */ public static boolean like(String s, String pattern, String escape) { final String regex = sqlToRegexLike(pattern, escape); return Pattern.matches(regex, s); }
3.68
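A rough sketch of what a LIKE-to-regex translation such as sqlToRegexLike does in the simple case: % becomes .*, _ becomes ., and everything else is quoted. This simplified version ignores the escape argument and is not the actual Flink implementation.

import java.util.regex.Pattern;

public class LikeToRegexDemo {
    // Simplified LIKE-to-regex translation; no escape-character support.
    static String sqlToRegexLike(String pattern) {
        StringBuilder regex = new StringBuilder();
        for (char c : pattern.toCharArray()) {
            if (c == '%') {
                regex.append(".*");
            } else if (c == '_') {
                regex.append('.');
            } else {
                regex.append(Pattern.quote(String.valueOf(c)));
            }
        }
        return regex.toString();
    }

    public static void main(String[] args) {
        System.out.println(Pattern.matches(sqlToRegexLike("fl_nk%"), "flink-1.17")); // true
        System.out.println(Pattern.matches(sqlToRegexLike("fl_nk%"), "frank"));      // false
    }
}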
hadoop_FederationBlock_initFederationSubClusterDetailTableJs
/** * Initialize the subCluster details JavaScript of the Federation page. * * This part of the js script will control to display or hide the detailed information * of the subCluster when the user clicks on the subClusterId. * * We will obtain the specific information of a SubCluster, * including the information of Applications, Resources, and Nodes. * * @param html html object * @param subClusterDetailMap subCluster Detail Map */ private void initFederationSubClusterDetailTableJs(Block html, List<Map<String, String>> subClusterDetailMap) { Gson gson = new Gson(); html.script().$type("text/javascript"). __(" var scTableData = " + gson.toJson(subClusterDetailMap) + "; ") .__(); html.script(root_url("static/federation/federation.js")); }
3.68
hbase_Bytes_readByteArrayThrowsRuntime
/** * Read byte-array written with a WritableUtils.vint prefix. IOException is converted to a * RuntimeException. * @param in Input to read from. * @return byte array read off <code>in</code> */ public static byte[] readByteArrayThrowsRuntime(final DataInput in) { try { return readByteArray(in); } catch (Exception e) { throw new RuntimeException(e); } }
3.68
hbase_TableInfoModel_get
/** * @param index the index * @return the region model */ public TableRegionModel get(int index) { return regions.get(index); }
3.68
flink_NonClosingCheckpointOutputStream_getDelegate
/** This method should not be public so as to not expose internals to user code. */ CheckpointStateOutputStream getDelegate() { return delegate; }
3.68
flink_ProjectOperator_projectTuple13
/** * Projects a {@link Tuple} {@link DataSet} to the previously selected fields. * * @return The projected DataSet. * @see Tuple * @see DataSet */ public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> ProjectOperator<T, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> projectTuple13() { TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes, ds.getType()); TupleTypeInfo<Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>> tType = new TupleTypeInfo< Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>(fTypes); return new ProjectOperator< T, Tuple13<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12>>( this.ds, this.fieldIndexes, tType); }
3.68
hadoop_RouterDelegationTokenSecretManager_storeNewMasterKey
/** * Store a new master key in the Router. * During this process, the facade calls the specific StateStore to store the master key. * * @param newKey DelegationKey */ @Override public void storeNewMasterKey(DelegationKey newKey) { try { federationFacade.storeNewMasterKey(newKey); } catch (Exception e) { if (!shouldIgnoreException(e)) { LOG.error("Error in storing master key with KeyID: {}.", newKey.getKeyId()); ExitUtil.terminate(1, e); } } }
3.68
framework_VGridLayout_distributeColSpanWidths
/** * Iterates colspanned cells and ensures columns have enough space to accommodate * them. */ void distributeColSpanWidths() { for (SpanList list : colSpans) { for (Cell cell : list.cells) { // cells with relative content may return non-zero here on // subsequent renders int width = cell.hasRelativeWidth() ? 0 : cell.getWidth(); distributeSpanSize(columnWidths, cell.col, cell.colspan, getHorizontalSpacing(), width, colExpandRatioArray); } } }
3.68
hadoop_LocalJobOutputFiles_getOutputFileForWrite
/** * Create a local map output file name. * * @param size the size of the file */ public Path getOutputFileForWrite(long size) throws IOException { String path = String.format(OUTPUT_FILE_FORMAT_STRING, TASKTRACKER_OUTPUT); return lDirAlloc.getLocalPathForWrite(path, size, conf); }
3.68
framework_DragAndDropWrapper_disposeStreamVariable
/** * Calling this method has no effect. DD files are received only once * anyway. */ @Override public void disposeStreamVariable() { }
3.68
framework_Potus_getTookOffice
/** * @return the tookOffice */ public Date getTookOffice() { return tookOffice; }
3.68
framework_AbstractRemoteDataSource_onResponse
/** * Called by the * {@link AbstractRemoteDataSource#requestRows(int, int, RequestRowsCallback)} * implementation when data has been received. * * @param rowData * a list of row objects starting at the requested offset * @param totalSize * the total number of rows available at the remote end */ public void onResponse(List<T> rowData, int totalSize) { if (source.size != totalSize) { source.resetDataAndSize(totalSize); } source.setRowData(requestedRange.getStart(), rowData); }
3.68
hbase_ZKUtil_createAndFailSilent
/** Returns a createAndFailSilent ZKUtilOp */ public static ZKUtilOp createAndFailSilent(String path, byte[] data) { return new CreateAndFailSilent(path, data); }
3.68
hudi_RocksDBDAO_delete
/** * Perform a single Delete operation. * * @param columnFamilyName Column Family name * @param key Key to be deleted */ public <K extends Serializable> void delete(String columnFamilyName, K key) { try { getRocksDB().delete(managedHandlesMap.get(columnFamilyName), SerializationUtils.serialize(key)); } catch (Exception e) { throw new HoodieException(e); } }
3.68
flink_ExistingSavepoint_readUnionState
/** * Read operator {@code UnionState} from a {@code Savepoint} when a custom serializer was used; * e.g., a different serializer than the one returned by {@code * TypeInformation#createSerializer}. * * @param uid The uid of the operator. * @param name The (unique) name for the state. * @param typeInfo The type of the elements in the state. * @param serializer The serializer used to write the elements into state. * @param <T> The type of the values that are in the union state. * @return A {@code DataSet} representing the elements in state. * @throws IOException If the savepoint path is invalid or the uid does not exist. */ public <T> DataSource<T> readUnionState( String uid, String name, TypeInformation<T> typeInfo, TypeSerializer<T> serializer) throws IOException { OperatorState operatorState = metadata.getOperatorState(uid); ListStateDescriptor<T> descriptor = new ListStateDescriptor<>(name, serializer); UnionStateInputFormat<T> inputFormat = new UnionStateInputFormat<>( operatorState, env.getConfiguration(), stateBackend, descriptor); return env.createInput(inputFormat, typeInfo); }
3.68
hbase_TableDescriptorBuilder_hasColumnFamily
/** * Checks to see if this table contains the given column family * @param familyName Family name or column name. * @return true if the table contains the specified family name */ @Override public boolean hasColumnFamily(final byte[] familyName) { return families.containsKey(familyName); }
3.68
hudi_CompactionAdminClient_runRenamingOps
/** * Execute renaming operations. * * @param metaClient HoodieTable MetaClient * @param renameActions List of rename operations */ private List<RenameOpResult> runRenamingOps(HoodieTableMetaClient metaClient, List<Pair<HoodieLogFile, HoodieLogFile>> renameActions, int parallelism, boolean dryRun) { if (renameActions.isEmpty()) { LOG.info("No renaming of log-files needed. Proceeding to remove file-id from compaction-plan"); return new ArrayList<>(); } else { LOG.info("The following compaction renaming operations need to be performed to un-schedule"); if (!dryRun) { context.setJobStatus(this.getClass().getSimpleName(), "Execute unschedule operations: " + config.getTableName()); return context.map(renameActions, lfPair -> { try { LOG.info("RENAME " + lfPair.getLeft().getPath() + " => " + lfPair.getRight().getPath()); renameLogFile(metaClient, lfPair.getLeft(), lfPair.getRight()); return new RenameOpResult(lfPair, true, Option.empty()); } catch (IOException e) { LOG.error("Error renaming log file", e); LOG.error("\n\n\n***NOTE Compaction is in inconsistent state. Try running \"compaction repair " + lfPair.getLeft().getDeltaCommitTime() + "\" to recover from failure ***\n\n\n"); return new RenameOpResult(lfPair, false, Option.of(e)); } }, parallelism); } else { LOG.info("Dry-Run Mode activated for rename operations"); return renameActions.parallelStream().map(lfPair -> new RenameOpResult(lfPair, false, false, Option.empty())) .collect(Collectors.toList()); } } }
3.68
hbase_RegionLocations_remove
/** * Removes location of the given replicaId from the list. * @param replicaId the replicaId of the location to remove * @return a RegionLocations object with removed locations or the same object if nothing is * removed */ public RegionLocations remove(int replicaId) { if (getRegionLocation(replicaId) == null) { return this; } HRegionLocation[] newLocations = new HRegionLocation[locations.length]; System.arraycopy(locations, 0, newLocations, 0, locations.length); if (replicaId < newLocations.length) { newLocations[replicaId] = null; } return new RegionLocations(newLocations); }
3.68
hadoop_OBSBlockOutputStream_flush
/** * The flush operation does not trigger an upload; that awaits the next block * being full. What it does do is call {@code flush() } on the current block, * leaving it to choose how to react. * * @throws IOException Any IO problem. */ @Override public synchronized void flush() throws IOException { checkOpen(); OBSDataBlocks.DataBlock dataBlock = getActiveBlock(); if (dataBlock != null) { dataBlock.flush(); } }
3.68
framework_SharedUtil_join
/** * Joins the words in the input array together into a single string by * inserting the separator string between each word. * * @since 7.4 * @param parts * The array of words * @param separator * The separator string to use between words * @return The constructed string of words and separators */ public static String join(String[] parts, String separator) { if (parts.length == 0) { return ""; } StringBuilder sb = new StringBuilder(); for (String part : parts) { sb.append(part); sb.append(separator); } return sb.substring(0, sb.length() - separator.length()); }
3.68
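A quick usage sketch; the final substring call is what trims the trailing separator appended in the loop:

public class JoinDemo {
    public static void main(String[] args) {
        // Mirrors SharedUtil.join: append every part plus separator, then trim.
        String[] parts = {"alpha", "beta", "gamma"};
        StringBuilder sb = new StringBuilder();
        for (String part : parts) {
            sb.append(part).append(", ");
        }
        String joined = parts.length == 0 ? "" : sb.substring(0, sb.length() - ", ".length());
        System.out.println(joined); // alpha, beta, gamma
    }
}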
dubbo_Configuration_getString
/** * Get a string associated with the given configuration key. * If the key doesn't map to an existing object, the default value * is returned. * * @param key The configuration key. * @param defaultValue The default value. * @return The associated string if key is found and has valid * format, default value otherwise. */ default String getString(String key, String defaultValue) { return convert(String.class, key, defaultValue); }
3.68
hmily_HmilySelectStatement_getWhere
/** * Get where. * * @return where segment */ public Optional<HmilyWhereSegment> getWhere() { return Optional.ofNullable(where); }
3.68
framework_Upload_fireUploadSuccess
/** * Emits the upload success event. * * @param filename * @param MIMEType * @param length * */ protected void fireUploadSuccess(String filename, String MIMEType, long length) { fireEvent(new Upload.SucceededEvent(this, filename, MIMEType, length)); }
3.68
hadoop_Cluster_getRootQueues
/** * Gets the root level queues. * @return array of JobQueueInfo object. * @throws IOException */ public QueueInfo[] getRootQueues() throws IOException, InterruptedException { return client.getRootQueues(); }
3.68
open-banking-gateway_HeadersBodyMapperTemplate_forExecution
/** * Converts context object into object that can be used for ASPSP API call. * @param context Context to convert * @return Object that can be used with {@code Xs2aAdapter} to perform ASPSP API calls */ public ValidatedHeadersBody<H, B> forExecution(C context) { return new ValidatedHeadersBody<>( toHeaders.map(context), toBody.map(toValidatableBody.map(context)) ); }
3.68
querydsl_CollQueryFactory_update
/** * Create a new update clause * * @param path source expression * @param col source collection * @return query */ public static <A> CollUpdateClause<A> update(Path<A> path, Iterable<A> col) { return new CollUpdateClause<A>(path, col); }
3.68
pulsar_DateFormatter_parse
/** * @param datetime * @return the parsed timestamp (in milliseconds) of the provided datetime * @throws DateTimeParseException */ public static long parse(String datetime) throws DateTimeParseException { Instant instant = Instant.from(DATE_FORMAT.parse(datetime)); return instant.toEpochMilli(); }
3.68
hadoop_RMWebAppUtil_createAppSubmissionContextResource
/** * Create the actual Resource inside the ApplicationSubmissionContextInfo to * be submitted to the RM from the information provided by the user. * * @param newApp the information provided by the user * @param conf RM configuration * @return returns the constructed Resource inside the * ApplicationSubmissionContextInfo * @throws BadRequestException */ private static Resource createAppSubmissionContextResource( ApplicationSubmissionContextInfo newApp, Configuration conf) throws BadRequestException { if (newApp.getResource().getvCores() > conf.getInt( YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES)) { String msg = "Requested more cores than configured max"; throw new BadRequestException(msg); } if (newApp.getResource().getMemorySize() > conf.getInt( YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB)) { String msg = "Requested more memory than configured max"; throw new BadRequestException(msg); } Resource r = Resource.newInstance(newApp.getResource().getMemorySize(), newApp.getResource().getvCores()); return r; }
3.68
hadoop_Utils_getMinor
/** * Get the minor version. * * @return The minor version. */ public int getMinor() { return minor; }
3.68
flink_Module_listFunctions
/** * List names of all functions in this module. * * <p>A module can decide to hide certain functions. For example, internal functions that can be * resolved via {@link #getFunctionDefinition(String)} but should not be listed by default. * * @param includeHiddenFunctions whether to list hidden functions or not * @return a set of function names */ default Set<String> listFunctions(boolean includeHiddenFunctions) { return listFunctions(); }
3.68
morf_FieldFromSelectFirst_deepCopyInternal
/** * @see org.alfasoftware.morf.sql.element.AliasedField#deepCopyInternal(DeepCopyTransformation) */ @Override protected AliasedField deepCopyInternal(DeepCopyTransformation transformer) { return new FieldFromSelectFirst(getAlias(), transformer.deepCopy(selectFirstStatement)); }
3.68
hadoop_UnmanagedApplicationManager_allocateAsync
/** * Sends the specified heart beat request to the resource manager and invokes * the callback asynchronously with the response. * * @param request the allocate request * @param callback the callback method for the request * @throws YarnException if registerAM is not called yet */ public void allocateAsync(AllocateRequest request, AsyncCallback<AllocateResponse> callback) throws YarnException { this.heartbeatHandler.allocateAsync(request, callback); // Two possible cases why the UAM is not successfully registered yet: // 1. launchUAM is not called at all. Should throw here. // 2. launchUAM is called but hasn't successfully returned. // // In case 2, we have already saved the allocate request above, so if the // registration succeeds later, no request is lost. if (this.userUgi == null) { if (this.connectionInitiated) { LOG.info("Unmanaged AM still not successfully launched/registered yet." + " Saving the allocate request and send later."); } else { throw new YarnException("AllocateAsync should not be called before launchUAM"); } } }
3.68
framework_VaadinSession_addBootstrapListener
/** * Adds a listener that will be invoked when the bootstrap HTML is about to * be generated. This can be used to modify the contents of the HTML that * loads the Vaadin application in the browser and the HTTP headers that are * included in the response serving the HTML. * * @see BootstrapListener#modifyBootstrapFragment(BootstrapFragmentResponse) * @see BootstrapListener#modifyBootstrapPage(BootstrapPageResponse) * * @param listener * the bootstrap listener to add * @return a registration object for removing the listener * @since 8.0 */ public Registration addBootstrapListener(BootstrapListener listener) { assert hasLock(); eventRouter.addListener(BootstrapFragmentResponse.class, listener, BOOTSTRAP_FRAGMENT_METHOD); eventRouter.addListener(BootstrapPageResponse.class, listener, BOOTSTRAP_PAGE_METHOD); return () -> { eventRouter.removeListener(BootstrapFragmentResponse.class, listener, BOOTSTRAP_FRAGMENT_METHOD); eventRouter.removeListener(BootstrapPageResponse.class, listener, BOOTSTRAP_PAGE_METHOD); }; }
3.68
flink_DefaultFailureEnricherContext_forTaskFailure
/** Factory method returning a Task failure Context for the given params. */ public static Context forTaskFailure( JobID jobID, String jobName, MetricGroup metricGroup, Executor ioExecutor, ClassLoader classLoader) { return new DefaultFailureEnricherContext( jobID, jobName, metricGroup, FailureType.TASK, ioExecutor, classLoader); }
3.68
framework_Color_getGreen
/** * Returns the green value of the color. * */ public int getGreen() { return green; }
3.68
flink_WindowedStateTransformation_apply
/** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * <p>Note that this function requires that all data in the windows is buffered until the window * is evaluated, as the function provides no means of incremental aggregation. * * @param function The window function. * @param resultType Type information for the result type of the window function * @return The data stream that is the result of applying the window function to the window. */ public <R> StateBootstrapTransformation<T> apply( WindowFunction<T, R, K, W> function, TypeInformation<R> resultType) { function = input.getExecutionEnvironment().clean(function); WindowOperator<K, T, ?, R, W> operator = builder.apply(function); SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator); return new StateBootstrapTransformation<>( input, operatorMaxParallelism, factory, keySelector, keyType); }
3.68
hadoop_DataNodeVolumeMetrics_getMetadataOperationSampleCount
// Based on metadataOperationRate public long getMetadataOperationSampleCount() { return metadataOperationRate.lastStat().numSamples(); }
3.68
flink_ClusterEntrypoint_cleanupDirectories
/** * Clean up of temporary directories created by the {@link ClusterEntrypoint}. * * @param shutdownBehaviour specifying the shutdown behaviour * @throws IOException if the temporary directories could not be cleaned up */ protected void cleanupDirectories(ShutdownBehaviour shutdownBehaviour) throws IOException { IOException ioException = null; final String webTmpDir = configuration.getString(WebOptions.TMP_DIR); try { FileUtils.deleteDirectory(new File(webTmpDir)); } catch (IOException ioe) { ioException = ioe; } synchronized (lock) { if (workingDirectory != null) { // We only clean up the working directory if we gracefully shut down or if its path // is nondeterministic. If it is a process failure, then we want to keep the working // directory for potential recoveries. if (!workingDirectory.isDeterministic() || shutdownBehaviour == ShutdownBehaviour.GRACEFUL_SHUTDOWN) { try { workingDirectory.unwrap().delete(); } catch (IOException ioe) { ioException = ExceptionUtils.firstOrSuppressed(ioe, ioException); } } } } if (ioException != null) { throw ioException; } }
3.68
hbase_ZKLeaderManager_waitToBecomeLeader
/** * Blocks until this instance has claimed the leader ZNode in ZooKeeper */ public void waitToBecomeLeader() { while (!candidate.isStopped()) { try { if (ZKUtil.createEphemeralNodeAndWatch(watcher, leaderZNode, nodeId)) { // claimed the leader znode leaderExists.set(true); if (LOG.isDebugEnabled()) { LOG.debug("Claimed the leader znode as '" + Bytes.toStringBinary(nodeId) + "'"); } return; } // if claiming the node failed, there should be another existing node byte[] currentId = ZKUtil.getDataAndWatch(watcher, leaderZNode); if (currentId != null && Bytes.equals(currentId, nodeId)) { // claimed with our ID, but we didn't grab it, possibly restarted? LOG.info( "Found existing leader with our ID (" + Bytes.toStringBinary(nodeId) + "), removing"); ZKUtil.deleteNode(watcher, leaderZNode); leaderExists.set(false); } else { LOG.info("Found existing leader with ID: {}", Bytes.toStringBinary(currentId)); leaderExists.set(true); } } catch (KeeperException ke) { watcher.abort("Unexpected error from ZK, stopping candidate", ke); candidate.stop("Unexpected error from ZK: " + ke.getMessage()); return; } // wait for next chance synchronized (lock) { while (leaderExists.get() && !candidate.isStopped()) { try { lock.wait(); } catch (InterruptedException ie) { LOG.debug("Interrupted waiting on leader", ie); } } } } }
3.68
framework_TabSheet_setTabsVisible
/** * Sets whether the tab selection part should be shown in the UI. * * @since 7.5 * @param tabsVisible * true if the tabs should be shown in the UI, false otherwise */ public void setTabsVisible(boolean tabsVisible) { getState().tabsVisible = tabsVisible; }
3.68