Dataset columns:
name: string (length 12 to 178)
code_snippet: string (length 8 to 36.5k)
score: float64 (range 3.26 to 3.68)
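Each record below gives the method identifier (name), the flattened Javadoc-plus-implementation (code_snippet), and its score; the rows shown here all sit at the top of the score range. As a minimal sketch of how a table with this schema could be consumed, assuming it is published as a Hugging Face dataset (the path "example/java-doc-quality" and the 3.6 cutoff below are illustrative placeholders, not values taken from this document):

# Minimal sketch: load a dataset with the columns described above and keep the
# highest-scoring rows. The dataset path is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("example/java-doc-quality", split="train")  # placeholder path

# Keep rows near the top of the observed score range (maximum 3.68 per the header).
top = ds.filter(lambda row: row["score"] >= 3.6)

# Print a few of the retained records.
for row in top.select(range(min(3, len(top)))):
    print(row["name"], row["score"])
    print(row["code_snippet"][:120], "...")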
hadoop_BaseResource_getUri
/** * Resource location for a service, e.g. * /app/v1/services/helloworld * **/ public String getUri() { return uri; }
3.68
framework_OracleGenerator_generateSelectQuery
/* * (non-Javadoc) * * @see com.vaadin.addon.sqlcontainer.query.generator.DefaultSQLGenerator# * generateSelectQuery(java.lang.String, java.util.List, * com.vaadin.addon.sqlcontainer.query.FilteringMode, java.util.List, int, * int, java.lang.String) */ @Override public StatementHelper generateSelectQuery(String tableName, List<Filter> filters, List<OrderBy> orderBys, int offset, int pagelength, String toSelect) { if (tableName == null || tableName.trim().equals("")) { throw new IllegalArgumentException("Table name must be given."); } /* Adjust offset and page length parameters to match "row numbers" */ offset = pagelength > 1 ? ++offset : offset; pagelength = pagelength > 1 ? --pagelength : pagelength; toSelect = toSelect == null ? "*" : toSelect; StatementHelper sh = getStatementHelper(); StringBuffer query = new StringBuffer(); /* Row count request is handled here */ if ("COUNT(*)".equalsIgnoreCase(toSelect)) { query.append(String.format( "SELECT COUNT(*) AS %s FROM (SELECT * FROM %s", QueryBuilder.quote("rowcount"), tableName)); if (filters != null && !filters.isEmpty()) { query.append( QueryBuilder.getWhereStringForFilters(filters, sh)); } query.append(')'); sh.setQueryString(query.toString()); return sh; } /* SELECT without row number constraints */ if (offset == 0 && pagelength == 0) { query.append("SELECT ").append(toSelect).append(" FROM ") .append(tableName); if (filters != null) { query.append( QueryBuilder.getWhereStringForFilters(filters, sh)); } if (orderBys != null) { for (OrderBy o : orderBys) { generateOrderBy(query, o, orderBys.indexOf(o) == 0); } } sh.setQueryString(query.toString()); return sh; } /* Remaining SELECT cases are handled here */ query.append(String.format( "SELECT * FROM (SELECT x.*, ROWNUM AS %s FROM (SELECT %s FROM %s", QueryBuilder.quote("rownum"), toSelect, tableName)); if (filters != null) { query.append(QueryBuilder.getWhereStringForFilters(filters, sh)); } if (orderBys != null) { for (OrderBy o : orderBys) { generateOrderBy(query, o, orderBys.indexOf(o) == 0); } } query.append(String.format(") x) WHERE %s BETWEEN %d AND %d", QueryBuilder.quote("rownum"), offset, offset + pagelength)); sh.setQueryString(query.toString()); return sh; }
3.68
flink_DataSet_map
/** * Applies a Map transformation on this DataSet. * * <p>The transformation calls a {@link org.apache.flink.api.common.functions.MapFunction} for * each element of the DataSet. Each MapFunction call returns exactly one element. * * @param mapper The MapFunction that is called for each element of the DataSet. * @return A MapOperator that represents the transformed DataSet. * @see org.apache.flink.api.common.functions.MapFunction * @see org.apache.flink.api.common.functions.RichMapFunction * @see MapOperator */ public <R> MapOperator<T, R> map(MapFunction<T, R> mapper) { if (mapper == null) { throw new NullPointerException("Map function must not be null."); } String callLocation = Utils.getCallLocationName(); TypeInformation<R> resultType = TypeExtractor.getMapReturnTypes(mapper, getType(), callLocation, true); return new MapOperator<>(this, resultType, clean(mapper), callLocation); }
3.68
morf_SqlServerDialect_decorateTemporaryTableName
/** * {@inheritDoc} * * @see org.alfasoftware.morf.jdbc.SqlDialect#decorateTemporaryTableName(java.lang.String) */ @Override public String decorateTemporaryTableName(String undecoratedName) { return "#" + undecoratedName; }
3.68
querydsl_CollectionUtils_unmodifiableList
/** * Return an unmodifiable copy of a list, or the same list if it's already an unmodifiable type. * * @param list the list * @param <T> element type * @return unmodifiable copy of a list, or the same list if it's already an unmodifiable type */ public static <T> List<T> unmodifiableList(List<T> list) { if (isUnmodifiableType(list.getClass())) { return list; } switch (list.size()) { case 0: return Collections.emptyList(); case 1: return Collections.singletonList(list.get(0)); default: return Collections.unmodifiableList(new ArrayList<>(list)); } }
3.68
hbase_AbstractProcedureScheduler_wakeEvents
/** * Wake up all of the given events. Note that we first take scheduler lock and then wakeInternal() * synchronizes on the event. Access should remain package-private. Use ProcedureEvent class to * wake/suspend events. * @param events the list of events to wake */ public void wakeEvents(ProcedureEvent[] events) { schedLock(); try { for (ProcedureEvent event : events) { if (event == null) { continue; } event.wakeInternal(this); } } finally { schedUnlock(); } }
3.68
hbase_MetricsRegionServer_incrementNumRegionSizeReportsSent
/** * @see MetricsRegionServerQuotaSource#incrementNumRegionSizeReportsSent(long) */ public void incrementNumRegionSizeReportsSent(long numReportsSent) { quotaSource.incrementNumRegionSizeReportsSent(numReportsSent); }
3.68
hadoop_ResourceUsage_getAMLimit
/* * AM-Resource Limit */ public Resource getAMLimit() { return getAMLimit(NL); }
3.68
hadoop_DataNodeVolumeMetrics_getSyncIoSampleCount
// Based on syncIoRate public long getSyncIoSampleCount() { return syncIoRate.lastStat().numSamples(); }
3.68
hadoop_CommonAuditContext_put
/** * Put a context entry dynamically evaluated on demand. * Important: as these supplier methods are long-lived, * the supplier function <i>MUST NOT</i> be part of/refer to * any object instance of significant memory size. * Applications SHOULD remove references when they are * no longer needed. * When logged at TRACE, prints the key and stack trace of the caller, * to allow for debugging of any problems. * @param key key * @param value new value * @return old value or null */ public Supplier<String> put(String key, Supplier<String> value) { if (LOG.isTraceEnabled()) { LOG.trace("Adding context entry {}", key, new Exception(key)); } return evaluatedEntries.put(key, value); }
3.68
zxing_Decoder_decode
/** * <p>Decodes a QR Code represented as a {@link BitMatrix}. A 1 or "true" is taken to mean a black module.</p> * * @param bits booleans representing white/black QR Code modules * @param hints decoding hints that should be used to influence decoding * @return text and bytes encoded within the QR Code * @throws FormatException if the QR Code cannot be decoded * @throws ChecksumException if error correction fails */ public DecoderResult decode(BitMatrix bits, Map<DecodeHintType,?> hints) throws FormatException, ChecksumException { // Construct a parser and read version, error-correction level BitMatrixParser parser = new BitMatrixParser(bits); FormatException fe = null; ChecksumException ce = null; try { return decode(parser, hints); } catch (FormatException e) { fe = e; } catch (ChecksumException e) { ce = e; } try { // Revert the bit matrix parser.remask(); // Will be attempting a mirrored reading of the version and format info. parser.setMirror(true); // Preemptively read the version. parser.readVersion(); // Preemptively read the format information. parser.readFormatInformation(); /* * Since we're here, this means we have successfully detected some kind * of version and format information when mirrored. This is a good sign, * that the QR code may be mirrored, and we should try once more with a * mirrored content. */ // Prepare for a mirrored reading. parser.mirror(); DecoderResult result = decode(parser, hints); // Success! Notify the caller that the code was mirrored. result.setOther(new QRCodeDecoderMetaData(true)); return result; } catch (FormatException | ChecksumException e) { // Throw the exception from the original reading if (fe != null) { throw fe; } throw ce; // If fe is null, this can't be } }
3.68
AreaShop_GeneralRegion_updateLastActiveTime
/** * Set the last active time of the player to the current time. */ public void updateLastActiveTime() { if(getOwner() != null) { setSetting("general.lastActive", Calendar.getInstance().getTimeInMillis()); } }
3.68
framework_BootstrapHandler_getWidgetsetName
/** * @return returns the name of the widgetset to use * @deprecated use {@link #getWidgetsetInfo()} instead */ @Deprecated public String getWidgetsetName() { return getWidgetsetInfo().getWidgetsetName(); }
3.68
flink_EmbeddedRocksDBStateBackend_setNumberOfTransferThreads
/** * Sets the number of threads used to transfer files while snapshotting/restoring. * * @param numberOfTransferThreads The number of threads used to transfer files while * snapshotting/restoring. */ public void setNumberOfTransferThreads(int numberOfTransferThreads) { Preconditions.checkArgument( numberOfTransferThreads > 0, "The number of threads used to transfer files in EmbeddedRocksDBStateBackend should be greater than zero."); this.numberOfTransferThreads = numberOfTransferThreads; }
3.68
flink_CatalogManager_listViews
/** * Returns an array of names of all views(both temporary and permanent) registered in the * namespace of the given catalog and database. * * @return names of registered views */ public Set<String> listViews(String catalogName, String databaseName) { Catalog catalog = getCatalogOrThrowException(catalogName); if (catalog == null) { throw new ValidationException(String.format("Catalog %s does not exist", catalogName)); } try { return Stream.concat( catalog.listViews(databaseName).stream(), listTemporaryViewsInternal(catalogName, databaseName) .map(e -> e.getKey().getObjectName())) .collect(Collectors.toSet()); } catch (DatabaseNotExistException e) { throw new ValidationException( String.format("Database %s does not exist", databaseName), e); } }
3.68
flink_CheckpointsCleaner_cleanSubsumedCheckpoints
/** * Clean checkpoint that is not in the given {@param stillInUse}. * * @param upTo lowest CheckpointID which is still valid. * @param stillInUse the state of those checkpoints are still referenced. * @param postCleanAction post action after cleaning. * @param executor is used to perform the cleanup logic. */ public void cleanSubsumedCheckpoints( long upTo, Set<Long> stillInUse, Runnable postCleanAction, Executor executor) { synchronized (lock) { Iterator<CompletedCheckpoint> iterator = subsumedCheckpoints.iterator(); while (iterator.hasNext()) { CompletedCheckpoint checkpoint = iterator.next(); if (checkpoint.getCheckpointID() < upTo && !stillInUse.contains(checkpoint.getCheckpointID())) { try { LOG.debug("Try to discard checkpoint {}.", checkpoint.getCheckpointID()); cleanCheckpoint( checkpoint, checkpoint.shouldBeDiscardedOnSubsume(), postCleanAction, executor); iterator.remove(); } catch (Exception e) { LOG.warn("Fail to discard the old checkpoint {}.", checkpoint); } } } } }
3.68
hbase_RegionCoprocessorHost_postScannerOpen
/** * @param scan the Scan specification * @param s the scanner * @return the scanner instance to use * @exception IOException Exception */ public RegionScanner postScannerOpen(final Scan scan, RegionScanner s) throws IOException { if (this.coprocEnvironments.isEmpty()) { return s; } return execOperationWithResult( new ObserverOperationWithResult<RegionObserver, RegionScanner>(regionObserverGetter, s) { @Override public RegionScanner call(RegionObserver observer) throws IOException { return observer.postScannerOpen(this, scan, getResult()); } }); }
3.68
framework_Panel_getTabIndex
/** * {@inheritDoc} */ @Override public int getTabIndex() { return getState(false).tabIndex; }
3.68
hadoop_StorageUnit_divide
/** * Using BigDecimal to avoid issues with overflow and underflow. * * @param value - value * @param divisor - divisor. * @return -- returns a double that represents this value */ private static double divide(double value, double divisor) { BigDecimal val = new BigDecimal(value); BigDecimal bDivisor = new BigDecimal(divisor); return val.divide(bDivisor).setScale(PRECISION, RoundingMode.HALF_UP) .doubleValue(); }
3.68
hibernate-validator_ConstraintViolationAssert_pathsAreEqual
/** * Checks that two property paths are equal. * * @param p1 The first property path. * @param p2 The second property path. * * @return {@code true} if the given paths are equal, {@code false} otherwise. */ public static boolean pathsAreEqual(Path p1, Path p2) { Iterator<Path.Node> p1Iterator = p1.iterator(); Iterator<Path.Node> p2Iterator = p2.iterator(); while ( p1Iterator.hasNext() ) { Path.Node p1Node = p1Iterator.next(); if ( !p2Iterator.hasNext() ) { return false; } Path.Node p2Node = p2Iterator.next(); // check that the nodes are of the same type if ( p1Node.getKind() != p2Node.getKind() ) { return false; } // do the comparison on the node values if ( p2Node.getName() == null ) { if ( p1Node.getName() != null ) { return false; } } else if ( !p2Node.getName().equals( p1Node.getName() ) ) { return false; } if ( p2Node.isInIterable() != p1Node.isInIterable() ) { return false; } if ( p2Node.getIndex() == null ) { if ( p1Node.getIndex() != null ) { return false; } } else if ( !p2Node.getIndex().equals( p1Node.getIndex() ) ) { return false; } if ( p2Node.getKey() == null ) { if ( p1Node.getKey() != null ) { return false; } } else if ( !p2Node.getKey().equals( p1Node.getKey() ) ) { return false; } Class<?> p1NodeContainerClass = getContainerClass( p1Node ); Class<?> p2NodeContainerClass = getContainerClass( p2Node ); if ( p2NodeContainerClass == null ) { if ( p1NodeContainerClass != null ) { return false; } } else if ( !p2NodeContainerClass.equals( p1NodeContainerClass ) ) { return false; } Integer p1NodeTypeArgumentIndex = getTypeArgumentIndex( p1Node ); Integer p2NodeTypeArgumentIndex = getTypeArgumentIndex( p2Node ); if ( p2NodeTypeArgumentIndex == null ) { if ( p1NodeTypeArgumentIndex != null ) { return false; } } else if ( !p2NodeTypeArgumentIndex.equals( p1NodeTypeArgumentIndex ) ) { return false; } if ( p1Node.getKind() == ElementKind.PARAMETER ) { int p1NodeParameterIndex = p1Node.as( Path.ParameterNode.class ).getParameterIndex(); int p2NodeParameterIndex = p2Node.as( Path.ParameterNode.class ).getParameterIndex(); if ( p1NodeParameterIndex != p2NodeParameterIndex ) { return false; } } } return !p2Iterator.hasNext(); }
3.68
morf_RecordComparator_compare
/** * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object) */ @SuppressWarnings({ "rawtypes", "unchecked" }) @Override public int compare(Record o1, Record o2) { for (Column column : columnSortOrder) { Comparable value1 = RecordHelper.convertToComparableType(column, o1); Comparable value2 = RecordHelper.convertToComparableType(column, o2); if (value1 == null && value2 == null) { continue; // next column } // nulls first if (value1 == null ) return -1; if (value2 == null ) return 1; if (!value1.getClass().equals(value2.getClass())) { throw new IllegalStateException("Types do not match: ["+value1 +"] ["+value2+"]"); } int order = value1.compareTo(value2); if (order != 0) { return order; } } // if we get all the way out, the rows are identical for this comparison return 0; }
3.68
flink_RestClusterClientConfiguration_getRetryDelay
/** @see RestOptions#RETRY_DELAY */ public long getRetryDelay() { return retryDelay; }
3.68
hbase_AsyncRegionLocationCache_findForBeforeRow
/** * Finds the RegionLocations for the region with the greatest startKey strictly less than the * given row * @param row row to find locations */ public RegionLocations findForBeforeRow(byte[] row, int replicaId) { boolean isEmptyStopRow = isEmptyStopRow(row); Map.Entry<byte[], RegionLocations> entry = isEmptyStopRow ? cache.lastEntry() : cache.lowerEntry(row); if (entry == null) { return null; } RegionLocations locs = entry.getValue(); if (locs == null) { return null; } HRegionLocation loc = locs.getRegionLocation(replicaId); if (loc == null) { return null; } if ( isEmptyStopRow(loc.getRegion().getEndKey()) || (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0) ) { if (LOG.isTraceEnabled()) { LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName, Bytes.toStringBinary(row), RegionLocateType.BEFORE, replicaId); } return locs; } else { return null; } }
3.68
morf_Version2to4TransformingReader_characterReferenceToTransform
/** * Tests whether a given index in the buffer is a full null character reference. * Reads forward if required, but resets the position. */ private ReferenceInfo characterReferenceToTransform(char[] cbuf, int ampersandIndex, int remaining) throws IOException { char[] bufferToTest; int indexToTest; // the maximum we could need is enough to read &#65535; or &#xffff; so 8 chars, including the ampersand. final int maxRefSize = 8; int additionalCharsRequired = maxRefSize-remaining; if (additionalCharsRequired > 0) { bufferToTest = new char[maxRefSize]; // we need to read ahead because we don't have enough chars // first copy the remaining chars in System.arraycopy(cbuf, ampersandIndex, bufferToTest, 0, remaining); // look in the temporary buffer first int writeIdx = remaining; System.arraycopy(temporary, 0, bufferToTest, writeIdx, Math.min(maxRefSize-writeIdx, temporary.length)); writeIdx += Math.min(maxRefSize-writeIdx, temporary.length); // copy in the remainder, resetting the reader after we've read it delegateReader.mark(maxRefSize); while (writeIdx < maxRefSize) { int additionalCharsRead = delegateReader.read(bufferToTest, writeIdx, maxRefSize-writeIdx); if (additionalCharsRead < 0) { // end of stream break; } writeIdx += additionalCharsRead; } indexToTest = 0; // always reset. We'll gobble the extra chars below if we need to delegateReader.reset(); } else { // The common path - we have enough buffer to work with bufferToTest = cbuf; indexToTest = ampersandIndex; } // we know the first char is & // shortcut out if the second char is not # if (bufferToTest[indexToTest+1] != '#') { return null; } // put the rest in a string int semiColonPos = -1; for (int i = 0; i < bufferToTest.length-indexToTest; i++) { if (bufferToTest[indexToTest+i] == ';') { semiColonPos = i; break; } } // no semicolon - exit if (semiColonPos == -1) { return null; } int reference; // Hex references look like: &#x1a3; decimal ones look like &#456; if (bufferToTest[indexToTest+2] == 'x') { // it's a hex reference reference = parseReference(bufferToTest, indexToTest+3, semiColonPos-3, 16); } else { // it's a decimal reference reference = parseReference(bufferToTest, indexToTest+2, semiColonPos-2, 10); } if (reference == -1) { return null; } // we don't need to transform valid references if (Escaping.isCharValidForXml(reference)) { return null; } int referenceLength = semiColonPos+1; // If we get here, it matches and we have a reference to escape. // We now need to ensure the main parser doesn't read the rest of the character reference. // Gobble any extra chars we needed... int toGobble = referenceLength-remaining; // ...first from any temporary buffer int temporaryLength = temporary.length; if (temporaryLength > 0 && toGobble > 0) { if (toGobble >= temporaryLength) { // gobble all of it temporary = new char[0]; } else { // gobble some of it char[] newTemporary = new char[temporaryLength - toGobble]; System.arraycopy(temporary, toGobble, newTemporary, 0, temporaryLength - toGobble); temporary = newTemporary; } toGobble -= temporaryLength; } // ...then whatever's left from the main stream while (toGobble > 0) { int charsRead = delegateReader.read(new char[toGobble], 0, toGobble); if (charsRead == -1) throw new IllegalStateException("Unexpected EOF"); toGobble -= charsRead; } return new ReferenceInfo(reference, referenceLength); }
3.68
hadoop_ContainerContext_getResource
/** * Get {@link Resource} the resource capability allocated to the container * being initialized or stopped. * * @return the resource capability. */ public Resource getResource() { return resource; }
3.68
hbase_RowPrefixFixedLengthBloomContext_getRowPrefixCell
/** * @param cell the cell * @return the new cell created by row prefix */ private Cell getRowPrefixCell(Cell cell) { byte[] row = CellUtil.copyRow(cell); return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY) .setRow(row, 0, Math.min(prefixLength, row.length)).setType(Cell.Type.Put).build(); }
3.68
hbase_MasterProcedureScheduler_waitMetaExclusiveLock
// ============================================================================ // Meta Locking Helpers // ============================================================================ /** * Try to acquire the exclusive lock on meta. * @see #wakeMetaExclusiveLock(Procedure) * @param procedure the procedure trying to acquire the lock * @return true if the procedure has to wait for meta to be available * @deprecated only used for {@link RecoverMetaProcedure}. Should be removed along with * {@link RecoverMetaProcedure}. */ @Deprecated public boolean waitMetaExclusiveLock(Procedure<?> procedure) { schedLock(); try { final LockAndQueue lock = locking.getMetaLock(); if (lock.tryExclusiveLock(procedure)) { removeFromRunQueue(metaRunQueue, getMetaQueue(), () -> procedure + " held exclusive lock"); return false; } waitProcedure(lock, procedure); logLockedResource(LockedResourceType.META, TableName.META_TABLE_NAME.getNameAsString()); return true; } finally { schedUnlock(); } }
3.68
AreaShop_RentRegion_getPrice
/** * Get the price of the region. * @return The price of the region */ public double getPrice() { return Math.max(0, Utils.evaluateToDouble(getStringSetting("rent.price"), this)); }
3.68
pulsar_ProducerConfiguration_getSendTimeoutMs
/** * @return the message send timeout in ms */ public long getSendTimeoutMs() { return conf.getSendTimeoutMs(); }
3.68
flink_BuiltInFunctionDefinition_runtimeDeferred
/** * Specifies that this {@link BuiltInFunctionDefinition} will be mapped to a Calcite * function. */ public Builder runtimeDeferred() { // This method is just a marker method for clarity. It is equivalent to calling // neither {@link #runtimeProvided} nor {@link #runtimeClass}. return this; }
3.68
framework_BasicEvent_getStart
/* * (non-Javadoc) * * @see com.vaadin.addon.calendar.event.CalendarEvent#getStart() */ @Override public Date getStart() { return start; }
3.68
rocketmq-connect_StringConverter_toConnectData
/** * Convert a native object to a Rocketmq Connect data object. */ @Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(SchemaBuilder.string().build(), deserializer.deserialize(topic, value)); } catch (Exception e) { throw new ConnectException("Failed to deserialize string: ", e); } }
3.68
morf_UnionSetOperator_getSelectStatement
/** * {@inheritDoc} * * @see org.alfasoftware.morf.sql.SetOperator#getSelectStatement() */ @Override public SelectStatement getSelectStatement() { return selectStatement; }
3.68
framework_StreamResource_setCacheTime
/** * Sets the length of cache expiration time. * * <p> * This gives the adapter the possibility to cache streams sent to the client. * The caching may be done in the adapter or at the client if the client * supports caching. A zero or negative value disables the caching of this * stream. * </p> * * @param cacheTime * the cache time in milliseconds. * */ public void setCacheTime(long cacheTime) { this.cacheTime = cacheTime; }
3.68
hbase_BlockCacheKey_getHfileName
// can't avoid this unfortunately /** Returns The hfileName portion of this cache key */ public String getHfileName() { return hfileName; }
3.68
hadoop_AbfsOutputStreamStatisticsImpl_bytesToUpload
/** * Records the need to upload bytes and increments the total bytes that * needs to be uploaded. * * @param bytes total bytes to upload. Negative bytes are ignored. */ @Override public void bytesToUpload(long bytes) { bytesUpload.addAndGet(bytes); }
3.68
flink_PushFilterIntoSourceScanRuleBase_canPushdownFilter
/** * Determines whether we can push the filter down into the source. We cannot push a filter twice, so * make sure FilterPushDownSpec has not already been assigned as a capability. * * @param tableSourceTable Table scan to attempt to push into * @return Whether we can push or not */ protected boolean canPushdownFilter(TableSourceTable tableSourceTable) { return tableSourceTable != null && tableSourceTable.tableSource() instanceof SupportsFilterPushDown && Arrays.stream(tableSourceTable.abilitySpecs()) .noneMatch(spec -> spec instanceof FilterPushDownSpec); }
3.68
dubbo_ProtobufTypeBuilder_generateMapFieldName
/** * get map property name from setting method.<br/> * ex: putAllXXX();<br/> * * @param methodName * @return */ private String generateMapFieldName(String methodName) { return toCamelCase(methodName.substring(6)); }
3.68
hudi_SparkRDDReadClient_filterExists
/** * Filter out HoodieRecords that already exists in the output folder. This is useful in deduplication. * * @param hoodieRecords Input RDD of Hoodie records. * @return A subset of hoodieRecords RDD, with existing records filtered out. */ public JavaRDD<HoodieRecord<T>> filterExists(JavaRDD<HoodieRecord<T>> hoodieRecords) { JavaRDD<HoodieRecord<T>> recordsWithLocation = tagLocation(hoodieRecords); return recordsWithLocation.filter(v1 -> !v1.isCurrentLocationKnown()); }
3.68
hadoop_AbfsRestOperation_executeHttpOperation
/** * Executes a single HTTP operation to complete the REST operation. If it * fails, there may be a retry. The retryCount is incremented with each * attempt. */ private boolean executeHttpOperation(final int retryCount, TracingContext tracingContext) throws AzureBlobFileSystemException { AbfsHttpOperation httpOperation; try { // initialize the HTTP request and open the connection httpOperation = createHttpOperation(); incrementCounter(AbfsStatistic.CONNECTIONS_MADE, 1); tracingContext.constructHeader(httpOperation, failureReason); signRequest(httpOperation, hasRequestBody ? bufferLength : 0); } catch (IOException e) { LOG.debug("Auth failure: {}, {}", method, url); throw new AbfsRestOperationException(-1, null, "Auth failure: " + e.getMessage(), e); } try { // dump the headers AbfsIoUtils.dumpHeadersToDebugLog("Request Headers", httpOperation.getConnection().getRequestProperties()); intercept.sendingRequest(operationType, abfsCounters); if (hasRequestBody) { // HttpUrlConnection requires httpOperation.sendRequest(buffer, bufferOffset, bufferLength); incrementCounter(AbfsStatistic.SEND_REQUESTS, 1); incrementCounter(AbfsStatistic.BYTES_SENT, bufferLength); } httpOperation.processResponse(buffer, bufferOffset, bufferLength); incrementCounter(AbfsStatistic.GET_RESPONSES, 1); //Only increment bytesReceived counter when the status code is 2XX. if (httpOperation.getStatusCode() >= HttpURLConnection.HTTP_OK && httpOperation.getStatusCode() <= HttpURLConnection.HTTP_PARTIAL) { incrementCounter(AbfsStatistic.BYTES_RECEIVED, httpOperation.getBytesReceived()); } else if (httpOperation.getStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) { incrementCounter(AbfsStatistic.SERVER_UNAVAILABLE, 1); } } catch (UnknownHostException ex) { String hostname = null; hostname = httpOperation.getHost(); failureReason = RetryReason.getAbbreviation(ex, null, null); LOG.warn("Unknown host name: {}. Retrying to resolve the host name...", hostname); if (!client.getRetryPolicy().shouldRetry(retryCount, -1)) { throw new InvalidAbfsRestOperationException(ex, retryCount); } return false; } catch (IOException ex) { if (LOG.isDebugEnabled()) { LOG.debug("HttpRequestFailure: {}, {}", httpOperation, ex); } failureReason = RetryReason.getAbbreviation(ex, -1, ""); if (!client.getRetryPolicy().shouldRetry(retryCount, -1)) { throw new InvalidAbfsRestOperationException(ex, retryCount); } return false; } finally { int status = httpOperation.getStatusCode(); /* A status less than 300 (2xx range) or greater than or equal to 500 (5xx range) should contribute to throttling metrics being updated. Less than 200 or greater than or equal to 500 show failed operations. 2xx range contributes to successful operations. 3xx range is for redirects and 4xx range is for user errors. These should not be a part of throttling backoff computation. */ boolean updateMetricsResponseCode = (status < HttpURLConnection.HTTP_MULT_CHOICE || status >= HttpURLConnection.HTTP_INTERNAL_ERROR); if (updateMetricsResponseCode) { intercept.updateMetrics(operationType, httpOperation); } } LOG.debug("HttpRequest: {}: {}", operationType, httpOperation); if (client.getRetryPolicy().shouldRetry(retryCount, httpOperation.getStatusCode())) { int status = httpOperation.getStatusCode(); failureReason = RetryReason.getAbbreviation(null, status, httpOperation.getStorageErrorMessage()); return false; } result = httpOperation; return true; }
3.68
flink_MetricConfig_getInteger
/** * Searches for the property with the specified key in this property list. If the key is not * found in this property list, the default property list, and its defaults, recursively, are * then checked. The method returns the default value argument if the property is not found. * * @param key the hashtable key. * @param defaultValue a default value. * @return the value in this property list with the specified key value parsed as an int. */ public int getInteger(String key, int defaultValue) { String argument = getProperty(key, null); return argument == null ? defaultValue : Integer.parseInt(argument); }
3.68
pulsar_ProducerConfiguration_setBatchingMaxMessages
/** * Set the maximum number of messages permitted in a batch. <i>default: 1000</i> If set to a value greater than 1, * messages will be queued until this threshold is reached or batch interval has elapsed * * @see ProducerConfiguration#setBatchingMaxPublishDelay(long, TimeUnit) All messages in batch will be published as * a single batch message. The consumer will be delivered individual messages in the batch in the same order * they were enqueued * @param batchMessagesMaxMessagesPerBatch * maximum number of messages in a batch * @return */ public ProducerConfiguration setBatchingMaxMessages(int batchMessagesMaxMessagesPerBatch) { conf.setBatchingMaxMessages(batchMessagesMaxMessagesPerBatch); return this; }
3.68
flink_DeltaIterationBase_getNextWorkset
/** * Gets the contract that has been set as the next workset. * * @return The contract that has been set as the next workset. */ public Operator<WT> getNextWorkset() { return this.nextWorkset; }
3.68
hbase_Table_delete
/** * Batch Deletes the specified cells/rows from the table. * <p> * If a specified row does not exist, {@link Delete} will report as though sucessful delete; no * exception will be thrown. If there are any failures even after retries, a * {@link RetriesExhaustedWithDetailsException} will be thrown. * RetriesExhaustedWithDetailsException contains lists of failed {@link Delete}s and corresponding * remote exceptions. * @param deletes List of things to delete. The input list gets modified by this method. All * successfully applied {@link Delete}s in the list are removed (in particular it * gets re-ordered, so the order in which the elements are inserted in the list * gives no guarantee as to the order in which the {@link Delete}s are executed). * @throws IOException if a remote or network exception occurs. In that case the {@code deletes} * argument will contain the {@link Delete} instances that have not be * successfully applied. * @since 0.20.1 * @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also, * {@link #put(List)} runs pre-flight validations on the input list on client. Currently * {@link #delete(List)} doesn't run validations on the client, there is no need * currently, but this may change in the future. An {@link IllegalArgumentException} will * be thrown in this case. */ default void delete(List<Delete> deletes) throws IOException { throw new NotImplementedException("Add an implementation!"); }
3.68
hbase_HFileBlock_createBuilder
/** * Creates a new HFileBlockBuilder from the existing block and a new ByteBuff. The builder will be * loaded with all of the original fields from blk, except now using the newBuff and setting * isSharedMem based on the source of the passed in newBuff. An existing HFileBlock may have been * an {@link ExclusiveMemHFileBlock}, but the new buffer might call for a * {@link SharedMemHFileBlock}. Or vice versa. * @param blk the block to clone from * @param newBuff the new buffer to use */ private static HFileBlockBuilder createBuilder(HFileBlock blk, ByteBuff newBuff) { return new HFileBlockBuilder().withBlockType(blk.blockType) .withOnDiskSizeWithoutHeader(blk.onDiskSizeWithoutHeader) .withUncompressedSizeWithoutHeader(blk.uncompressedSizeWithoutHeader) .withPrevBlockOffset(blk.prevBlockOffset).withByteBuff(newBuff).withOffset(blk.offset) .withOnDiskDataSizeWithHeader(blk.onDiskDataSizeWithHeader) .withNextBlockOnDiskSize(blk.nextBlockOnDiskSize).withHFileContext(blk.fileContext) .withByteBuffAllocator(blk.allocator).withShared(!newBuff.hasArray()); }
3.68
pulsar_Transactions_getTransactionBufferStats
/** * Get transaction buffer stats. * * @param topic the topic of getting transaction buffer stats * @return the stats of transaction buffer in topic. */ default TransactionBufferStats getTransactionBufferStats(String topic) throws PulsarAdminException { return getTransactionBufferStats(topic, false, false); }
3.68
morf_ResultSetComparer_compare
/** * Given 2 data sets, return the number of mismatches between them, and * callback with the details of any mismatches as they are found. See * {@link ResultSetMismatch} for definition of a mismatch. * * @param keyColumns The indexes of the key columns common to both data sets. * If this is empty, the result sets must return only one record. * @param left The left hand data set. * @param right The right hand data set. * @param callBack the mismatch callback interface implementation. * @return the number of mismatches between the two data sets. */ public int compare(int[] keyColumns, ResultSet left, ResultSet right, CompareCallback callBack) { boolean expectingSingleRowResult = keyColumns.length == 0; int misMatchCount = 0; try { // Check metaData matches ResultSetMetaData metadataLeft = left.getMetaData(); ResultSetMetaData metadataRight = right.getMetaData(); compareMetadata(metadataLeft, metadataRight); List<Integer> valueCols = getNonKeyColumns(metadataLeft, Sets.newHashSet(ArrayUtils.toObject(keyColumns))); boolean leftHasRow = left.next(); boolean rightHasRow = right.next(); while (leftHasRow || rightHasRow) { String[] keys = new String[0]; // First compare the key columns. If the key columns mismatch, advance the result set with the // lower value for the key columns until we get a match. if (!expectingSingleRowResult) { MismatchType mismatchType; do { // Check for key column mismatches mismatchType = null; List<String> keyValues = Lists.newArrayList(); for (int keyCol : keyColumns) { int columnType = metadataLeft.getColumnType(keyCol); if (mismatchType == null) { mismatchType = compareKeyColumn(left, right, keyCol, columnType, leftHasRow, rightHasRow); } keyValues.add(valueToString(columnToValue(mismatchType == MISSING_LEFT ? right : left, keyCol, columnType), columnType)); } keys = keyValues.toArray(new String[keyValues.size()]); // If we find a mismatch... if (mismatchType != null) { // Fire a callback for each missing non-key value misMatchCount += callbackValueMismatches(left, right, callBack, metadataRight, valueCols, keys, mismatchType); // Advance the recordset the missing key was found in if (mismatchType == MISSING_RIGHT) { // NOPMD leftHasRow = leftHasRow && left.next(); // NOPMD } else if (mismatchType == MISSING_LEFT) { // NOPMD rightHasRow = rightHasRow && right.next(); // NOPMD } } } while (mismatchType != null && (leftHasRow || rightHasRow)); } // Compare non-key columns after matching row is found and is not end of result set. // Remember to check the situation where a single row data set comparison might // actually have yielded no rows on one side, or the other, or both. if (expectingSingleRowResult) { if (!leftHasRow) { misMatchCount += callbackValueMismatches(left, right, callBack, metadataRight, valueCols, keys, MISSING_LEFT); } if (!rightHasRow) { misMatchCount += callbackValueMismatches(left, right, callBack, metadataRight, valueCols, keys, MISSING_RIGHT); } } // Finally actually compare the values for key matched rows. if (leftHasRow && rightHasRow) { misMatchCount += callbackValueMismatches(left, right, callBack, metadataRight, valueCols, keys, MISMATCH); } // Move cursor forward leftHasRow = leftHasRow && left.next(); rightHasRow = rightHasRow && right.next(); if ((leftHasRow || rightHasRow) && expectingSingleRowResult) { throw new IllegalStateException("Comparison can only handle one row for keyless result sets"); } if(terminatePredicate.isPresent() && terminatePredicate.get().apply(null)) { return misMatchCount; } } } catch (SQLException e) { throw new RuntimeSqlException("Error traversing result set", e); } return misMatchCount; }
3.68
druid_FilterAdapter_callableStatement_registerOutParameter
// /////////////// @Override public void callableStatement_registerOutParameter(FilterChain chain, CallableStatementProxy statement, int parameterIndex, int sqlType) throws SQLException { chain.callableStatement_registerOutParameter(statement, parameterIndex, sqlType); }
3.68
hbase_MutableSegment_first
/** * Returns the first cell in the segment * @return the first cell in the segment */ Cell first() { return this.getCellSet().first(); }
3.68
querydsl_BeanMap_convertType
/** * Converts the given value to the given type. First, reflection is * is used to find a public constructor declared by the given class * that takes one argument, which must be the precise type of the * given value. If such a constructor is found, a new object is * created by passing the given value to that constructor, and the * newly constructed object is returned.<P> * <p> * If no such constructor exists, and the given type is a primitive * type, then the given value is converted to a string using its * {@link Object#toString() toString()} method, and that string is * parsed into the correct primitive type using, for instance, * {@link Integer#valueOf(String)} to convert the string into an * {@code int}.<P> * <p> * If no special constructor exists and the given type is not a * primitive type, this method returns the original value. * * @param newType the type to convert the value to * @param value the value to convert * @return the converted value * @throws NumberFormatException if newType is a primitive type, and * the string representation of the given value cannot be converted * to that type * @throws InstantiationException if the constructor found with * reflection raises it * @throws InvocationTargetException if the constructor found with * reflection raises it * @throws IllegalAccessException never */ @SuppressWarnings({ "rawtypes", "unchecked" }) protected Object convertType(Class<?> newType, Object value) throws InstantiationException, IllegalAccessException, InvocationTargetException { // try call constructor Class<?>[] types = {value.getClass()}; try { Constructor<?> constructor = newType.getConstructor(types); Object[] arguments = {value}; return constructor.newInstance(arguments); } catch (NoSuchMethodException e) { // try using the transformers Function function = getTypeFunction(newType); if (function != null) { return function.apply(value); } return value; } }
3.68
hibernate-validator_TraversableResolvers_getDefault
/** * Initializes and returns the default {@link TraversableResolver} depending on the environment. * <p> * If JPA 2 is present in the classpath, a {@link JPATraversableResolver} instance is returned. * <p> * Otherwise, it returns an instance of the default {@link TraverseAllTraversableResolver}. */ public static TraversableResolver getDefault() { // check whether we have Persistence on the classpath Class<?> persistenceClass; try { persistenceClass = run( LoadClass.action( PERSISTENCE_CLASS_NAME, TraversableResolvers.class.getClassLoader() ) ); } catch (ValidationException e) { LOG.debugf( "Cannot find %s on classpath. Assuming non JPA 2 environment. All properties will per default be traversable.", PERSISTENCE_CLASS_NAME ); return getTraverseAllTraversableResolver(); } // check whether Persistence contains getPersistenceUtil Method persistenceUtilGetter = run( GetMethod.action( persistenceClass, PERSISTENCE_UTIL_METHOD ) ); if ( persistenceUtilGetter == null ) { LOG.debugf( "Found %s on classpath, but no method '%s'. Assuming JPA 1 environment. All properties will per default be traversable.", PERSISTENCE_CLASS_NAME, PERSISTENCE_UTIL_METHOD ); return getTraverseAllTraversableResolver(); } // try to invoke the method to make sure that we are dealing with a complete JPA2 implementation // unfortunately there are several incomplete implementations out there (see HV-374) try { Object persistence = run( NewInstance.action( persistenceClass, "persistence provider" ) ); ReflectionHelper.getValue( persistenceUtilGetter, persistence ); } catch (Exception e) { LOG.debugf( "Unable to invoke %s.%s. Inconsistent JPA environment. All properties will per default be traversable.", PERSISTENCE_CLASS_NAME, PERSISTENCE_UTIL_METHOD ); return getTraverseAllTraversableResolver(); } LOG.debugf( "Found %s on classpath containing '%s'. Assuming JPA 2 environment. Trying to instantiate JPA aware TraversableResolver", PERSISTENCE_CLASS_NAME, PERSISTENCE_UTIL_METHOD ); try { @SuppressWarnings("unchecked") Class<? extends TraversableResolver> jpaAwareResolverClass = (Class<? extends TraversableResolver>) run( LoadClass.action( JPA_AWARE_TRAVERSABLE_RESOLVER_CLASS_NAME, TraversableResolvers.class.getClassLoader() ) ); LOG.debugf( "Instantiated JPA aware TraversableResolver of type %s.", JPA_AWARE_TRAVERSABLE_RESOLVER_CLASS_NAME ); return run( NewInstance.action( jpaAwareResolverClass, "" ) ); } catch (ValidationException e) { LOG.logUnableToLoadOrInstantiateJPAAwareResolver( JPA_AWARE_TRAVERSABLE_RESOLVER_CLASS_NAME ); return getTraverseAllTraversableResolver(); } }
3.68
hbase_Constraints_remove
/** * Remove the constraint (and associated information) for the table descriptor. * @param builder {@link TableDescriptorBuilder} to modify * @param clazz {@link Constraint} class to remove */ public static TableDescriptorBuilder remove(TableDescriptorBuilder builder, Class<? extends Constraint> clazz) { String key = serializeConstraintClass(clazz); return builder.removeValue(key); }
3.68
hadoop_HadoopLogsAnalyzer_initializeHadoopLogsAnalyzer
/** * @param args * string arguments. See {@code usage()} * @throws FileNotFoundException * @throws IOException */ private int initializeHadoopLogsAnalyzer(String[] args) throws FileNotFoundException, IOException { Path jobTraceFilename = null; Path topologyFilename = null; if (args.length == 0 || args[args.length - 1].charAt(0) == '-') { throw new IllegalArgumentException("No input specified."); } else { inputFilename = args[args.length - 1]; } for (int i = 0; i < args.length - (inputFilename == null ? 0 : 1); ++i) { if (StringUtils.equalsIgnoreCase("-h", args[i]) || StringUtils.equalsIgnoreCase("-help", args[i])) { usage(); return 0; } if (StringUtils.equalsIgnoreCase("-c", args[i]) || StringUtils.equalsIgnoreCase("-collect-prefixes", args[i])) { collecting = true; continue; } // these control the job digest if (StringUtils.equalsIgnoreCase("-write-job-trace", args[i])) { ++i; jobTraceFilename = new Path(args[i]); continue; } if (StringUtils.equalsIgnoreCase("-single-line-job-traces", args[i])) { prettyprintTrace = false; continue; } if (StringUtils.equalsIgnoreCase("-omit-task-details", args[i])) { omitTaskDetails = true; continue; } if (StringUtils.equalsIgnoreCase("-write-topology", args[i])) { ++i; topologyFilename = new Path(args[i]); continue; } if (StringUtils.equalsIgnoreCase("-job-digest-spectra", args[i])) { ArrayList<Integer> values = new ArrayList<Integer>(); ++i; while (i < args.length && Character.isDigit(args[i].charAt(0))) { values.add(Integer.parseInt(args[i])); ++i; } if (values.size() == 0) { throw new IllegalArgumentException("Empty -job-digest-spectra list"); } attemptTimesPercentiles = new int[values.size()]; int lastValue = 0; for (int j = 0; j < attemptTimesPercentiles.length; ++j) { if (values.get(j) <= lastValue || values.get(j) >= 100) { throw new IllegalArgumentException( "Bad -job-digest-spectra percentiles list"); } attemptTimesPercentiles[j] = values.get(j); } --i; continue; } if (StringUtils.equalsIgnoreCase("-d", args[i]) || StringUtils.equalsIgnoreCase("-debug", args[i])) { debug = true; continue; } if (StringUtils.equalsIgnoreCase("-spreads", args[i])) { int min = Integer.parseInt(args[i + 1]); int max = Integer.parseInt(args[i + 2]); if (min < max && min < 1000 && max < 1000) { spreadMin = min; spreadMax = max; spreading = true; i += 2; } continue; } // These control log-wide CDF outputs if (StringUtils.equalsIgnoreCase("-delays", args[i])) { delays = true; continue; } if (StringUtils.equalsIgnoreCase("-runtimes", args[i])) { runtimes = true; continue; } if (StringUtils.equalsIgnoreCase("-tasktimes", args[i])) { collectTaskTimes = true; continue; } if (StringUtils.equalsIgnoreCase("-v1", args[i])) { version = 1; continue; } throw new IllegalArgumentException("Unrecognized argument: " + args[i]); } runTimeDists = newDistributionBlock(); delayTimeDists = newDistributionBlock(); mapTimeSpreadDists = newDistributionBlock("map-time-spreads"); shuffleTimeSpreadDists = newDistributionBlock(); sortTimeSpreadDists = newDistributionBlock(); reduceTimeSpreadDists = newDistributionBlock(); mapTimeDists = newDistributionBlock(); shuffleTimeDists = newDistributionBlock(); sortTimeDists = newDistributionBlock(); reduceTimeDists = newDistributionBlock(); taskAttemptStartTimes = new HashMap<String, Long>(); taskReduceAttemptShuffleEndTimes = new HashMap<String, Long>(); taskReduceAttemptSortEndTimes = new HashMap<String, Long>(); taskMapAttemptFinishTimes = new HashMap<String, Long>(); taskReduceAttemptFinishTimes = new HashMap<String, Long>(); final Path inputPath = new Path(inputFilename); inputIsDirectory = pathIsDirectory(inputPath); if (jobTraceFilename != null && attemptTimesPercentiles == null) { attemptTimesPercentiles = new int[19]; for (int i = 0; i < 19; ++i) { attemptTimesPercentiles[i] = (i + 1) * 5; } } if (!inputIsDirectory) { input = maybeUncompressedPath(inputPath); } else { inputDirectoryPath = inputPath; FileSystem fs = inputPath.getFileSystem(getConf()); FileStatus[] statuses = fs.listStatus(inputPath); inputDirectoryFiles = new String[statuses.length]; for (int i = 0; i < statuses.length; ++i) { inputDirectoryFiles[i] = statuses[i].getPath().getName(); } // filter out the .crc files, if any int dropPoint = 0; for (int i = 0; i < inputDirectoryFiles.length; ++i) { String name = inputDirectoryFiles[i]; if (!(name.length() >= 4 && ".crc".equals(name .substring(name.length() - 4)))) { inputDirectoryFiles[dropPoint++] = name; } } LOG.info("We dropped " + (inputDirectoryFiles.length - dropPoint) + " crc files."); String[] new_inputDirectoryFiles = new String[dropPoint]; System.arraycopy(inputDirectoryFiles, 0, new_inputDirectoryFiles, 0, dropPoint); inputDirectoryFiles = new_inputDirectoryFiles; Arrays.sort(inputDirectoryFiles); if (!setNextDirectoryInputStream()) { throw new FileNotFoundException("Empty directory specified."); } } if (jobTraceFilename != null) { jobTraceGen = new DefaultOutputter<LoggedJob>(); jobTraceGen.init(jobTraceFilename, getConf()); if (topologyFilename != null) { topologyGen = new DefaultOutputter<LoggedNetworkTopology>(); topologyGen.init(topologyFilename, getConf()); } } return 0; }
3.68
flink_Tuple25_toString
/** * Creates a string representation of the tuple in the form (f0, f1, f2, f3, f4, f5, f6, f7, f8, * f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22, f23, f24), where the * individual fields are the value returned by calling {@link Object#toString} on that field. * * @return The string representation of the tuple. */ @Override public String toString() { return "(" + StringUtils.arrayAwareToString(this.f0) + "," + StringUtils.arrayAwareToString(this.f1) + "," + StringUtils.arrayAwareToString(this.f2) + "," + StringUtils.arrayAwareToString(this.f3) + "," + StringUtils.arrayAwareToString(this.f4) + "," + StringUtils.arrayAwareToString(this.f5) + "," + StringUtils.arrayAwareToString(this.f6) + "," + StringUtils.arrayAwareToString(this.f7) + "," + StringUtils.arrayAwareToString(this.f8) + "," + StringUtils.arrayAwareToString(this.f9) + "," + StringUtils.arrayAwareToString(this.f10) + "," + StringUtils.arrayAwareToString(this.f11) + "," + StringUtils.arrayAwareToString(this.f12) + "," + StringUtils.arrayAwareToString(this.f13) + "," + StringUtils.arrayAwareToString(this.f14) + "," + StringUtils.arrayAwareToString(this.f15) + "," + StringUtils.arrayAwareToString(this.f16) + "," + StringUtils.arrayAwareToString(this.f17) + "," + StringUtils.arrayAwareToString(this.f18) + "," + StringUtils.arrayAwareToString(this.f19) + "," + StringUtils.arrayAwareToString(this.f20) + "," + StringUtils.arrayAwareToString(this.f21) + "," + StringUtils.arrayAwareToString(this.f22) + "," + StringUtils.arrayAwareToString(this.f23) + "," + StringUtils.arrayAwareToString(this.f24) + ")"; }
3.68
hudi_HoodieBackedTableMetadataWriter_performTableServices
/** * Optimize the metadata table by running compaction, clean and archive as required. * <p> * Don't perform optimization if there are inflight operations on the dataset. This is for two reasons: * - The compaction will contain the correct data as all failed operations have been rolled back. * - Clean/compaction etc. will have the highest timestamp on the MDT and we won't be adding new operations * with smaller timestamps to metadata table (makes for easier debugging) * <p> * This adds the limitations that long-running async operations (clustering, etc.) may cause delay in such MDT * optimizations. We will relax this after MDT code has been hardened. */ @Override public void performTableServices(Option<String> inFlightInstantTimestamp) { HoodieTimer metadataTableServicesTimer = HoodieTimer.start(); boolean allTableServicesExecutedSuccessfullyOrSkipped = true; BaseHoodieWriteClient<?, I, ?, ?> writeClient = getWriteClient(); try { // Run any pending table services operations. runPendingTableServicesOperations(writeClient); Option<HoodieInstant> lastInstant = metadataMetaClient.reloadActiveTimeline().getDeltaCommitTimeline() .filterCompletedInstants() .lastInstant(); if (!lastInstant.isPresent()) { return; } // Check and run clean operations. String latestDeltacommitTime = lastInstant.get() .getTimestamp(); LOG.info("Latest deltacommit time found is " + latestDeltacommitTime + ", running clean operations."); cleanIfNecessary(writeClient, latestDeltacommitTime); // Do timeline validation before scheduling compaction/logCompaction operations. if (validateTimelineBeforeSchedulingCompaction(inFlightInstantTimestamp, latestDeltacommitTime)) { compactIfNecessary(writeClient, latestDeltacommitTime); } writeClient.archive(); LOG.info("All the table services operations on MDT completed successfully"); } catch (Exception e) { LOG.error("Exception in running table services on metadata table", e); allTableServicesExecutedSuccessfullyOrSkipped = false; throw e; } finally { long timeSpent = metadataTableServicesTimer.endTimer(); metrics.ifPresent(m -> m.updateMetrics(HoodieMetadataMetrics.TABLE_SERVICE_EXECUTION_DURATION, timeSpent)); if (allTableServicesExecutedSuccessfullyOrSkipped) { metrics.ifPresent(m -> m.incrementMetric(HoodieMetadataMetrics.TABLE_SERVICE_EXECUTION_STATUS, 1)); } else { metrics.ifPresent(m -> m.incrementMetric(HoodieMetadataMetrics.TABLE_SERVICE_EXECUTION_STATUS, -1)); } } }
3.68
hadoop_StartupProgress_setSize
/** * Sets the optional size in bytes associated with the specified phase. For * example, this can be used while loading fsimage to indicate the size of the * fsimage file. * * @param phase Phase to set * @param size long to set */ public void setSize(Phase phase, long size) { if (!isComplete()) { phases.get(phase).size = size; } }
3.68
hbase_Bytes_secureRandom
/** * Fill given array with random bytes at the specified position using a strong random number * generator. * @param b array which needs to be filled with random bytes * @param offset staring offset in array * @param length number of bytes to fill */ public static void secureRandom(byte[] b, int offset, int length) { checkPositionIndex(offset, b.length, "offset"); checkArgument(length > 0, "length must be greater than 0"); checkPositionIndex(offset + length, b.length, "offset + length"); byte[] buf = new byte[length]; SECURE_RNG.nextBytes(buf); System.arraycopy(buf, 0, b, offset, length); }
3.68
hadoop_SaslInputStream_available
/** * Returns the number of bytes that can be read from this input stream without * blocking. The <code>available</code> method of <code>InputStream</code> * returns <code>0</code>. This method <B>should</B> be overridden by * subclasses. * * @return the number of bytes that can be read from this input stream without * blocking. * @exception IOException * if an I/O error occurs. */ @Override public int available() throws IOException { if (!useWrap) { return inStream.available(); } return (ofinish - ostart); }
3.68
shardingsphere-elasticjob_ServerService_isAvailableServer
/** * Judge is available server or not. * * @param ip job server IP address * @return is available server or not */ public boolean isAvailableServer(final String ip) { return isEnableServer(ip) && hasOnlineInstances(ip); }
3.68
hadoop_BlockPoolTokenSecretManager_addKeys
/** * See {@link BlockTokenSecretManager#addKeys(ExportedBlockKeys)}. */ public void addKeys(String bpid, ExportedBlockKeys exportedKeys) throws IOException { get(bpid).addKeys(exportedKeys); }
3.68
hudi_InternalSchemaBuilder_index2Parents
/** * Build a mapping which maintains the relation between a child field id and its parent field id. * If a child field y (whose id is 9) belongs to a nested field x (whose id is 6), then (9 -> 6) will be added to the result map. * If a field has no parent field, nothing will be added. * * @param record hoodie record type. * @return a mapping from id to parentId for a record type */ public Map<Integer, Integer> index2Parents(Types.RecordType record) { Map<Integer, Integer> result = new HashMap<>(); Deque<Integer> parentIds = new LinkedList<>(); index2Parents(record, parentIds, result); return result; }
3.68
pulsar_TripleLongPriorityQueue_clear
/** * Clear all items. */ public void clear() { this.tuplesCount = 0; shrinkCapacity(); }
3.68
hadoop_ProtoTranslatorFactory_getTranslator
/** * Get a {@link ProtoTranslator} based on the given input message * types. If the type is not supported, a IllegalArgumentException * will be thrown. When adding more transformers to this factory class, * note each transformer works exactly for one message to another * (and vice versa). For each type of the message, make sure there is * a corresponding unit test added, such as * TestValidateVolumeCapabilitiesRequest. * * @param yarnProto yarn proto message * @param csiProto CSI proto message * @param <A> yarn proto message * @param <B> CSI proto message * @throws IllegalArgumentException * when given types are not supported * @return * a proto message transformer that transforms * YARN internal proto message to CSI */ public static <A, B> ProtoTranslator<A, B> getTranslator( Class<A> yarnProto, Class<B> csiProto) { if (yarnProto == ValidateVolumeCapabilitiesRequest.class && csiProto == Csi.ValidateVolumeCapabilitiesRequest.class) { return new ValidateVolumeCapabilitiesRequestProtoTranslator(); } else if (yarnProto == ValidateVolumeCapabilitiesResponse.class && csiProto == Csi.ValidateVolumeCapabilitiesResponse.class) { return new ValidationVolumeCapabilitiesResponseProtoTranslator(); } else if (yarnProto == NodePublishVolumeRequest.class && csiProto == Csi.NodePublishVolumeRequest.class) { return new NodePublishVolumeRequestProtoTranslator(); } else if (yarnProto == GetPluginInfoResponse.class && csiProto == Csi.GetPluginInfoResponse.class) { return new GetPluginInfoResponseProtoTranslator(); } else if (yarnProto == NodeUnpublishVolumeRequest.class && csiProto == Csi.NodeUnpublishVolumeRequest.class) { return new NodeUnpublishVolumeRequestProtoTranslator(); } throw new IllegalArgumentException("A problem is found while processing" + " proto message translating. Unexpected message types," + " no transformer is found can handle the transformation from type " + yarnProto.getName() + " <-> " + csiProto.getName()); }
3.68
hadoop_CombinedHostsFileWriter_writeFile
/** * Serialize a set of DatanodeAdminProperties to a json file. * @param hostsFile the json file name. * @param allDNs the set of DatanodeAdminProperties * @throws IOException */ public static void writeFile(final String hostsFile, final Set<DatanodeAdminProperties> allDNs) throws IOException { final ObjectMapper objectMapper = new ObjectMapper(); try (Writer output = new OutputStreamWriter(Files.newOutputStream(Paths.get(hostsFile)), StandardCharsets.UTF_8)) { objectMapper.writeValue(output, allDNs); } }
3.68
hbase_StorageClusterStatusModel_setAverageLoad
/**
 * @param averageLoad the average load of region servers in the cluster
 */
public void setAverageLoad(double averageLoad) {
  this.averageLoad = averageLoad;
}
3.68
framework_VAbstractCalendarPanel_focusNextDay
/**
 * Moves the focus forward the given number of days.
 */
@SuppressWarnings("deprecation")
private void focusNextDay(int days) {
    if (focusedDate == null) {
        return;
    }

    Date focusCopy = ((Date) focusedDate.clone());
    focusCopy.setDate(focusedDate.getDate() + days);
    if (!isDateInsideRange(focusCopy, getResolution())) {
        // If not inside allowed range, then do not move anything
        return;
    }

    int oldMonth = focusedDate.getMonth();
    int oldYear = focusedDate.getYear();
    focusedDate.setDate(focusedDate.getDate() + days);

    if (focusedDate.getMonth() == oldMonth
            && focusedDate.getYear() == oldYear) {
        // Month did not change, only move the selection
        focusDay(focusedDate);
    } else {
        // If the month changed we need to re-render the calendar
        displayedMonth.setMonth(focusedDate.getMonth());
        displayedMonth.setYear(focusedDate.getYear());
        renderCalendar();
    }
}
3.68
flink_DeclarativeSlotManager_reportSlotStatus
/**
 * Reports the current slot allocations for a task manager identified by the given instance id.
 *
 * @param instanceId identifying the task manager for which to report the slot status
 * @param slotReport containing the status for all of its slots
 * @return true if the slot status has been updated successfully, otherwise false
 */
@Override
public boolean reportSlotStatus(InstanceID instanceId, SlotReport slotReport) {
    checkInit();
    LOG.debug("Received slot report from instance {}: {}.", instanceId, slotReport);

    if (taskExecutorManager.isTaskManagerRegistered(instanceId)) {
        if (slotTracker.notifySlotStatus(slotReport)) {
            checkResourceRequirementsWithDelay();
        }
        return true;
    } else {
        LOG.debug(
                "Received slot report for unknown task manager with instance id {}. Ignoring this report.",
                instanceId);
        return false;
    }
}
3.68
hudi_LocalRegistry_getAllCounts
/**
 * Get all Counter type metrics.
 */
@Override
public Map<String, Long> getAllCounts(boolean prefixWithRegistryName) {
  HashMap<String, Long> countersMap = new HashMap<>();
  counters.forEach((k, v) -> {
    String key = prefixWithRegistryName ? name + "." + k : k;
    countersMap.put(key, v.getValue());
  });
  return countersMap;
}
3.68
hbase_AdaptiveLifoCoDelCallQueue_take
/**
 * Behaves as {@link LinkedBlockingQueue#take()}, except it will silently skip all calls which it
 * thinks should be dropped.
 * @return the head of this queue
 * @throws InterruptedException if interrupted while waiting
 */
@Override
public CallRunner take() throws InterruptedException {
  CallRunner cr;
  while (true) {
    if (((double) queue.size() / this.maxCapacity) > lifoThreshold) {
      numLifoModeSwitches.increment();
      cr = queue.takeLast();
    } else {
      cr = queue.takeFirst();
    }
    if (needToDrop(cr)) {
      numGeneralCallsDropped.increment();
      cr.drop();
    } else {
      return cr;
    }
  }
}
3.68
hbase_TableDescriptorBuilder_toByteArray
/** Returns the bytes in pb format */
private byte[] toByteArray() {
  return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
}
3.68
hadoop_FederationStateStoreUtils_setPassword
/**
 * Sets a specific password for <code>HikariDataSource</code> SQL connections.
 *
 * @param dataSource the <code>HikariDataSource</code> connections
 * @param password the value to set
 */
public static void setPassword(HikariDataSource dataSource, String password) {
  if (password != null) {
    dataSource.setPassword(password);
    LOG.debug("Setting non NULL Credentials for Store connection");
  } else {
    LOG.debug("NULL Credentials specified for Store connection, so ignoring");
  }
}
3.68
flink_SegmentsUtil_getByte
/**
 * get byte from segments.
 *
 * @param segments target segments.
 * @param offset value offset.
 */
public static byte getByte(MemorySegment[] segments, int offset) {
    if (inFirstSegment(segments, offset, 1)) {
        return segments[0].get(offset);
    } else {
        return getByteMultiSegments(segments, offset);
    }
}
3.68
dubbo_XdsRouter_getXdsRouteRuleMap
/**
 * For unit testing only.
 */
@Deprecated
ConcurrentHashMap<String, List<XdsRouteRule>> getXdsRouteRuleMap() {
    return xdsRouteRuleMap;
}
3.68
streampipes_SwingingDoorTrendingFilter_reset
/**
 * If the time distance between the current point and the last stored point is
 * >= compressionMaxTimeInterval, the current point is stored and upperDoor and
 * lowerDoor are reset.
 *
 * @param time current time
 * @param value current value
 * @param event current event
 */
private void reset(long time, double value, Event event) {
  lastStoredTimestamp = time;
  lastStoredDouble = value;
  lastStoredEvent = event;
  upperDoor = Integer.MIN_VALUE;
  lowerDoor = Integer.MAX_VALUE;
}
3.68
framework_SortEvent_isUserOriginated
/**
 * Returns whether this event originated from actions done by the user.
 *
 * @return true if sort event originated from user interaction
 */
public boolean isUserOriginated() {
    return userOriginated;
}
3.68
framework_DefaultDeploymentConfiguration_checkResourceCacheTime
/**
 * Log a warning if resource cache time is set but is not an integer.
 */
private void checkResourceCacheTime() {
    try {
        resourceCacheTime = Integer.parseInt(getApplicationOrSystemProperty(
                Constants.SERVLET_PARAMETER_RESOURCE_CACHE_TIME,
                Integer.toString(DEFAULT_RESOURCE_CACHE_TIME)));
    } catch (NumberFormatException e) {
        getLogger().warning(
                Constants.WARNING_RESOURCE_CACHING_TIME_NOT_NUMERIC);
        resourceCacheTime = DEFAULT_RESOURCE_CACHE_TIME;
    }
}
3.68
hbase_JSONBean_open
/**
 * Notice that closing the returned {@link Writer} will not close the {@code writer} passed in;
 * you still need to close the {@code writer} yourself.
 * <p/>
 * This is because we can only finish the json after you call {@link Writer#close()}, so if we
 * also closed the {@code writer}, nothing could be written after the json is finished.
 */
public Writer open(final PrintWriter writer) throws IOException {
  JsonWriter jsonWriter = GSON.newJsonWriter(new java.io.Writer() {

    @Override
    public void write(char[] cbuf, int off, int len) throws IOException {
      writer.write(cbuf, off, len);
    }

    @Override
    public void flush() throws IOException {
      writer.flush();
    }

    @Override
    public void close() throws IOException {
      // do nothing
    }
  });
  jsonWriter.setIndent(" ");
  jsonWriter.beginObject();
  return new Writer() {

    @Override
    public void flush() throws IOException {
      jsonWriter.flush();
    }

    @Override
    public void close() throws IOException {
      jsonWriter.endObject();
      jsonWriter.close();
    }

    @Override
    public void write(String key, String value) throws IOException {
      jsonWriter.name(key).value(value);
    }

    @Override
    public int write(MBeanServer mBeanServer, ObjectName qry, String attribute,
        boolean description, ObjectName excluded) throws IOException {
      return JSONBean.write(jsonWriter, mBeanServer, qry, attribute, description, excluded);
    }
  };
}
3.68
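A hypothetical usage sketch of the contract described above, not taken from the source: the returned Writer is closed to finish the json, while the caller remains responsible for closing the underlying PrintWriter.
JSONBean dumper = new JSONBean();
try (PrintWriter out = new PrintWriter(System.out)) {
  JSONBean.Writer json = dumper.open(out); // assumes Writer is the nested type returned above
  json.write("status", "ok");              // emits "status": "ok"
  json.close();                            // finishes the json object; 'out' stays open
}                                          // the caller closes the PrintWriter itself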
framework_LayoutManager_delayOverflowFix
/*
 * Delay the overflow fix if the involved connectors might still change
 */
private boolean delayOverflowFix(ComponentConnector componentConnector) {
    if (!currentDependencyTree.noMoreChangesExpected(componentConnector)) {
        return true;
    }
    ServerConnector parent = componentConnector.getParent();
    if (parent instanceof ComponentConnector && !currentDependencyTree
            .noMoreChangesExpected((ComponentConnector) parent)) {
        return true;
    }

    return false;
}
3.68
framework_AbstractBeanContainer_setBeanIdResolver
/**
 * Sets the resolver that finds the item id for a bean, or null not to use
 * automatic resolving.
 *
 * Methods that add a bean without specifying an id must not be called if no
 * resolver has been set.
 *
 * Note that methods taking an explicit id can be used whether a resolver
 * has been defined or not.
 *
 * @param beanIdResolver
 *            to use or null to disable automatic id resolution
 */
protected void setBeanIdResolver(
        BeanIdResolver<IDTYPE, BEANTYPE> beanIdResolver) {
    this.beanIdResolver = beanIdResolver;
}
3.68
hadoop_JobTokenSecretManager_retrievePassword
/**
 * Look up the token password/secret for the given job token identifier.
 * @param identifier the job token identifier to look up
 * @return token password/secret as byte[]
 * @throws InvalidToken
 */
@Override
public byte[] retrievePassword(JobTokenIdentifier identifier)
    throws InvalidToken {
  return retrieveTokenSecret(identifier.getJobId().toString()).getEncoded();
}
3.68
flink_Tuple4_toString
/**
 * Creates a string representation of the tuple in the form (f0, f1, f2, f3), where the
 * individual fields are the value returned by calling {@link Object#toString} on that field.
 *
 * @return The string representation of the tuple.
 */
@Override
public String toString() {
    return "("
            + StringUtils.arrayAwareToString(this.f0)
            + ","
            + StringUtils.arrayAwareToString(this.f1)
            + ","
            + StringUtils.arrayAwareToString(this.f2)
            + ","
            + StringUtils.arrayAwareToString(this.f3)
            + ")";
}
3.68
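A small illustration of the format produced by the method above; the field values are chosen arbitrarily.
// Prints "(1,a,2.0,true)" — fields are comma-separated with no spaces.
Tuple4<Integer, String, Double, Boolean> t = new Tuple4<>(1, "a", 2.0, true);
System.out.println(t.toString());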
morf_XmlPullProcessor_readNextTagInsideParent
/**
 * Reads the next tag name from the XML parser so long as it lies within the parent tag name.
 * If the close tag event for the parent is read this method will return null. Otherwise it
 * returns the name of the tag read.
 *
 * @param parentTagName The enclosing tag that forms the limit for the read operation.
 * @return The next tag name or null if there are no more tags to read inside the specified parent.
 */
protected String readNextTagInsideParent(String parentTagName) {
  int event;
  try {
    do {
      event = xmlStreamReader.next();
    } while (event == XMLStreamReader.CHARACTERS
        || event == XMLStreamReader.COMMENT
        || event == XMLStreamReader.END_ELEMENT && !xmlStreamReader.getLocalName().equals(parentTagName));
  } catch (Exception e) {
    throw new RuntimeException("Error reading data from the XML pull parser", e);
  }

  if (event == XMLStreamReader.START_ELEMENT) {
    return xmlStreamReader.getLocalName();
  } else if (event == XMLStreamReader.END_ELEMENT) {
    return null;
  } else if (event == XMLStreamReader.END_DOCUMENT) {
    throw new IllegalStateException("Unexpected end of document while looking for a tag inside [" + parentTagName + "]");
  } else {
    throw new IllegalStateException("Expecting a tag inside [" + parentTagName + "] but got [" + event + "]");
  }
}
3.68
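A hypothetical consumption pattern implied by the contract above; the tag names are illustrative and not taken from the source.
// Iterate over child tags of a hypothetical <table> element until the
// parent's closing tag is reached (readNextTagInsideParent returns null).
String tag = readNextTagInsideParent("table");
while (tag != null) {
  if ("column".equals(tag)) {
    // process the <column> element here
  }
  tag = readNextTagInsideParent("table");
}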
hadoop_OBSFileSystem_getBoundedListThreadPool
/**
 * Return bounded thread pool for list.
 *
 * @return bounded thread pool for list
 */
ThreadPoolExecutor getBoundedListThreadPool() {
  return boundedListThreadPool;
}
3.68
framework_Table_isSelectable
/**
 * Returns whether table is selectable.
 *
 * <p>
 * The table is not selectable until it's explicitly set as selectable or at
 * least one {@link ValueChangeListener} is added.
 * </p>
 *
 * @return whether table is selectable.
 */
public boolean isSelectable() {
    if (selectable == null) {
        return hasListeners(ValueChangeEvent.class);
    }
    return selectable;
}
3.68
hadoop_AzureBlobFileSystemStore_hashCode
/**
 * Returns a hash code value for the object, which is defined as
 * the hash code of the path name.
 *
 * @return a hash code value for the path name and version
 */
@Override
public int hashCode() {
  int hash = getPath().hashCode();
  hash = 89 * hash + (this.version != null ? this.version.hashCode() : 0);
  return hash;
}
3.68
hbase_ExecutorService_getExecutorLazily
/**
 * Initialize the executor lazily. Note that if an executor needs to be initialized lazily, then
 * all paths should use this method to get the executor and should not start it with
 * {@link ExecutorService#startExecutorService(ExecutorConfig)}.
 */
public ThreadPoolExecutor getExecutorLazily(ExecutorConfig config) {
  return executorMap.computeIfAbsent(config.getName(), (executorName) -> new Executor(config))
    .getThreadPoolExecutor();
}
3.68
hudi_HoodieRecord_setNewLocation
/**
 * Sets the new currentLocation of the record, after being written. This again should happen
 * exactly-once.
 */
public void setNewLocation(HoodieRecordLocation location) {
  checkState();
  assert newLocation == null;
  this.newLocation = location;
}
3.68
hbase_StaticUserWebFilter_getUsernameFromConf
/**
 * Retrieve the static username from the configuration.
 */
static String getUsernameFromConf(Configuration conf) {
  String oldStyleUgi = conf.get(DEPRECATED_UGI_KEY);
  if (oldStyleUgi != null) {
    // We can't use the normal configuration deprecation mechanism here
    // since we need to split out the username from the configured UGI.
    LOG.warn(
      DEPRECATED_UGI_KEY + " should not be used. Instead, use " + HBASE_HTTP_STATIC_USER + ".");
    return Iterables.get(Splitter.on(',').split(oldStyleUgi), 0);
  } else {
    return conf.get(HBASE_HTTP_STATIC_USER, DEFAULT_HBASE_HTTP_STATIC_USER);
  }
}
3.68
flink_MailboxProcessor_getMailboxMetricsControl
/**
 * Gets {@link MailboxMetricsController} for control and access to mailbox metrics.
 *
 * @return {@link MailboxMetricsController}.
 */
@VisibleForTesting
public MailboxMetricsController getMailboxMetricsControl() {
    return this.mailboxMetricsControl;
}
3.68
morf_SchemaUtils_view
/**
 * Create a view.
 *
 * @param viewName The name of the view.
 * @param selectStatement The underlying {@link SelectStatement}. This can be null e.g. if loading from database metadata or in testing.
 * @param dependencies names of any views that this view depends on (and therefore need to be deployed first).
 * @return {@link View} implementation based on the parameters provided.
 */
public static View view(String viewName, SelectStatement selectStatement, String... dependencies) {
  return new ViewBean(viewName, selectStatement, dependencies);
}
3.68
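A hypothetical usage sketch for the factory method above; the view name, select statement and dependency are illustrative, and the select-builder calls assume morf's SqlUtils DSL rather than being taken from the source.
// Assumes: import static org.alfasoftware.morf.sql.SqlUtils.*;
View personSummary = SchemaUtils.view(
    "PersonSummary",
    select(field("name")).from(tableRef("Person")), // hypothetical underlying select statement
    "OtherView");                                   // hypothetical view that must be deployed first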
hadoop_TaskRuntimeEstimator_hasStagnatedProgress
/**
 * Returns true if the estimator has no update records for a threshold time
 * window. This helps to identify task attempts that are stalled at the
 * beginning of execution.
 *
 * @param id the {@link TaskAttemptId} of the attempt we are asking about
 * @param timeStamp the time of the report we compare with
 * @return true if the task attempt has no progress for a given time window
 */
default boolean hasStagnatedProgress(TaskAttemptId id, long timeStamp) {
  return false;
}
3.68
hbase_ZKUtil_deleteNode
/**
 * Delete the specified node with the specified version. Sets no watches. Throws all exceptions.
 */
public static boolean deleteNode(ZKWatcher zkw, String node, int version)
  throws KeeperException {
  try {
    zkw.getRecoverableZooKeeper().delete(node, version);
    return true;
  } catch (KeeperException.BadVersionException bve) {
    return false;
  } catch (InterruptedException ie) {
    zkw.interruptedException(ie);
    return false;
  }
}
3.68
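A hypothetical compare-and-delete sketch built on the method above; the data-read helper and the znode path are assumptions for illustration, not taken from the source.
// Read the znode once, then delete it only if its version is unchanged.
Stat stat = new Stat();
byte[] data = ZKUtil.getDataNoWatch(zkw, "/hbase/example-znode", stat); // assumed helper
if (data != null) {
  boolean deleted = ZKUtil.deleteNode(zkw, "/hbase/example-znode", stat.getVersion());
  if (!deleted) {
    // a concurrent update bumped the version (the BadVersionException path returned false)
  }
}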
hadoop_DynamicIOStatistics_addMaximumFunction
/**
 * add a mapping of a key to a maximum function.
 * @param key the key
 * @param eval the evaluator
 */
void addMaximumFunction(String key, Function<String, Long> eval) {
  maximums.addFunction(key, eval);
}
3.68
flink_ListView_newListViewDataType
/** Utility method for creating a {@link DataType} of {@link ListView} explicitly. */
public static DataType newListViewDataType(DataType elementDataType) {
    return DataTypes.STRUCTURED(
            ListView.class,
            DataTypes.FIELD("list", DataTypes.ARRAY(elementDataType).bridgedTo(List.class)));
}
3.68
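A brief usage sketch; the element type is chosen for illustration only.
// Declare the data type of a ListView of strings explicitly, e.g. when
// describing an aggregate function's accumulator in Flink's Table API.
DataType listViewType = ListView.newListViewDataType(DataTypes.STRING());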
framework_LayoutManager_setThoroughSizeChck
/**
 * Set whether the measuring should use a thorough size check that evaluates
 * the presence of the element and uses calculated size, or default to a
 * slightly faster check that can result in incorrect size information if
 * the check is triggered while a transform animation is ongoing. This can
 * happen e.g. when a PopupView is opened.
 * <p>
 * By default, the thorough size check is enabled.
 *
 * @param thoroughSizeCheck
 *            {@code true} if thorough size check enabled, {@code false} if
 *            not
 * @since 8.13
 */
public void setThoroughSizeChck(boolean thoroughSizeCheck) {
    this.thoroughSizeCheck = thoroughSizeCheck;
}
3.68
dubbo_AdaptiveClassCodeGenerator_generateMethodContent
/**
 * Generate method content.
 */
private String generateMethodContent(Method method) {
    Adaptive adaptiveAnnotation = method.getAnnotation(Adaptive.class);
    StringBuilder code = new StringBuilder(512);
    if (adaptiveAnnotation == null) {
        return generateUnsupported(method);
    } else {
        int urlTypeIndex = getUrlTypeIndex(method);

        // found parameter in URL type
        if (urlTypeIndex != -1) {
            // Null pointer check
            code.append(generateUrlNullCheck(urlTypeIndex));
        } else {
            // did not find parameter in URL type
            code.append(generateUrlAssignmentIndirectly(method));
        }

        String[] value = getMethodAdaptiveValue(adaptiveAnnotation);

        boolean hasInvocation = hasInvocationArgument(method);

        code.append(generateInvocationArgumentNullCheck(method));

        code.append(generateExtNameAssignment(value, hasInvocation));
        // check extName == null?
        code.append(generateExtNameNullCheck(value));

        code.append(generateScopeModelAssignment());
        code.append(generateExtensionAssignment());

        // return statement
        code.append(generateReturnAndInvocation(method));
    }

    return code.toString();
}
3.68
morf_FieldReference_asc
/**
 * Sets ascending order on this field.
 *
 * @return this
 */
public Builder asc() {
  this.direction = Direction.ASCENDING;
  return this;
}
3.68
flink_AccumulatorHelper_compareAccumulatorTypes
/** Compare both classes and throw {@link UnsupportedOperationException} if they differ. */
@SuppressWarnings("rawtypes")
public static void compareAccumulatorTypes(
        Object name, Class<? extends Accumulator> first, Class<? extends Accumulator> second)
        throws UnsupportedOperationException {
    if (first == null || second == null) {
        throw new NullPointerException();
    }

    if (first != second) {
        if (!first.getName().equals(second.getName())) {
            throw new UnsupportedOperationException(
                    "The accumulator object '"
                            + name
                            + "' was created with two different types: "
                            + first.getName()
                            + " and "
                            + second.getName());
        } else {
            // damn, name is the same, but different classloaders
            throw new UnsupportedOperationException(
                    "The accumulator object '"
                            + name
                            + "' was created with two different classes: "
                            + first
                            + " and "
                            + second
                            + " Both have the same type ("
                            + first.getName()
                            + ") but different classloaders: "
                            + first.getClassLoader()
                            + " and "
                            + second.getClassLoader());
        }
    }
}
3.68
framework_SelectorPath_getNameWithCount
/**
 * Get variable name with counter for given component name.
 *
 * @param name
 *            Component name
 * @return name followed by count
 */
protected String getNameWithCount(String name) {
    if (!counter.containsKey(name)) {
        counter.put(name, 0);
    }
    counter.put(name, counter.get(name) + 1);
    name += counter.get(name);
    return name;
}
3.68
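The resulting numbering, illustrated as a sketch derived directly from the method body above; the component names are hypothetical.
// Each distinct name gets its own running counter, starting at 1.
getNameWithCount("button"); // -> "button1"
getNameWithCount("button"); // -> "button2"
getNameWithCount("label");  // -> "label1"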
hibernate-validator_ConstraintAnnotationVisitor_visitTypeAsAnnotationType
/**
 * <p>
 * Checks whether the given annotations are correctly specified at the given
 * annotation type declaration. The following checks are performed:
 * </p>
 * <ul>
 * <li>
 * The only annotation types allowed to be annotated with other constraint
 * annotations are composed constraint annotation type declarations.</li>
 * </ul>
 */
@Override
public Void visitTypeAsAnnotationType(
        TypeElement annotationType,
        List<AnnotationMirror> mirrors) {

    checkConstraints( annotationType, mirrors );

    return null;
}
3.68