Columns: name (string, lengths 12 to 178), code_snippet (string, lengths 8 to 36.5k), score (float64, range 3.26 to 3.68)
hbase_DefaultVisibilityLabelServiceImpl_mutateLabelsRegion
/** * Adds the mutations to the labels region and sets the results into finalOpStatus. finalOpStatus * might have some entries in it where the OpStatus is FAILURE. We will leave those as-is and set * the others in order. * @return whether we need a ZK update or not. */ private boolean mutateLabelsRegion(List<Mutation> mutations, OperationStatus[] finalOpStatus) throws IOException { OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()])); int i = 0; boolean updateZk = false; for (OperationStatus status : opStatus) { // Update ZK when at least one of the mutations was added successfully. updateZk = updateZk || (status.getOperationStatusCode() == OperationStatusCode.SUCCESS); for (; i < finalOpStatus.length; i++) { if (finalOpStatus[i] == null) { finalOpStatus[i] = status; break; } } } return updateZk; }
3.68
pulsar_ManagedCursorContainer_getSlowestReaderPosition
/** * Get the slowest reader position for the cursors that are ordered. * * @return the slowest reader position */ public PositionImpl getSlowestReaderPosition() { long stamp = rwLock.readLock(); try { return heap.isEmpty() ? null : heap.get(0).position; } finally { rwLock.unlockRead(stamp); } }
3.68
shardingsphere-elasticjob_QueryParameterMap_add
/** * Add value. * * @param parameterName parameter name * @param value value */ public void add(final String parameterName, final String value) { List<String> values = queryMap.get(parameterName); if (null == values) { values = new LinkedList<>(); } values.add(value); put(parameterName, values); }
3.68
streampipes_SpGeometryBuilder_getPrecisionModel
/** * Creates a {@link org.locationtech.jts.geom.PrecisionModel} with a specific precision. * For WGS84 (EPSG:4326), a {@link org.locationtech.jts.geom.PrecisionModel#FIXED} model with * 7 decimal positions (scale 1000000) will be created. Any other EPSG code will create a precision model * with {@link org.locationtech.jts.geom.PrecisionModel#FLOATING}. * * @param epsg EPSG Code representing SRID * @return {@link org.locationtech.jts.geom.PrecisionModel} */ protected static PrecisionModel getPrecisionModel(Integer epsg) { PrecisionModel precisionModel; if (epsg == 4326) { // use scale precision with 7 decimal positions like default OSM precisionModel = new PrecisionModel(1000000); } else { // use default constructor precisionModel = new PrecisionModel(); } return precisionModel; }
3.68
framework_BarInUIDL_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Verify that there is no problem with messages containing | by clicking the button repeatedly"; }
3.68
morf_OracleDialect_primaryKeyConstraintName
/** * Form the standard name for a table's primary key constraint. * * @param tableName Name of the table for which the primary key constraint name is required. * @return Name of constraint. */ private String primaryKeyConstraintName(String tableName) { return truncatedTableNameWithSuffix(tableName, "_PK"); }
3.68
hadoop_RouterQuotaUpdateService_getMountTableStore
/** * Get mount table store management interface. * @return MountTableStore instance. * @throws IOException if the mount table state store is not available. */ private MountTableStore getMountTableStore() throws IOException { if (this.mountTableStore == null) { this.mountTableStore = router.getStateStore().getRegisteredRecordStore( MountTableStore.class); if (this.mountTableStore == null) { throw new IOException("Mount table state store is not available."); } } return this.mountTableStore; }
3.68
hudi_SpillableMapUtils_convertToHoodieRecordPayload
/** * Utility method to convert a GenericRecord to HoodieRecord using schema and payload class. */ public static <R> HoodieRecord<R> convertToHoodieRecordPayload(GenericRecord record, String payloadClazz, String preCombineField, Pair<String, String> recordKeyPartitionPathFieldPair, boolean withOperationField, Option<String> partitionName, Option<Schema> schemaWithoutMetaFields) { final String recKey = record.get(recordKeyPartitionPathFieldPair.getKey()).toString(); final String partitionPath = (partitionName.isPresent() ? partitionName.get() : record.get(recordKeyPartitionPathFieldPair.getRight()).toString()); Object preCombineVal = getPreCombineVal(record, preCombineField); HoodieOperation operation = withOperationField ? HoodieOperation.fromName(getNullableValAsString(record, HoodieRecord.OPERATION_METADATA_FIELD)) : null; if (schemaWithoutMetaFields.isPresent()) { Schema schema = schemaWithoutMetaFields.get(); GenericRecord recordWithoutMetaFields = new GenericData.Record(schema); for (Schema.Field f : schema.getFields()) { recordWithoutMetaFields.put(f.name(), record.get(f.name())); } record = recordWithoutMetaFields; } HoodieRecord<? extends HoodieRecordPayload> hoodieRecord = new HoodieAvroRecord<>(new HoodieKey(recKey, partitionPath), HoodieRecordUtils.loadPayload(payloadClazz, new Object[] {record, preCombineVal}, GenericRecord.class, Comparable.class), operation); return (HoodieRecord<R>) hoodieRecord; }
3.68
flink_ProcessWindowFunction_clear
/** * Deletes any state in the {@code Context} when the Window expires (the watermark passes its * {@code maxTimestamp} + {@code allowedLateness}). * * @param context The context to which the window is being evaluated * @throws Exception The function may throw exceptions to fail the program and trigger recovery. */ public void clear(Context context) throws Exception {}
3.68
hbase_RestoreSnapshotHelper_cloneHdfsRegions
/** * Clone specified regions. For each region create a new region and create an HFileLink for each * hfile. */ private RegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec, final Map<String, SnapshotRegionManifest> regionManifests, final List<RegionInfo> regions) throws IOException { if (regions == null || regions.isEmpty()) return null; final Map<String, RegionInfo> snapshotRegions = new HashMap<>(regions.size()); final String snapshotName = snapshotDesc.getName(); // clone the region info (changing the embedded tableName to the new one) RegionInfo[] clonedRegionsInfo = new RegionInfo[regions.size()]; for (int i = 0; i < clonedRegionsInfo.length; ++i) { // clone the region info from the snapshot region info RegionInfo snapshotRegionInfo = regions.get(i); clonedRegionsInfo[i] = cloneRegionInfo(snapshotRegionInfo); // add the region name mapping between snapshot and cloned String snapshotRegionName = snapshotRegionInfo.getEncodedName(); String clonedRegionName = clonedRegionsInfo[i].getEncodedName(); regionsMap.put(Bytes.toBytes(snapshotRegionName), Bytes.toBytes(clonedRegionName)); LOG.info("clone region=" + snapshotRegionName + " as " + clonedRegionName + " in snapshot " + snapshotName); // Add mapping between cloned region name and snapshot region info snapshotRegions.put(clonedRegionName, snapshotRegionInfo); } // create the regions on disk ModifyRegionUtils.createRegions(exec, conf, rootDir, tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() { @Override public void fillRegion(final HRegion region) throws IOException { RegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName()); cloneRegion(region, snapshotHri, regionManifests.get(snapshotHri.getEncodedName())); } }); return clonedRegionsInfo; }
3.68
querydsl_Expressions_timePath
/** * Create a new Path expression * * @param type type of expression * @param metadata path metadata * @param <T> type of expression * @return path expression */ public static <T extends Comparable<?>> TimePath<T> timePath(Class<? extends T> type, PathMetadata metadata) { return new TimePath<T>(type, metadata); }
3.68
hbase_HBCKServerCrashProcedure_isMatchingRegionLocation
/** * The RegionStateNode will not have a location if a confirm of an OPEN fails. On fail, the * RegionStateNode regionLocation is set to null. This is 'looser' than the test done in the * superclass. The HBCKSCP has been scheduled by an operator via hbck2 probably at the behest of a * report of an 'Unknown Server' in the 'HBCK Report'. Let the operator's operation succeed even in * the case where the region location in the RegionStateNode is null. */ @Override protected boolean isMatchingRegionLocation(RegionStateNode rsn) { return super.isMatchingRegionLocation(rsn) || rsn.getRegionLocation() == null; }
3.68
flink_TimeUtils_getStringInMillis
/** * @param duration to convert to string * @return duration string in millis */ public static String getStringInMillis(final Duration duration) { return duration.toMillis() + TimeUnit.MILLISECONDS.labels.get(0); }
3.68
hbase_BooleanStateStore_get
/** * Returns true if the flag is on, otherwise false */ public boolean get() { return on; }
3.68
hudi_MarkerHandler_getCreateAndMergeMarkers
/** * @param markerDir marker directory path * @return all marker paths of write IO type "CREATE" and "MERGE" */ public Set<String> getCreateAndMergeMarkers(String markerDir) { return getAllMarkers(markerDir).stream() .filter(markerName -> !markerName.endsWith(IOType.APPEND.name())) .collect(Collectors.toSet()); }
3.68
hadoop_RouterDelegationTokenSecretManager_updateStoredToken
/** * The Router supports updating a stored token. * * @param identifier RMDelegationToken. * @param tokenInfo DelegationTokenInformation. */ public void updateStoredToken(RMDelegationTokenIdentifier identifier, DelegationTokenInformation tokenInfo) { try { long renewDate = tokenInfo.getRenewDate(); String token = RouterDelegationTokenSupport.encodeDelegationTokenInformation(tokenInfo); federationFacade.updateStoredToken(identifier, renewDate, token); } catch (Exception e) { if (!shouldIgnoreException(e)) { LOG.error("Error in updating persisted RMDelegationToken with sequence number: {}.", identifier.getSequenceNumber()); ExitUtil.terminate(1, e); } } }
3.68
hadoop_SnappyDecompressor_finished
/** * Returns true if the end of the decompressed * data output stream has been reached. * * @return <code>true</code> if the end of the decompressed * data output stream has been reached. */ @Override public boolean finished() { return (finished && uncompressedDirectBuf.remaining() == 0); }
3.68
flink_CheckpointConfig_isFailOnCheckpointingErrors
/** * This determines the behaviour when checkpoint errors occur. If this returns true, which is * equivalent to a tolerableCheckpointFailureNumber of zero, the job manager will fail the whole * job once it receives a declined checkpoint message. If this returns false, which is equivalent * to a tolerableCheckpointFailureNumber of Integer.MAX_VALUE (meaning unlimited), the job * manager will not fail the whole job no matter how many declined checkpoints it receives. * * @deprecated Use {@link #getTolerableCheckpointFailureNumber()}. */ @Deprecated public boolean isFailOnCheckpointingErrors() { return getTolerableCheckpointFailureNumber() == 0; }
3.68
morf_ConnectionResourcesBean_getUserName
/** * @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#getUserName() */ @Override public String getUserName() { return userName; }
3.68
morf_SchemaChangeSequence_getUpgradeTableResolution
/** * @return {@link UpgradeTableResolution} for this upgrade */ public UpgradeTableResolution getUpgradeTableResolution() { return upgradeTableResolution; }
3.68
hbase_ConnectionCache_shutdown
/** * Called when cache is no longer needed so that it can perform cleanup operations */ public void shutdown() { if (choreService != null) choreService.shutdown(); }
3.68
querydsl_AbstractDomainExporter_execute
/** * Export the contents * * @throws IOException */ public void execute() throws IOException { // collect types try { collectTypes(); } catch (Exception e) { throw new QueryException(e); } // go through supertypes Set<Supertype> additions = new HashSet<>(); for (Map.Entry<Class<?>, EntityType> entry : allTypes.entrySet()) { EntityType entityType = entry.getValue(); if (entityType.getSuperType() != null && !allTypes.containsKey(entityType.getSuperType().getType().getJavaClass())) { additions.add(entityType.getSuperType()); } } for (Supertype type : additions) { type.setEntityType(createEntityType(type.getType(), this.superTypes)); } // merge supertype fields into subtypes Set<EntityType> handled = new HashSet<EntityType>(); for (EntityType type : superTypes.values()) { addSupertypeFields(type, allTypes, handled); } for (EntityType type : entityTypes.values()) { addSupertypeFields(type, allTypes, handled); } for (EntityType type : embeddableTypes.values()) { addSupertypeFields(type, allTypes, handled); } // serialize them serialize(superTypes, supertypeSerializer); serialize(embeddableTypes, embeddableSerializer); serialize(entityTypes, entitySerializer); }
3.68
hadoop_WorkReport_getSuccess
/** * @return True if the work was processed successfully. */ public boolean getSuccess() { return success; }
3.68
hadoop_AzureBlobFileSystem_getOwnerUserPrimaryGroup
/** * Get the group name of the owner of the FS. * @return primary group name */ public String getOwnerUserPrimaryGroup() { return abfsStore.getPrimaryGroup(); }
3.68
hbase_RestoreSnapshotHelper_restoreStoreFile
/** * Create a new {@link HFileLink} to reference the store file. * <p> * The store file in the snapshot can be a simple hfile, an HFileLink or a reference. * <ul> * <li>hfile: abc -> table=region-abc * <li>reference: abc.1234 -> table=region-abc.1234 * <li>hfilelink: table=region-hfile -> table=region-hfile * </ul> * @param familyDir destination directory for the store file * @param regionInfo destination region info for the table * @param createBackRef - Whether back reference should be created. Defaults to true. * @param storeFile store file name (can be a Reference, HFileLink or simple HFile) */ private String restoreStoreFile(final Path familyDir, final RegionInfo regionInfo, final SnapshotRegionManifest.StoreFile storeFile, final boolean createBackRef) throws IOException { String hfileName = storeFile.getName(); if (HFileLink.isHFileLink(hfileName)) { return HFileLink.createFromHFileLink(conf, fs, familyDir, hfileName, createBackRef); } else if (StoreFileInfo.isReference(hfileName)) { return restoreReferenceFile(familyDir, regionInfo, storeFile); } else { return HFileLink.create(conf, fs, familyDir, regionInfo, hfileName, createBackRef); } }
3.68
hudi_TableHeader_getFieldNames
/** * Get all field names. */ public List<String> getFieldNames() { return fieldNames; }
3.68
dubbo_ProtobufTypeBuilder_generateSimpleFiledName
/** * Get the property name (for non-collection, non-map properties) from a setter method name.<br/> * e.g. setXXX();<br/> * * @param methodName the setter method name * @return the derived property name */ private String generateSimpleFiledName(String methodName) { return toCamelCase(methodName.substring(3)); }
3.68
hbase_BlockCacheUtil_toJSON
/** Returns JSON string of <code>bc</code> content. */ public static String toJSON(BlockCache bc) throws IOException { return GSON.toJson(bc); }
3.68
hadoop_RouterWebServices_initializePipeline
/** * Initializes the request interceptor pipeline for the specified user. * * @param user specified user. */ private RequestInterceptorChainWrapper initializePipeline(String user) { synchronized (this.userPipelineMap) { if (this.userPipelineMap.containsKey(user)) { LOG.info("Request to start an already existing user: {}" + " was received, so ignoring.", user); return userPipelineMap.get(user); } RequestInterceptorChainWrapper chainWrapper = new RequestInterceptorChainWrapper(); try { // We should init the pipeline instance after it is created and then // add it to the map, to ensure thread safety. LOG.info("Initializing request processing pipeline for user: {}.", user); RESTRequestInterceptor interceptorChain = this.createRequestInterceptorChain(); interceptorChain.init(user); RouterClientRMService routerClientRMService = router.getClientRMProxyService(); interceptorChain.setRouterClientRMService(routerClientRMService); chainWrapper.init(interceptorChain); } catch (Exception e) { LOG.error("Init RESTRequestInterceptor error for user: {}", user, e); throw e; } this.userPipelineMap.put(user, chainWrapper); return chainWrapper; } }
3.68
hmily_HmilyTacLocalParticipantExecutor_confirm
/** * Do confirm. * * @param participant hmily participant */ public static void confirm(final HmilyParticipant participant) { List<HmilyParticipantUndo> undoList = HmilyParticipantUndoCacheManager.getInstance().get(participant.getParticipantId()); for (HmilyParticipantUndo undo : undoList) { cleanUndo(undo); } cleanHmilyParticipant(participant); }
3.68
framework_GridMultiSelect_selectItems
/** * Adds the given items to the set of currently selected items. * <p> * By default this does not clear any previous selection. To do that, use * {@link #deselectAll()}. * <p> * If all the items were already selected, this is a NO-OP. * <p> * This is a short-hand for {@link #updateSelection(Set, Set)} with nothing * to deselect. * * @param items * to add to selection, not {@code null} */ public void selectItems(T... items) { model.selectItems(items); }
3.68
hadoop_FileIoProvider_getShareDeleteFileInputStream
/** * Create a FileInputStream using * {@link NativeIO#getShareDeleteFileDescriptor}. * Wraps the created input stream to intercept input calls * before delegating to the wrapped stream. * * @param volume target volume. null if unavailable. * @param f File object. * @param offset the offset position, measured in bytes from the * beginning of the file, at which to set the file * pointer. * @return FileInputStream to the given file object. * @throws FileNotFoundException if the file is not found. */ public FileInputStream getShareDeleteFileInputStream( @Nullable FsVolumeSpi volume, File f, long offset) throws IOException { final long begin = profilingEventHook.beforeMetadataOp(volume, OPEN); FileInputStream fis = null; try { faultInjectorEventHook.beforeMetadataOp(volume, OPEN); fis = new WrappedFileInputStream(volume, NativeIO.getShareDeleteFileDescriptor(f, offset)); profilingEventHook.afterMetadataOp(volume, OPEN, begin); return fis; } catch(Exception e) { IOUtils.closeStream(fis); onFailure(volume, begin); throw e; } }
3.68
flink_GSFileSystemOptions_getWriterTemporaryBucketName
/** * The temporary bucket name to use for recoverable writes, if different from the final bucket * name. */ public Optional<String> getWriterTemporaryBucketName() { return flinkConfig.getOptional(WRITER_TEMPORARY_BUCKET_NAME); }
3.68
framework_VRadioButtonGroup_setHtmlContentAllowed
/** * Sets whether HTML is allowed in the item captions. If set to * {@code true}, the captions are displayed as HTML and the developer is * responsible for ensuring no harmful HTML is used. If set to * {@code false}, the content is displayed as plain text. * <p> * This value is delegated from the RadioButtonGroupState. * * @param htmlContentAllowed * {@code true} if the captions are used as HTML, {@code false} * if used as plain text */ public void setHtmlContentAllowed(boolean htmlContentAllowed) { this.htmlContentAllowed = htmlContentAllowed; }
3.68
hadoop_OBSDataBlocks_enterClosedState
/** * Enter the closed state. * * @return true if the class was in any other state, implying that the * subclass should do its close operations */ protected synchronized boolean enterClosedState() { if (!state.equals(DestState.Closed)) { enterState(null, DestState.Closed); return true; } else { return false; } }
3.68
hbase_MasterProcedureScheduler_getMetaQueue
// ============================================================================ // Meta Queue Lookup Helpers // ============================================================================ private MetaQueue getMetaQueue() { MetaQueue node = AvlTree.get(metaMap, TableName.META_TABLE_NAME, META_QUEUE_KEY_COMPARATOR); if (node != null) { return node; } node = new MetaQueue(locking.getMetaLock()); metaMap = AvlTree.insert(metaMap, node); return node; }
3.68
hudi_AvroSchemaCompatibility_recursionInProgress
/** * Returns a details object representing a state indicating that recursion is in * progress. * * @return a SchemaCompatibilityDetails object with RECURSION_IN_PROGRESS * SchemaCompatibilityType, and no other state. */ public static SchemaCompatibilityResult recursionInProgress() { return RECURSION_IN_PROGRESS; }
3.68
flink_AbstractIterativeTask_createWorksetUpdateOutputCollector
/** * Creates a new {@link WorksetUpdateOutputCollector}. * * <p>This collector is used by {@link IterationIntermediateTask} or {@link IterationTailTask} * to update the workset. * * <p>If a non-null delegate is given, the new {@link Collector} will write to the workset * and also call collect(T) of the delegate. * * @param delegate null -OR- the delegate on which to call collect() by the newly created * collector * @return a new {@link WorksetUpdateOutputCollector} */ protected Collector<OT> createWorksetUpdateOutputCollector(Collector<OT> delegate) { DataOutputView outputView = worksetBackChannel.getWriteEnd(); TypeSerializer<OT> serializer = getOutputSerializer(); return new WorksetUpdateOutputCollector<OT>(outputView, serializer, delegate); }
3.68
hadoop_WordList_add
/** * Adds the specified word to the list if the word is not already added. */ public void add(String word) { if (!contains(word)) { int index = getSize(); list.put(word, index); isUpdated = true; } }
3.68
framework_LegacyWindow_executeJavaScript
/** * Executes JavaScript in this window. * * <p> * This method allows one to inject javascript from the server to client. A * client implementation is not required to implement this functionality, * but currently all web-based clients do implement this. * </p> * * <p> * Executing javascript this way often leads to cross-browser compatibility * issues and regressions that are hard to resolve. Use of this method * should be avoided and instead it is recommended to create new widgets * with GWT. For more info on creating own, reusable client-side widgets in * Java, read the corresponding chapter in Book of Vaadin. * </p> * * @param script * JavaScript snippet that will be executed. * * @deprecated As of 7.0, use JavaScript.getCurrent().execute(String) * instead */ @Deprecated public void executeJavaScript(String script) { getPage().getJavaScript().execute(script); }
3.68
morf_InlineTableUpgrader_writeStatement
/** * Write out SQL */ private void writeStatement(String statement) { writeStatements(Collections.singletonList(statement)); }
3.68
flink_FlinkBushyJoinReorderRule_reorderInnerJoin
/** * Reorder all the inner join type input factors in the multiJoin. * * <p>The result contains the selected join order of each layer and is stored in a HashMap. The * number of layers is equal to the number of inner join type input factors in the multiJoin. * E.g. for inner join case ((A IJ B) IJ C): * * <p>The stored HashMap of the first layer in the result list is: [(Set(0), JoinPlan(Set(0), A)), * (Set(1), JoinPlan(Set(1), B)), (Set(2), JoinPlan(Set(2), C))]. * * <p>The stored HashMap of the second layer is [(Set(0, 1), JoinPlan(Set(0, 1), (A J B))), (Set(0, * 2), JoinPlan(Set(0, 2), (A J C))), (Set(1, 2), JoinPlan(Set(1, 2), (B J C)))]. * * <p>The stored HashMap of the third layer is [(Set(1, 0, 2), JoinPlan(Set(1, 0, 2), ((B J A) J * C)))]. */ private static List<Map<Set<Integer>, JoinPlan>> reorderInnerJoin( RelBuilder relBuilder, LoptMultiJoin multiJoin) { int numJoinFactors = multiJoin.getNumJoinFactors(); List<Map<Set<Integer>, JoinPlan>> foundPlans = new ArrayList<>(); // First, we put all join factors in MultiJoin into level 0. Map<Set<Integer>, JoinPlan> firstLevelJoinPlanMap = new LinkedHashMap<>(); for (int i = 0; i < numJoinFactors; i++) { if (!multiJoin.isNullGenerating(i)) { Set<Integer> set1 = new HashSet<>(); Set<Integer> set2 = new LinkedHashSet<>(); set1.add(i); set2.add(i); RelNode joinFactor = multiJoin.getJoinFactor(i); firstLevelJoinPlanMap.put(set1, new JoinPlan(set2, joinFactor)); } } foundPlans.add(firstLevelJoinPlanMap); // If the multiJoin is a full outer join, we will reorder it in method addOuterJoinToTop(). if (multiJoin.getMultiJoinRel().isFullOuterJoin()) { return foundPlans; } // Build plans for next levels until the found plans size equals the number of join factors, // or no possible plan exists for the next level. while (foundPlans.size() < numJoinFactors) { Map<Set<Integer>, JoinPlan> nextLevelJoinPlanMap = foundNextLevel(relBuilder, new ArrayList<>(foundPlans), multiJoin); if (nextLevelJoinPlanMap.size() == 0) { break; } foundPlans.add(nextLevelJoinPlanMap); } return foundPlans; }
3.68
hadoop_RBFMetrics_getActiveNamenodeRegistrations
/** * Fetches the most active namenode memberships for all known nameservices. * The fetched membership may or may not be active. Excludes expired * memberships. * @throws IOException if the query could not be performed. * @return List of the most active NNs from each known nameservice. */ private List<MembershipState> getActiveNamenodeRegistrations() throws IOException { List<MembershipState> resultList = new ArrayList<>(); if (membershipStore == null) { return resultList; } GetNamespaceInfoRequest request = GetNamespaceInfoRequest.newInstance(); GetNamespaceInfoResponse response = membershipStore.getNamespaceInfo(request); for (FederationNamespaceInfo nsInfo : response.getNamespaceInfo()) { // Fetch the most recent namenode registration String nsId = nsInfo.getNameserviceId(); List<? extends FederationNamenodeContext> nns = namenodeResolver.getNamenodesForNameserviceId(nsId, false); if (nns != null) { FederationNamenodeContext nn = nns.get(0); if (nn instanceof MembershipState) { resultList.add((MembershipState) nn); } } } return resultList; }
3.68
rocketmq-connect_ProcessingContext_executingClass
/** * @param klass set the class which is currently executing. */ public void executingClass(Class<?> klass) { this.klass = klass; }
3.68
flink_PlanNode_getBroadcastInputs
/** Gets a list of all broadcast inputs attached to this node. */ public List<NamedChannel> getBroadcastInputs() { return this.broadcastInputs; }
3.68
hadoop_ErasureCodec_getCodecOptions
/** * Get an {@link ErasureCodecOptions}. * @return erasure codec options */ public ErasureCodecOptions getCodecOptions() { return codecOptions; }
3.68
hadoop_GetContentSummaryOperation_getDirSummary
/** * Return the {@link ContentSummary} of a given directory. * * @param dir dir to scan * @return the content summary * @throws FileNotFoundException if the path does not resolve * @throws IOException IO failure */ public ContentSummary getDirSummary(Path dir) throws IOException { long totalLength = 0; long fileCount = 0; long dirCount = 1; RemoteIterator<S3ALocatedFileStatus> it = callbacks.listFilesIterator(dir, true); Set<Path> dirSet = new HashSet<>(); Set<Path> pathsTraversed = new HashSet<>(); while (it.hasNext()) { S3ALocatedFileStatus fileStatus = it.next(); Path filePath = fileStatus.getPath(); if (fileStatus.isDirectory() && !filePath.equals(dir)) { dirSet.add(filePath); buildDirectorySet(dirSet, pathsTraversed, dir, filePath.getParent()); } else if (!fileStatus.isDirectory()) { fileCount += 1; totalLength += fileStatus.getLen(); buildDirectorySet(dirSet, pathsTraversed, dir, filePath.getParent()); } } // Add the list's IOStatistics iostatistics.aggregate(retrieveIOStatistics(it)); return new ContentSummary.Builder().length(totalLength). fileCount(fileCount).directoryCount(dirCount + dirSet.size()). spaceConsumed(totalLength).build(); }
3.68
hadoop_RouterAuditLogger_logSuccess
/** * Create a readable and parseable audit log string for a successful event. * * @param user User who made the service request to the Router * @param operation Operation requested by the user. * @param target The target on which the operation is being performed. * @param appId Application Id in which operation was performed. * @param subClusterId Subcluster Id in which operation is performed. * * <br><br> * Note that the {@link RouterAuditLogger} uses tabs ('\t') as a key-val * delimiter and hence the value fields should not contains tabs ('\t'). */ public static void logSuccess(String user, String operation, String target, ApplicationId appId, SubClusterId subClusterId) { if (LOG.isInfoEnabled()) { LOG.info(createSuccessLog(user, operation, target, appId, subClusterId)); } }
3.68
flink_MurmurHashUtil_hashUnsafeBytes
/** * Hash unsafe bytes. * * @param base base unsafe object * @param offset offset for unsafe object * @param lengthInBytes length in bytes * @return hash code */ public static int hashUnsafeBytes(Object base, long offset, int lengthInBytes) { return hashUnsafeBytes(base, offset, lengthInBytes, DEFAULT_SEED); }
3.68
hbase_AbstractHBaseTool_parseLong
/** * Parse a number and enforce a range. */ public static long parseLong(String s, long minValue, long maxValue) { long l = Long.parseLong(s); if (l < minValue || l > maxValue) { throw new IllegalArgumentException( "The value " + l + " is out of range [" + minValue + ", " + maxValue + "]"); } return l; }
3.68
hadoop_Quota_andByStorageType
/** * Invoke predicate by each storage type and bitwise AND the results. * * @param predicate the function test the storage type. * @return true if bitwise AND by all storage type returns true, false otherwise. */ public static boolean andByStorageType(Predicate<StorageType> predicate) { boolean res = true; for (StorageType type : StorageType.values()) { res &= predicate.test(type); } return res; }
3.68
framework_HierarchyRenderer_setStyleNames
/** * Set the style name prefix for the node, expander and cell-content * elements. * * @param styleName * the style name to set */ public void setStyleNames(String styleName) { nodeStyleName = styleName + "-node"; expanderStyleName = styleName + "-expander"; cellContentStyleName = styleName + "-cell-content"; }
3.68
flink_RocksDBNativeMetricOptions_enableNumDeletesActiveMemTable
/** Enables the metric for the total number of delete entries in the active memtable. */ public void enableNumDeletesActiveMemTable() { this.properties.add(RocksDBProperty.NumDeletesActiveMemTable.getRocksDBProperty()); }
3.68
framework_VaadinSession_getUIById
/** * Returns a UI with the given id. * <p> * This is meant for framework internal use. * </p> * * @param uiId * The UI id * @return The UI with the given id or null if not found */ public UI getUIById(int uiId) { assert hasLock(); return uIs.get(uiId); }
3.68
pulsar_ManagedLedgerConfig_setMinimumBacklogEntriesForCaching
/** * Set the minimum backlog after which the broker will start caching backlog reads. * * @param minimumBacklogEntriesForCaching */ public void setMinimumBacklogEntriesForCaching(int minimumBacklogEntriesForCaching) { this.minimumBacklogEntriesForCaching = minimumBacklogEntriesForCaching; }
3.68
hbase_WALKeyImpl_getOriginatingClusterId
/** * @return the cluster id on which the change has originated. If there is no such cluster, it * returns DEFAULT_CLUSTER_ID (cases where replication is not enabled) */ @Override public UUID getOriginatingClusterId() { return clusterIds.isEmpty() ? HConstants.DEFAULT_CLUSTER_ID : clusterIds.get(0); }
3.68
hadoop_MapReduceJobPropertiesParser_fromString
// Maps the value of the specified key. private DataType<?> fromString(String key, String value) { DefaultDataType defaultValue = new DefaultDataType(value); if (value != null) { // check known configs // job-name String latestKey = getLatestKeyName(key); if (MRJobConfig.JOB_NAME.equals(latestKey)) { return new JobName(value); } // user-name if (MRJobConfig.USER_NAME.equals(latestKey)) { return new UserName(value); } // queue-name if (MRJobConfig.QUEUE_NAME.equals(latestKey)) { return new QueueName(value); } if (MRJobConfig.MAP_JAVA_OPTS.equals(latestKey) || MRJobConfig.REDUCE_JAVA_OPTS.equals(latestKey)) { List<String> heapOptions = new ArrayList<String>(); extractMaxHeapOpts(value, heapOptions, new ArrayList<String>()); extractMinHeapOpts(value, heapOptions, new ArrayList<String>()); return new DefaultDataType(StringUtils.join(heapOptions, ' ')); } //TODO compression? //TODO Other job configs like FileOutputFormat/FileInputFormat etc // check if the config parameter represents a number try { format.parse(value); return defaultValue; } catch (ParseException pe) {} // check if the config parameter represents a boolean // avoiding exceptions if ("true".equals(value) || "false".equals(value)) { return defaultValue; } // check if the config parameter represents a class if (latestKey.endsWith(".class") || latestKey.endsWith(".codec")) { return new ClassName(value); } // handle distributed cache sizes and timestamps if (latestKey.endsWith("sizes") || latestKey.endsWith(".timestamps")) { return defaultValue; } // check if the config parameter represents a file-system path //TODO: Make this concrete .location .path .dir .jar? if (latestKey.endsWith(".dir") || latestKey.endsWith(".location") || latestKey.endsWith(".jar") || latestKey.endsWith(".path") || latestKey.endsWith(".logfile") || latestKey.endsWith(".file") || latestKey.endsWith(".files") || latestKey.endsWith(".archives")) { try { return new FileName(value); } catch (Exception ioe) {} } } return null; }
3.68
flink_BashJavaUtils_getJmResourceParams
/** Generate and print JVM parameters of Flink Master resources as one line. */ @VisibleForTesting static List<String> getJmResourceParams(Configuration configuration) { JobManagerProcessSpec jobManagerProcessSpec = JobManagerProcessUtils.processSpecFromConfigWithNewOptionToInterpretLegacyHeap( configuration, JobManagerOptions.JVM_HEAP_MEMORY); logMasterConfiguration(jobManagerProcessSpec); return Arrays.asList( JobManagerProcessUtils.generateJvmParametersStr( jobManagerProcessSpec, configuration), JobManagerProcessUtils.generateDynamicConfigsStr(jobManagerProcessSpec)); }
3.68
morf_Function_some
/** * Helper method to create an instance of the "some" SQL function. * * @param fieldToEvaluate the field to evaluate in the some function. * @return an instance of the some function */ public static Function some(AliasedField fieldToEvaluate) { return new Function(FunctionType.SOME, fieldToEvaluate); }
3.68
hudi_HadoopConfigurations_getHiveConf
/** * Creates a Hive configuration with configured dir path or empty if no Hive conf dir is set. */ public static org.apache.hadoop.conf.Configuration getHiveConf(Configuration conf) { String explicitDir = conf.getString(FlinkOptions.HIVE_SYNC_CONF_DIR, System.getenv("HIVE_CONF_DIR")); org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration(); if (explicitDir != null) { hadoopConf.addResource(new Path(explicitDir, "hive-site.xml")); } return hadoopConf; }
3.68
flink_PekkoRpcActor_sendErrorIfSender
/** * Send throwable to sender if the sender is specified. * * @param throwable to send to the sender */ protected void sendErrorIfSender(Throwable throwable) { if (!getSender().equals(ActorRef.noSender())) { getSender().tell(new Status.Failure(throwable), getSelf()); } }
3.68
hbase_HttpServer_addUnprivilegedServlet
/** * Adds a servlet in the server that any user can access. This method differs from * {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user can * interact with the servlet added by this method. * @param pathSpec The path spec for the servlet * @param holder The servlet holder */ public void addUnprivilegedServlet(String pathSpec, ServletHolder holder) { addServletWithAuth(pathSpec, holder, false); }
3.68
streampipes_EpProperties_listStringEp
/** * Creates a new list-based event property of type string and with the assigned domain property. * * @param label A human-readable label of the property * @param runtimeName The field identifier of the event property at runtime. * @param domainProperty The semantics of the list property as a String. The string should correspond to a URI * provided by a vocabulary. Use one of the vocabularies provided in * {@link org.apache.streampipes.vocabulary} or create your own domain-specific vocabulary. * @return {@link org.apache.streampipes.model.schema.EventPropertyPrimitive} */ public static EventPropertyList listStringEp(Label label, String runtimeName, String domainProperty) { return listEp(label, runtimeName, Datatypes.String, domainProperty); }
3.68
dubbo_ObjectUtils_of
/** * Convert from variable arguments to array * * @param values variable arguments * @param <T> The class * @return array */ public static <T> T[] of(T... values) { return values; }
3.68
dubbo_DubboAbstractTDigest_weightedAverageSorted
/** * Compute the weighted average between <code>x1</code> with a weight of * <code>w1</code> and <code>x2</code> with a weight of <code>w2</code>. * This expects <code>x1</code> to be less than or equal to <code>x2</code> * and is guaranteed to return a number in <code>[x1, x2]</code>. An * explicit check is required since this isn't guaranteed with floating-point * numbers. */ private static double weightedAverageSorted(double x1, double w1, double x2, double w2) { assert x1 <= x2; final double x = (x1 * w1 + x2 * w2) / (w1 + w2); return Math.max(x1, Math.min(x, x2)); }
3.68
hbase_StochasticLoadBalancer_getRandomGenerator
/** * Select the candidate generator to use based on the cost of cost functions. The chance of * selecting a candidate generator is proportional to the share of cost of all cost functions among * all cost functions that benefit from it. */ protected CandidateGenerator getRandomGenerator() { double sum = 0; for (int i = 0; i < weightsOfGenerators.length; i++) { sum += weightsOfGenerators[i]; weightsOfGenerators[i] = sum; } if (sum == 0) { return candidateGenerators.get(0); } for (int i = 0; i < weightsOfGenerators.length; i++) { weightsOfGenerators[i] /= sum; } double rand = ThreadLocalRandom.current().nextDouble(); for (int i = 0; i < weightsOfGenerators.length; i++) { if (rand <= weightsOfGenerators[i]) { return candidateGenerators.get(i); } } return candidateGenerators.get(candidateGenerators.size() - 1); }
3.68
hadoop_NameCache_put
/** * Add a given name to the cache or track its use count. If the name already exists, then the * internal value is returned. * * @param name name to be looked up * @return internal value for the name if found; otherwise null */ K put(final K name) { K internal = cache.get(name); if (internal != null) { lookups++; return internal; } // Track the usage count only during initialization if (!initialized) { UseCount useCount = transientMap.get(name); if (useCount != null) { useCount.increment(); if (useCount.get() >= useThreshold) { promote(name); } return useCount.value; } useCount = new UseCount(name); transientMap.put(name, useCount); } return null; }
3.68
dubbo_ExpiringMap_startExpiryIfNotStarted
/** * Start the expiring thread if it is not already running. */ public void startExpiryIfNotStarted() { if (running && timeToLiveMillis <= 0) { return; } startExpiring(); }
3.68
framework_NotificationElement_getDescription
/** * Returns description of the Notification element. * * @return description of the Notification element */ public String getDescription() { WebElement popup = findElement(By.className("popupContent")); WebElement caption = popup.findElement(By.tagName("p")); return caption.getText(); }
3.68
hbase_TableName_getADummyTableName
/** * It is used to create table names for the old META and ROOT tables. These tables are not really * legal tables. They are not added into the cache. * @return a dummy TableName instance (with no validation) for the passed qualifier */ private static TableName getADummyTableName(String qualifier) { return new TableName(qualifier); }
3.68
hadoop_Cluster_getStagingAreaDir
/** * Grab the jobtracker's view of the staging directory path where * job-specific files will be placed. * * @return the staging directory where job-specific files are to be placed. */ public Path getStagingAreaDir() throws IOException, InterruptedException { if (stagingAreaDir == null) { stagingAreaDir = new Path(client.getStagingAreaDir()); } return stagingAreaDir; }
3.68
hbase_FileArchiverNotifierImpl_persistSnapshotSizeChanges
/** * Reads the current size for each snapshot to update, generates a new update based on that value, * and then writes the new update. * @param snapshotSizeChanges A map of snapshot name to size change */ void persistSnapshotSizeChanges(Map<String, Long> snapshotSizeChanges) throws IOException { try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { // Create a list (with a more typical ordering implied) final List<Entry<String, Long>> snapshotSizeEntries = new ArrayList<>(snapshotSizeChanges.entrySet()); // Create the Gets for each snapshot we need to update final List<Get> snapshotSizeGets = snapshotSizeEntries.stream() .map((e) -> QuotaTableUtil.makeGetForSnapshotSize(tn, e.getKey())) .collect(Collectors.toList()); final Iterator<Entry<String, Long>> iterator = snapshotSizeEntries.iterator(); // A List to store each Put we'll create from the Gets we retrieve final List<Put> updates = new ArrayList<>(snapshotSizeEntries.size()); // TODO Push this down to the RegionServer with a coprocessor: // // We would really like to piggy-back on the row-lock already being grabbed // to handle the update of the row in the quota table. However, because the value // is a serialized protobuf, the standard Increment API doesn't work for us. With a CP, we // can just send the size deltas to the RS and atomically update the serialized PB object // while relying on the row-lock for synchronization. // // Synchronizing on the namespace string is a "minor smell" but passable as this is // only invoked via a single caller (the active Master). Using the namespace name lets us // have some parallelism without worry of one caller seeing stale data from the quota table. synchronized (getLockForNamespace(tn.getNamespaceAsString())) { final Result[] existingSnapshotSizes = quotaTable.get(snapshotSizeGets); long totalSizeChange = 0; // Read the current size values (if they exist) to generate the new value for (Result result : existingSnapshotSizes) { Entry<String, Long> entry = iterator.next(); String snapshot = entry.getKey(); Long size = entry.getValue(); // Track the total size change for the namespace this table belongs in totalSizeChange += size; // Get the size of the previous value (or zero) long previousSize = getSnapshotSizeFromResult(result); // Create an update. A file was archived from the table, so the table's size goes // down, but the snapshot's size goes up. updates.add(QuotaTableUtil.createPutForSnapshotSize(tn, snapshot, previousSize + size)); } // Create an update for the summation of all snapshots in the namespace if (totalSizeChange != 0) { long previousSize = getPreviousNamespaceSnapshotSize(quotaTable, tn.getNamespaceAsString()); updates.add(QuotaTableUtil.createPutForNamespaceSnapshotSize(tn.getNamespaceAsString(), previousSize + totalSizeChange)); } // Send all of the quota table updates in one batch. List<Object> failures = new ArrayList<>(); final Object[] results = new Object[updates.size()]; quotaTable.batch(updates, results); for (Object result : results) { // A null result is an error condition (all RPC attempts failed) if (!(result instanceof Result)) { failures.add(result); } } // Propagate a failure if any updates failed if (!failures.isEmpty()) { throw new QuotaSnapshotSizeSerializationException( "Failed to write some snapshot size updates: " + failures); } } } catch (InterruptedException e) { Thread.currentThread().interrupt(); return; } }
3.68
hbase_ServerCrashProcedure_zkCoordinatedSplitMetaLogs
/** * Split hbase:meta logs using 'classic' zk-based coordination. Superseded by procedure-based WAL * splitting. * @see #createSplittingWalProcedures(MasterProcedureEnv, boolean) */ private void zkCoordinatedSplitMetaLogs(MasterProcedureEnv env) throws IOException { LOG.debug("Splitting meta WALs {}", this); MasterWalManager mwm = env.getMasterServices().getMasterWalManager(); AssignmentManager am = env.getMasterServices().getAssignmentManager(); am.getRegionStates().metaLogSplitting(serverName); mwm.splitMetaLog(serverName); am.getRegionStates().metaLogSplit(serverName); LOG.debug("Done splitting meta WALs {}", this); }
3.68
hudi_HoodieCopyOnWriteTableInputFormat_doListStatus
/** * Abstracts and exposes {@link FileInputFormat#listStatus(JobConf)} operation to subclasses that * lists files (returning an array of {@link FileStatus}) corresponding to the input paths specified * as part of provided {@link JobConf} */ protected final FileStatus[] doListStatus(JobConf job) throws IOException { return super.listStatus(job); }
3.68
hudi_HoodieAvroUtils_recordToBytes
/** * TODO serialize other types of records. */ public static Option<byte[]> recordToBytes(HoodieRecord record, Schema schema) throws IOException { return Option.of(HoodieAvroUtils.indexedRecordToBytes(record.toIndexedRecord(schema, new Properties()).get().getData())); }
3.68
hbase_KeyValue_createEmptyByteArray
/** * Create an empty byte[] representing a KeyValue. All lengths are preset and can be filled in * later. * @param rlength row length * @param flength family length * @param qlength qualifier length * @param timestamp version timestamp * @param type key type * @param vlength value length * @return The newly created byte array. */ private static byte[] createEmptyByteArray(final int rlength, int flength, int qlength, final long timestamp, final Type type, int vlength, int tagsLength) { if (rlength > Short.MAX_VALUE) { throw new IllegalArgumentException("Row > " + Short.MAX_VALUE); } if (flength > Byte.MAX_VALUE) { throw new IllegalArgumentException("Family > " + Byte.MAX_VALUE); } // Qualifier length if (qlength > Integer.MAX_VALUE - rlength - flength) { throw new IllegalArgumentException("Qualifier > " + Integer.MAX_VALUE); } RawCell.checkForTagsLength(tagsLength); // Key length long longkeylength = getKeyDataStructureSize(rlength, flength, qlength); if (longkeylength > Integer.MAX_VALUE) { throw new IllegalArgumentException("keylength " + longkeylength + " > " + Integer.MAX_VALUE); } int keylength = (int) longkeylength; // Value length if (vlength > HConstants.MAXIMUM_VALUE_LENGTH) { // FindBugs INT_VACUOUS_COMPARISON throw new IllegalArgumentException("Value > " + HConstants.MAXIMUM_VALUE_LENGTH); } // Allocate right-sized byte array. byte[] bytes = new byte[(int) getKeyValueDataStructureSize(rlength, flength, qlength, vlength, tagsLength)]; // Write the correct size markers int pos = 0; pos = Bytes.putInt(bytes, pos, keylength); pos = Bytes.putInt(bytes, pos, vlength); pos = Bytes.putShort(bytes, pos, (short) (rlength & 0x0000ffff)); pos += rlength; pos = Bytes.putByte(bytes, pos, (byte) (flength & 0x0000ff)); pos += flength + qlength; pos = Bytes.putLong(bytes, pos, timestamp); pos = Bytes.putByte(bytes, pos, type.getCode()); pos += vlength; if (tagsLength > 0) { pos = Bytes.putAsShort(bytes, pos, tagsLength); } return bytes; }
3.68
incubator-hugegraph-toolchain_DataTypeUtils_checkDataType
/** * Check that the type of the value is valid */ private static boolean checkDataType(String key, Object value, DataType dataType) { if (value instanceof Number && dataType.isNumber()) { return parseNumber(key, value, dataType) != null; } return dataType.clazz().isInstance(value); }
3.68
hbase_ShadedAccessControlUtil_toPermission
/** * Convert a client Permission to a Permission shaded proto * @param perm the client Permission * @return the protobuf Permission */ public static AccessControlProtos.Permission toPermission(Permission perm) { AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); if (perm instanceof NamespacePermission) { NamespacePermission nsPerm = (NamespacePermission) perm; ret.setType(AccessControlProtos.Permission.Type.Namespace); AccessControlProtos.NamespacePermission.Builder builder = AccessControlProtos.NamespacePermission.newBuilder(); builder.setNamespaceName(org.apache.hbase.thirdparty.com.google.protobuf.ByteString .copyFromUtf8(nsPerm.getNamespace())); Permission.Action[] actions = perm.getActions(); if (actions != null) { for (Permission.Action a : actions) { builder.addAction(toPermissionAction(a)); } } ret.setNamespacePermission(builder); } else if (perm instanceof TablePermission) { TablePermission tablePerm = (TablePermission) perm; ret.setType(AccessControlProtos.Permission.Type.Table); AccessControlProtos.TablePermission.Builder builder = AccessControlProtos.TablePermission.newBuilder(); builder.setTableName(toProtoTableName(tablePerm.getTableName())); if (tablePerm.hasFamily()) { builder.setFamily(ByteString.copyFrom(tablePerm.getFamily())); } if (tablePerm.hasQualifier()) { builder.setQualifier(ByteString.copyFrom(tablePerm.getQualifier())); } Permission.Action[] actions = perm.getActions(); if (actions != null) { for (Permission.Action a : actions) { builder.addAction(toPermissionAction(a)); } } ret.setTablePermission(builder); } else { // perm.getAccessScope() == Permission.Scope.GLOBAL ret.setType(AccessControlProtos.Permission.Type.Global); AccessControlProtos.GlobalPermission.Builder builder = AccessControlProtos.GlobalPermission.newBuilder(); Permission.Action[] actions = perm.getActions(); if (actions != null) { for (Permission.Action a : actions) { builder.addAction(toPermissionAction(a)); } } ret.setGlobalPermission(builder); } return ret.build(); }
3.68
framework_ExpandingContainer_addItem
/** * @throws UnsupportedOperationException * always */ @Override public Item addItem() { throw new UnsupportedOperationException(); }
3.68
hadoop_StageConfig_getOperations
/** * Callbacks to update store. * This is not made visible to the stages; they must * go through the wrapper classes in this class, which * add statistics and logging. */ public ManifestStoreOperations getOperations() { return operations; }
3.68
flink_HiveParserQBParseInfo_isPartialScanAnalyzeCommand
/** @return the isPartialScanAnalyzeCommand */ public boolean isPartialScanAnalyzeCommand() { return isPartialScanAnalyzeCommand; }
3.68
dubbo_Server_stop
/** * close server */ public void stop() { logger.info("qos-server stopped."); if (boss != null) { boss.shutdownGracefully(); } if (worker != null) { worker.shutdownGracefully(); } started.set(false); }
3.68
framework_ServerRpcHandler_checkWidgetsetVersion
/** * Checks that the version reported by the client (widgetset) matches that * of the server. * * @param widgetsetVersion * the widget set version reported by the client or null */ private void checkWidgetsetVersion(String widgetsetVersion) { if (widgetsetVersion == null) { // Only check when the widgetset version is reported. It is reported // in the first UIDL request (not the initial request as it is a // plain GET /) return; } if (!Version.getFullVersion().equals(widgetsetVersion)) { getLogger().warning(String.format(Constants.WIDGETSET_MISMATCH_INFO, Version.getFullVersion(), widgetsetVersion)); } }
3.68
morf_UpgradeGraph_isVersionAnnotationValid
//Determine if the Version annotation value is valid private boolean isVersionAnnotationValid(Class<? extends UpgradeStep> stepClass) { return stepClass.getAnnotation(Version.class).value().matches("[0-9]+\\.[0-9]+(\\.[0-9]+[a-z]?)*$"); }
3.68
hbase_AuthUtil_getGroupName
/** * Returns the actual name for a group principal (stripped of the group prefix). */ @InterfaceAudience.Private public static String getGroupName(String aclKey) { if (!isGroupPrincipal(aclKey)) { return aclKey; } return aclKey.substring(GROUP_PREFIX.length()); }
3.68
pulsar_PulsarConfigurationLoader_convertFrom
/** * Converts a PulsarConfiguration object to a ServiceConfiguration object. * * @param conf * @param ignoreNonExistMember * @return * @throws IllegalArgumentException * if conf has the field whose name is not contained in ServiceConfiguration and ignoreNonExistMember * is false. * @throws RuntimeException */ public static ServiceConfiguration convertFrom(PulsarConfiguration conf, boolean ignoreNonExistMember) throws RuntimeException { try { final ServiceConfiguration convertedConf = ServiceConfiguration.class .getDeclaredConstructor().newInstance(); Field[] confFields = conf.getClass().getDeclaredFields(); Properties sourceProperties = conf.getProperties(); Properties targetProperties = convertedConf.getProperties(); Arrays.stream(confFields).forEach(confField -> { try { confField.setAccessible(true); Field convertedConfField = ServiceConfiguration.class.getDeclaredField(confField.getName()); if (!Modifier.isStatic(convertedConfField.getModifiers()) && convertedConfField.getDeclaredAnnotation(FieldContext.class) != null) { convertedConfField.setAccessible(true); convertedConfField.set(convertedConf, confField.get(conf)); } } catch (NoSuchFieldException e) { if (!ignoreNonExistMember) { throw new IllegalArgumentException( "Exception caused while converting configuration: " + e.getMessage()); } // add unknown fields to properties try { String propertyName = confField.getName(); if (!sourceProperties.containsKey(propertyName) && confField.get(conf) != null) { targetProperties.put(propertyName, confField.get(conf)); } } catch (Exception ignoreException) { // should not happen } } catch (IllegalAccessException e) { throw new RuntimeException("Exception caused while converting configuration: " + e.getMessage()); } }); // Put the rest of properties to new config targetProperties.putAll(sourceProperties); return convertedConf; } catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException e) { throw new RuntimeException("Exception caused while converting configuration: " + e.getMessage()); } }
3.68
dubbo_RouterChain_route
/** * @deprecated use {@link RouterChain#getSingleChain(URL, BitList, Invocation)} and {@link SingleRouterChain#route(URL, BitList, Invocation)} instead */ @Deprecated public List<Invoker<T>> route(URL url, BitList<Invoker<T>> availableInvokers, Invocation invocation) { return getSingleChain(url, availableInvokers, invocation).route(url, availableInvokers, invocation); }
3.68
framework_DropTargetExtension_addDropListener
/** * Attaches drop listener for the current drop target. * {@link DropListener#drop(DropEvent)} is called when drop event happens on * the client side. * * @param listener * Listener to handle drop event. * @return Handle to be used to remove this listener. */ public Registration addDropListener(DropListener<T> listener) { return addListener(DropEvent.class, listener, DropListener.DROP_METHOD); }
3.68
hadoop_MetricsSystem_register
/** * Register a metrics source (deriving name and description from the object) * @param <T> the actual type of the source object * @param source object to register * @return the source object * @exception MetricsException Metrics Exception. */ public <T> T register(T source) { return register(null, null, source); }
3.68
morf_DataSourceAdapter_setLogWriter
/** * @see javax.sql.CommonDataSource#setLogWriter(java.io.PrintWriter) */ @Override public void setLogWriter(PrintWriter out) throws SQLException { throw new UnsupportedOperationException("Log writer not supported"); }
3.68
dubbo_ModuleServiceRepository_registerConsumer
/** * @deprecated Replaced to {@link ModuleServiceRepository#registerConsumer(ConsumerModel)} */ @Deprecated public void registerConsumer( String serviceKey, ServiceDescriptor serviceDescriptor, ReferenceConfigBase<?> rc, Object proxy, ServiceMetadata serviceMetadata) { ClassLoader classLoader = null; if (rc != null) { classLoader = rc.getInterfaceClassLoader(); } ConsumerModel consumerModel = new ConsumerModel( serviceMetadata.getServiceKey(), proxy, serviceDescriptor, serviceMetadata, null, classLoader); this.registerConsumer(consumerModel); }
3.68
pulsar_ConsumerBase_isValidConsumerEpoch
// If the message's consumer epoch is smaller than the consumer's current epoch, it means the message // was sent to the client before the user called redeliverUnacknowledgedMessages, so the message is invalid // and we should release it and receive again protected boolean isValidConsumerEpoch(MessageImpl<T> message) { if ((getSubType() == CommandSubscribe.SubType.Failover || getSubType() == CommandSubscribe.SubType.Exclusive) && message.getConsumerEpoch() != DEFAULT_CONSUMER_EPOCH && message.getConsumerEpoch() < CONSUMER_EPOCH.get(this)) { log.info("Consumer filter old epoch message, topic : [{}], messageId : [{}], messageConsumerEpoch : [{}], " + "consumerEpoch : [{}]", topic, message.getMessageId(), message.getConsumerEpoch(), consumerEpoch); message.release(); message.recycle(); return false; } return true; }
3.68
hbase_VisibilityClient_setAuths
/** * Sets given labels globally authorized for the user. */ public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths, final String user) throws Throwable { return setOrClearAuths(connection, auths, user, true); }
3.68
flink_Schema_fromRowDataType
/** Adopts all fields of the given row as physical columns of the schema. */ public Builder fromRowDataType(DataType dataType) { Preconditions.checkNotNull(dataType, "Data type must not be null."); Preconditions.checkArgument( dataType.getLogicalType().is(LogicalTypeRoot.ROW), "Data type of ROW expected."); final List<DataType> fieldDataTypes = dataType.getChildren(); final List<String> fieldNames = ((RowType) dataType.getLogicalType()).getFieldNames(); IntStream.range(0, fieldDataTypes.size()) .forEach(i -> column(fieldNames.get(i), fieldDataTypes.get(i))); return this; }
3.68
open-banking-gateway_FacadeOptionalService_execute
/** * Execute the request by passing it to the protocol if available, or default to the no-protocol result if the * protocol is missing. * @param request Request to execute * @return Result of request execution */ @Override public CompletableFuture<FacadeResult<RESULT>> execute(REQUEST request) { ProtocolWithCtx<ACTION, REQUEST> protocolWithCtx = createContextAndFindProtocol(request); if (null == protocolWithCtx) { throw new IllegalStateException("Unable to create context"); } CompletableFuture<Result<RESULT>> result; if (protocolWithCtx.getProtocol() != null) { result = execute(protocolWithCtx.getProtocol(), protocolWithCtx.getServiceContext()); } else { result = supplyNoProtocolResult(request, protocolWithCtx.getServiceContext()); } return handleProtocolResult(request, protocolWithCtx, result); }
3.68
framework_CalendarTargetDetails_setHasDropTime
/** * Sets whether the dropped item has a time associated with it. * * @param hasDropTime */ public void setHasDropTime(boolean hasDropTime) { this.hasDropTime = hasDropTime; }
3.68
hibernate-validator_ReturnValueMethodOverrideCheck_methodIsAnnotatedWithValid
/** * Check if there is a {@code @Valid} annotation present on the method. * * @param method a method to check for annotation presence * @return {@code true} if {@code @Valid} annotation is present on return value of a given method, {@code false} * otherwise */ private boolean methodIsAnnotatedWithValid(ExecutableElement method) { for ( AnnotationMirror annotationMirror : method.getAnnotationMirrors() ) { if ( ConstraintHelper.AnnotationType.GRAPH_VALIDATION_ANNOTATION.equals( constraintHelper.getAnnotationType( annotationMirror ) ) ) { return true; } } return false; }
3.68
framework_QuerySortOrder_desc
/** * Creates a new query sort builder with given sorting using descending sort * direction. * * @param by * the string to sort by * * @return the query sort builder */ public static QuerySortOrderBuilder desc(String by) { return new QuerySortOrderBuilder().thenDesc(by); }
3.68
morf_SqlDialect_getSqlForPower
/** * Converts the power function into SQL. * * @param function the function to convert. * @return a string representation of the SQL. * @see org.alfasoftware.morf.sql.element.Function#power(AliasedField, * AliasedField) */ protected String getSqlForPower(Function function) { return String.format("POWER(%s, %s)", getSqlFrom(function.getArguments().get(0)), getSqlFrom(function.getArguments().get(1))); }
3.68
hbase_MasterCoprocessorHost_preMergeRegionsAction
/** * Invoked just before a merge * @param regionsToMerge the regions to merge * @param user the user */ public void preMergeRegionsAction(final RegionInfo[] regionsToMerge, final User user) throws IOException { execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { @Override public void call(MasterObserver observer) throws IOException { observer.preMergeRegionsAction(this, regionsToMerge); } }); }
3.68