Columns:
name (string, length 12 to 178)
code_snippet (string, length 8 to 36.5k)
score (float64, range 3.26 to 3.68)
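The rows below pair each name with its code_snippet and its score. As a minimal, illustrative sketch only (not part of the dataset), the Java snippet that follows models one such row and filters rows by a score threshold; the class name CodeSnippetRows, the Row record, and the placeholder snippet strings are hypothetical, while the two names and the 3.68 scores are taken from rows in this dump.

import java.util.List;
import java.util.stream.Collectors;

public class CodeSnippetRows {

    // One row of the dump: a method identifier, its source text, and its score.
    record Row(String name, String codeSnippet, double score) {}

    // Keep only rows whose score is at or above the given threshold.
    static List<Row> filterByScore(List<Row> rows, double minScore) {
        return rows.stream()
                .filter(r -> r.score() >= minScore)
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<Row> rows = List.of(
                new Row("flink_HiveStatsUtil_getCatalogPartitionColumnStats", "/* snippet text */", 3.68),
                new Row("hbase_Scan_toMap", "/* snippet text */", 3.68));
        System.out.println(filterByScore(rows, 3.5).size()); // prints 2
    }
}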
flink_HiveStatsUtil_getCatalogPartitionColumnStats
/** Get column statistic for partition columns. */ public static Map<String, CatalogColumnStatisticsDataBase> getCatalogPartitionColumnStats( HiveMetastoreClientWrapper client, HiveShim hiveShim, Table hiveTable, String partitionName, List<FieldSchema> partitionColsSchema, String defaultPartitionName) { Map<String, CatalogColumnStatisticsDataBase> partitionColumnStats = new HashMap<>(); List<String> partitionCols = new ArrayList<>(partitionColsSchema.size()); List<LogicalType> partitionColsType = new ArrayList<>(partitionColsSchema.size()); for (FieldSchema fieldSchema : partitionColsSchema) { partitionCols.add(fieldSchema.getName()); partitionColsType.add( HiveTypeUtil.toFlinkType( TypeInfoUtils.getTypeInfoFromTypeString(fieldSchema.getType())) .getLogicalType()); } // the partition column and values for the partition column Map<String, Object> partitionColValues = new HashMap<>(); CatalogPartitionSpec partitionSpec = HivePartitionUtils.createPartitionSpec(partitionName, defaultPartitionName); for (int i = 0; i < partitionCols.size(); i++) { String partitionCol = partitionCols.get(i); String partitionStrVal = partitionSpec.getPartitionSpec().get(partitionCols.get(i)); if (partitionStrVal == null) { partitionColValues.put(partitionCol, null); } else { partitionColValues.put( partitionCol, HivePartitionUtils.restorePartitionValueFromType( hiveShim, partitionStrVal, partitionColsType.get(i), defaultPartitionName)); } } // calculate statistic for each partition column for (int i = 0; i < partitionCols.size(); i++) { Object partitionValue = partitionColValues.get(partitionCols.get(i)); LogicalType logicalType = partitionColsType.get(i); CatalogColumnStatisticsDataBase catalogColumnStatistics = getPartitionColumnStats( client, hiveTable, logicalType, partitionValue, i, defaultPartitionName); if (catalogColumnStatistics != null) { partitionColumnStats.put(partitionCols.get(i), catalogColumnStatistics); } } return partitionColumnStats; }
3.68
hbase_Scan_toMap
/** * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a * Map along with the fingerprinted information. Useful for debugging, logging, and administration * tools. * @param maxCols a limit on the number of columns output prior to truncation */ @Override public Map<String, Object> toMap(int maxCols) { // start with the fingerprint map and build on top of it Map<String, Object> map = getFingerprint(); // map from families to column list replaces fingerprint's list of families Map<String, List<String>> familyColumns = new HashMap<>(); map.put("families", familyColumns); // add scalar information first map.put("startRow", Bytes.toStringBinary(this.startRow)); map.put("stopRow", Bytes.toStringBinary(this.stopRow)); map.put("maxVersions", this.maxVersions); map.put("batch", this.batch); map.put("caching", this.caching); map.put("maxResultSize", this.maxResultSize); map.put("cacheBlocks", this.cacheBlocks); map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand); List<Long> timeRange = new ArrayList<>(2); timeRange.add(this.tr.getMin()); timeRange.add(this.tr.getMax()); map.put("timeRange", timeRange); int colCount = 0; // iterate through affected families and list out up to maxCols columns for (Map.Entry<byte[], NavigableSet<byte[]>> entry : this.familyMap.entrySet()) { List<String> columns = new ArrayList<>(); familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns); if (entry.getValue() == null) { colCount++; --maxCols; columns.add("ALL"); } else { colCount += entry.getValue().size(); if (maxCols <= 0) { continue; } for (byte[] column : entry.getValue()) { if (--maxCols <= 0) { continue; } columns.add(Bytes.toStringBinary(column)); } } } map.put("totalColumns", colCount); if (this.filter != null) { map.put("filter", this.filter.toString()); } // add the id if set if (getId() != null) { map.put("id", getId()); } return map; }
3.68
pulsar_LoadSimulationController_changeOrCreate
// Change an existing topic, or create it if it does not exist. private int changeOrCreate(final ShellArguments arguments, final String topic) throws Exception { final int client = find(topic); if (client == -1) { trade(arguments, topic, random.nextInt(clients.length)); } else { change(arguments, topic, client); } return client; }
3.68
hadoop_MutableStat_setUpdateTimeStamp
/** * Set whether to update the snapshot time or not. * @param updateTimeStamp enable update stats snapshot timestamp */ public synchronized void setUpdateTimeStamp(boolean updateTimeStamp) { this.updateTimeStamp = updateTimeStamp; }
3.68
flink_PathPattern_tokens
/** * Returns the pattern given at the constructor, without slashes at both ends, and split by * {@code '/'}. */ public String[] tokens() { return tokens; }
3.68
hadoop_TupleWritable_clearWritten
/** * Clear any record of which writables have been written to, without * releasing storage. */ void clearWritten() { written.clear(); }
3.68
hadoop_AHSClient_createAHSClient
/** * Create a new instance of AHSClient. */ @Public public static AHSClient createAHSClient() { return new AHSClientImpl(); }
3.68
hbase_WALObserver_preWALWrite
/** * Called before a {@link WALEdit} is written to WAL. Do not amend the WALKey. It is * InterfaceAudience.Private. Changing the WALKey will cause damage. * @deprecated Since hbase-2.0.0. To be replaced with an alternative that does not expose * InterfaceAudience classes such as WALKey and WALEdit. Will be removed in * hbase-3.0.0. */ @Deprecated default void preWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx, RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { }
3.68
dubbo_AsyncRpcResult_get
/** * This method will always return after a maximum 'timeout' waiting: * 1. if value returns before timeout, return normally. * 2. if no value returns after timeout, throw TimeoutException. * * @return * @throws InterruptedException * @throws ExecutionException */ @Override public Result get() throws InterruptedException, ExecutionException { if (executor instanceof ThreadlessExecutor) { ThreadlessExecutor threadlessExecutor = (ThreadlessExecutor) executor; try { while (!responseFuture.isDone() && !threadlessExecutor.isShutdown()) { threadlessExecutor.waitAndDrain(Long.MAX_VALUE); } } finally { threadlessExecutor.shutdown(); } } return responseFuture.get(); }
3.68
hadoop_AllocateResponse_completedContainersStatuses
/** * Set the <code>completedContainersStatuses</code> of the response. * @see AllocateResponse#setCompletedContainersStatuses(List) * @param completedContainersStatuses * <code>completedContainersStatuses</code> of the response * @return {@link AllocateResponseBuilder} */ @Private @Unstable public AllocateResponseBuilder completedContainersStatuses( List<ContainerStatus> completedContainersStatuses) { allocateResponse .setCompletedContainersStatuses(completedContainersStatuses); return this; }
3.68
hadoop_AzureBlobFileSystem_removeAclEntries
/** * Removes ACL entries from files and directories. Other ACL entries are * retained. * * @param path Path to modify * @param aclSpec List of AclEntry describing entries to remove * @throws IOException if an ACL could not be modified */ @Override public void removeAclEntries(final Path path, final List<AclEntry> aclSpec) throws IOException { LOG.debug("AzureBlobFileSystem.removeAclEntries path: {}", path); TracingContext tracingContext = new TracingContext(clientCorrelationId, fileSystemId, FSOperationType.REMOVE_ACL_ENTRIES, true, tracingHeaderFormat, listener); if (!getIsNamespaceEnabled(tracingContext)) { throw new UnsupportedOperationException( "removeAclEntries is only supported by storage accounts with the " + "hierarchical namespace enabled."); } if (aclSpec == null || aclSpec.isEmpty()) { throw new IllegalArgumentException("The aclSpec argument is invalid."); } Path qualifiedPath = makeQualified(path); try { abfsStore.removeAclEntries(qualifiedPath, aclSpec, tracingContext); } catch (AzureBlobFileSystemException ex) { checkException(path, ex); } }
3.68
framework_IndexedContainer_removeSinglePropertyChangeListener
/** * Removes a previously registered single Property change listener. * * @param propertyId * the ID of the Property to remove. * @param itemId * the ID of the Item. * @param listener * the listener to be removed. */ private void removeSinglePropertyChangeListener(Object propertyId, Object itemId, Property.ValueChangeListener listener) { if (listener != null && singlePropertyValueChangeListeners != null) { final Map<Object, List<Property.ValueChangeListener>> propertySetToListenerListMap = singlePropertyValueChangeListeners .get(propertyId); if (propertySetToListenerListMap != null) { final List<Property.ValueChangeListener> listenerList = propertySetToListenerListMap .get(itemId); if (listenerList != null) { listenerList.remove(listener); if (listenerList.isEmpty()) { propertySetToListenerListMap.remove(itemId); } } if (propertySetToListenerListMap.isEmpty()) { singlePropertyValueChangeListeners.remove(propertyId); } } if (singlePropertyValueChangeListeners.isEmpty()) { singlePropertyValueChangeListeners = null; } } }
3.68
AreaShop_RentRegion_isRented
/** * Check if the region is rented. * @return true if the region is rented, otherwise false */ public boolean isRented() { return getRenter() != null; }
3.68
morf_SqlInternalUtils_transformOrderByToAscending
/** * Sets the fields in an ORDER BY to use ascending order if not specified. * * @param orderBys The order by criteria */ static Iterable<AliasedField> transformOrderByToAscending(Iterable<AliasedField> orderBys) { return FluentIterable.from(orderBys).transform(o -> transformFieldReference(o)).toList(); }
3.68
framework_VDragAndDropManager_updateDragImagePosition
/** * Updates drag image position. * * @param gwtEvent * the event whose coordinates should be used * @param dragImage * the image to position */ protected void updateDragImagePosition(NativeEvent gwtEvent, Element dragImage) { if (gwtEvent != null && dragImage != null) { Style style = dragImage.getStyle(); int clientY = WidgetUtil.getTouchOrMouseClientY(gwtEvent); int clientX = WidgetUtil.getTouchOrMouseClientX(gwtEvent); style.setTop(clientY, Unit.PX); style.setLeft(clientX, Unit.PX); } }
3.68
hadoop_NameCache_getLookupCount
/** * Lookup count when a lookup for a name returned cached object * @return number of successful lookups */ int getLookupCount() { return lookups; }
3.68
hbase_HealthReport_getStatus
/** * Gets the status of the region server. */ HealthCheckerExitStatus getStatus() { return status; }
3.68
hudi_KeyRangeNode_addFiles
/** * Adds a new file name list to existing list of file names. * * @param newFiles {@link List} of file names to be added */ void addFiles(List<String> newFiles) { this.fileNameList.addAll(newFiles); }
3.68
framework_VComboBox_getStyle
/** * Gets the style set for this suggestion item. Styles are typically set * by a server-side {@link com.vaadin.ui.ComboBox.ItemStyleProvider}. * The returned style is prefixed by <code>v-filterselect-item-</code>. * * @since 7.5.6 * @return the style name to use, or <code>null</code> to not apply any * custom style. */ public String getStyle() { return style; }
3.68
flink_PythonShellParser_parseYarn
/** * Parses Python shell yarn options and transfers them to yarn options which will be used in `flink * run` to submit a flink job. * * @param args Python shell yarn options. * @return Yarn options used in `flink run`. */ static List<String> parseYarn(String[] args) { String[] params = new String[args.length - 1]; System.arraycopy(args, 1, params, 0, params.length); CommandLine commandLine = parse(YARN_OPTIONS, params); if (commandLine.hasOption(OPTION_HELP.getOpt())) { printYarnHelp(); System.exit(0); } List<String> options = new ArrayList<>(); options.add(args[0]); options.add("-m"); options.add("yarn-cluster"); constructYarnOption(options, OPTION_JM_MEMORY, commandLine); constructYarnOption(options, OPTION_NAME, commandLine); constructYarnOption(options, OPTION_QUEUE, commandLine); constructYarnOption(options, OPTION_SLOTS, commandLine); constructYarnOption(options, OPTION_TM_MEMORY, commandLine); return options; }
3.68
framework_CalendarMonthDropHandler_dragAccepted
/* * (non-Javadoc) * * @see * com.vaadin.terminal.gwt.client.ui.dd.VAbstractDropHandler#dragAccepted * (com.vaadin.terminal.gwt.client.ui.dd.VDragEvent) */ @Override protected void dragAccepted(VDragEvent drag) { deEmphasis(); currentTargetElement = drag.getElementOver(); currentTargetDay = WidgetUtil.findWidget(currentTargetElement, SimpleDayCell.class); emphasis(); }
3.68
hadoop_SliderFileSystem_getComponentPublicResourceDir
/** * Returns the component public resource directory path. * * @param serviceVersion service version * @param compName component name * @return component public resource directory */ public Path getComponentPublicResourceDir(String serviceVersion, String compName) { return new Path(new Path(getBasePath(), getAppDir().getName() + "/" + "components"), serviceVersion + "/" + compName); }
3.68
flink_SourceOperatorFactory_instantiateSourceOperator
/** * This is a utility method to conjure up a "SplitT" generics variable binding so that we can * construct the SourceOperator without resorting to "all raw types". That way, this method * puts all "type non-safety" in one place and allows maintaining as much generics safety in the * main code as possible. */ @SuppressWarnings("unchecked") private static <T, SplitT extends SourceSplit> SourceOperator<T, SplitT> instantiateSourceOperator( FunctionWithException<SourceReaderContext, SourceReader<T, ?>, Exception> readerFactory, OperatorEventGateway eventGateway, SimpleVersionedSerializer<?> splitSerializer, WatermarkStrategy<T> watermarkStrategy, ProcessingTimeService timeService, Configuration config, String localHostName, boolean emitProgressiveWatermarks, CanEmitBatchOfRecordsChecker canEmitBatchOfRecords) { // jumping through generics hoops: cast the generics away to then cast them back more // strictly typed final FunctionWithException<SourceReaderContext, SourceReader<T, SplitT>, Exception> typedReaderFactory = (FunctionWithException< SourceReaderContext, SourceReader<T, SplitT>, Exception>) (FunctionWithException<?, ?, ?>) readerFactory; final SimpleVersionedSerializer<SplitT> typedSplitSerializer = (SimpleVersionedSerializer<SplitT>) splitSerializer; return new SourceOperator<>( typedReaderFactory, eventGateway, typedSplitSerializer, watermarkStrategy, timeService, config, localHostName, emitProgressiveWatermarks, canEmitBatchOfRecords); }
3.68
flink_ExecEdge_translateToFusionCodegenSpec
/** * Translates this edge into operator fusion codegen spec generator. * * @param planner The {@link Planner} of the translated Table. */ public OpFusionCodegenSpecGenerator translateToFusionCodegenSpec(Planner planner) { return source.translateToFusionCodegenSpec(planner); }
3.68
hadoop_TypedBytesOutput_writeMapHeader
/** * Writes a map header. * * @param length the number of key-value pairs in the map * @throws IOException */ public void writeMapHeader(int length) throws IOException { out.write(Type.MAP.code); out.writeInt(length); }
3.68
hbase_MetricsConnection_getMetaCacheNumClearRegion
/** metaCacheNumClearRegion metric */ public Counter getMetaCacheNumClearRegion() { return metaCacheNumClearRegion; }
3.68
hbase_ZKReplicationStorageBase_toByteArray
/** * Serialized protobuf of <code>state</code> with pb magic prefix prepended suitable for use as * content of a peer-state znode under a peer cluster id as in * /hbase/replication/peers/PEER_ID/peer-state. */ protected static byte[] toByteArray(final ReplicationProtos.ReplicationState.State state) { ReplicationProtos.ReplicationState msg = ReplicationProtos.ReplicationState.newBuilder().setState(state).build(); // There is no toByteArray on this pb Message? // 32 bytes is default which seems fair enough here. try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { CodedOutputStream cos = CodedOutputStream.newInstance(baos, 16); msg.writeTo(cos); cos.flush(); baos.flush(); return ProtobufUtil.prependPBMagic(baos.toByteArray()); } catch (IOException e) { throw new RuntimeException(e); } }
3.68
flink_Configuration_setDouble
/** * Adds the given value to the configuration object. The main key of the config option will be * used to map the value. * * @param key the option specifying the key to be added * @param value the value of the key/value pair to be added */ @PublicEvolving public void setDouble(ConfigOption<Double> key, double value) { setValueInternal(key.key(), value); }
3.68
hadoop_Base64_validateIsBase64String
/** * Determines whether the given string contains only Base64 characters. * * @param data * the string, as a <code>String</code> object, to validate * @return <code>true</code> if <code>data</code> is a valid Base64 string, otherwise <code>false</code> */ public static boolean validateIsBase64String(final String data) { if (data == null || data.length() % 4 != 0) { return false; } for (int m = 0; m < data.length(); m++) { final byte charByte = (byte) data.charAt(m); // pad char detected if (DECODE_64[charByte] == -2) { if (m < data.length() - 2) { return false; } else if (m == data.length() - 2 && DECODE_64[(byte) data.charAt(m + 1)] != -2) { return false; } } if (charByte < 0 || DECODE_64[charByte] == -1) { return false; } } return true; }
3.68
hbase_AsyncTable_fromRow
/** * Specify a start row * @param startKey start region selection with region containing this row, inclusive. */ default CoprocessorServiceBuilder<S, R> fromRow(byte[] startKey) { return fromRow(startKey, true); }
3.68
hadoop_ComponentContainers_setContainers
/** * Sets the containers. * @param containers containers of the component. */ public void setContainers(List<Container> containers) { this.containers = containers; }
3.68
pulsar_ConsumerConfiguration_getConsumerName
/** * @return the consumer name */ public String getConsumerName() { return conf.getConsumerName(); }
3.68
hbase_BulkLoadHFilesTool_bulkLoadPhase
/** * This takes the LQI's grouped by likely regions and attempts to bulk load them. Any failures are * re-queued for another pass with the groupOrSplitPhase. * <p/> * protected for testing. */ @InterfaceAudience.Private protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName, Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups, boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException { // atomically bulk load the groups. List<Future<Collection<LoadQueueItem>>> loadingFutures = new ArrayList<>(); for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> entry : regionGroups.asMap() .entrySet()) { byte[] first = entry.getKey().array(); final Collection<LoadQueueItem> lqis = entry.getValue(); if (bulkLoadByFamily) { groupByFamilies(lqis).values().forEach(familyQueue -> loadingFutures .add(tryAtomicRegionLoad(conn, tableName, copyFiles, first, familyQueue))); } else { loadingFutures.add(tryAtomicRegionLoad(conn, tableName, copyFiles, first, lqis)); } if (item2RegionMap != null) { for (LoadQueueItem lqi : lqis) { item2RegionMap.put(lqi, entry.getKey()); } } } // get all the results. for (Future<Collection<LoadQueueItem>> future : loadingFutures) { try { Collection<LoadQueueItem> toRetry = future.get(); if (item2RegionMap != null) { for (LoadQueueItem lqi : toRetry) { item2RegionMap.remove(lqi); } } // LQIs that are requeued to be regrouped. queue.addAll(toRetry); } catch (ExecutionException e1) { Throwable t = e1.getCause(); if (t instanceof IOException) { // At this point something unrecoverable has happened. // TODO Implement bulk load recovery throw new IOException("BulkLoad encountered an unrecoverable problem", t); } LOG.error("Unexpected execution exception during bulk load", e1); throw new IllegalStateException(t); } catch (InterruptedException e1) { LOG.error("Unexpected interrupted exception during bulk load", e1); throw (InterruptedIOException) new InterruptedIOException().initCause(e1); } } }
3.68
hudi_DataPruner_getValAsJavaObj
/** * Returns the value as Java object at position {@code pos} of row {@code indexRow}. */ private static Object getValAsJavaObj(RowData indexRow, int pos, LogicalType colType) { switch (colType.getTypeRoot()) { // NOTE: Since we can't rely on Avro's "date", and "timestamp-micros" logical-types, we're // manually encoding corresponding values as int and long w/in the Column Stats Index and // here we have to decode those back into corresponding logical representation. case TIMESTAMP_WITHOUT_TIME_ZONE: TimestampType tsType = (TimestampType) colType; return indexRow.getTimestamp(pos, tsType.getPrecision()).getMillisecond(); case TIME_WITHOUT_TIME_ZONE: case DATE: case BIGINT: return indexRow.getLong(pos); // NOTE: All integral types of size less than Int are encoded as Ints in MT case BOOLEAN: return indexRow.getBoolean(pos); case TINYINT: case SMALLINT: case INTEGER: return indexRow.getInt(pos); case FLOAT: return indexRow.getFloat(pos); case DOUBLE: return indexRow.getDouble(pos); case BINARY: case VARBINARY: return indexRow.getBinary(pos); case CHAR: case VARCHAR: return indexRow.getString(pos).toString(); case DECIMAL: DecimalType decimalType = (DecimalType) colType; return indexRow.getDecimal(pos, decimalType.getPrecision(), decimalType.getScale()).toBigDecimal(); default: throw new UnsupportedOperationException("Unsupported type: " + colType); } }
3.68
flink_TwoInputTransformation_getInput1
/** Returns the first input {@code Transformation} of this {@code TwoInputTransformation}. */ public Transformation<IN1> getInput1() { return input1; }
3.68
hbase_AccessControlClient_isCellAuthorizationEnabled
/** * Return true if cell authorization is supported and enabled * @param connection The connection to use * @return true if cell authorization is supported and enabled, false otherwise */ public static boolean isCellAuthorizationEnabled(Connection connection) throws IOException { return connection.getAdmin().getSecurityCapabilities() .contains(SecurityCapability.CELL_AUTHORIZATION); }
3.68
framework_VLayoutSlot_getCaption
/** * Returns the caption element for this slot. * * @return the caption element, can be {@code null} */ public VCaption getCaption() { return caption; }
3.68
flink_SharedBufferAccessor_put
/** * Stores given value (value + timestamp) under the given state. It assigns a preceding element * relation to the previous entry. * * @param stateName name of the state that the event should be assigned to * @param eventId unique id of event assigned by this SharedBuffer * @param previousNodeId id of previous entry (might be null if start of new run) * @param version Version of the previous relation * @return assigned id of this element */ public NodeId put( final String stateName, final EventId eventId, @Nullable final NodeId previousNodeId, final DeweyNumber version) { if (previousNodeId != null) { lockNode(previousNodeId, version); } NodeId currentNodeId = new NodeId(eventId, getOriginalNameFromInternal(stateName)); Lockable<SharedBufferNode> currentNode = sharedBuffer.getEntry(currentNodeId); if (currentNode == null) { currentNode = new Lockable<>(new SharedBufferNode(), 0); lockEvent(eventId); } currentNode.getElement().addEdge(new SharedBufferEdge(previousNodeId, version)); sharedBuffer.upsertEntry(currentNodeId, currentNode); return currentNodeId; }
3.68
framework_AbstractSplitPanel_setSecondComponent
/** * Sets the second component of this split panel. Depending on the direction * the second component is shown at the bottom or to the right. * * @param c * The component to use as second component */ public void setSecondComponent(Component c) { if (getSecondComponent() == c) { // Nothing to do return; } if (getSecondComponent() != null) { // detach old removeComponent(getSecondComponent()); } getState().secondChild = c; if (c != null) { super.addComponent(c); } }
3.68
flink_FileChannelMemoryMappedBoundedData_create
/** * Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the given * path. */ public static FileChannelMemoryMappedBoundedData create(Path memMappedFilePath) throws IOException { return createWithRegionSize(memMappedFilePath, Integer.MAX_VALUE); }
3.68
hadoop_OBSCommonUtils_pathToKey
/** * Turns a path (relative or otherwise) into an OBS key. * * @param owner the owner OBSFileSystem instance * @param path input path, may be relative to the working dir * @return a key excluding the leading "/", or, if it is the root path, "" */ static String pathToKey(final OBSFileSystem owner, final Path path) { Path absolutePath = path; if (!path.isAbsolute()) { absolutePath = new Path(owner.getWorkingDirectory(), path); } if (absolutePath.toUri().getScheme() != null && absolutePath.toUri() .getPath() .isEmpty()) { return ""; } return absolutePath.toUri().getPath().substring(1); }
3.68
dubbo_StringUtils_parseParameters
/** * Decode parameters string to map * * @param rawParameters format like '[{a:b},{c:d}]' * @return */ public static Map<String, String> parseParameters(String rawParameters) { if (StringUtils.isBlank(rawParameters)) { return Collections.emptyMap(); } Matcher matcher = PARAMETERS_PATTERN.matcher(rawParameters); if (!matcher.matches()) { return Collections.emptyMap(); } String pairs = matcher.group(1); String[] pairArr = pairs.split("\\s*,\\s*"); Map<String, String> parameters = new HashMap<>(); for (String pair : pairArr) { Matcher pairMatcher = PAIR_PARAMETERS_PATTERN.matcher(pair); if (pairMatcher.matches()) { parameters.put(pairMatcher.group(1), pairMatcher.group(2)); } } return parameters; }
3.68
rocketmq-connect_AbstractKafkaSinkConnector_taskConfigs
/** * Returns a set of configurations for Tasks based on the current configuration, * producing at most count configurations. * * @param maxTasks maximum number of configurations to generate * @return configurations for Tasks */ @Override public List<KeyValue> taskConfigs(int maxTasks) { List<Map<String, String>> groupConnectors = sinkConnector.taskConfigs(maxTasks); List<KeyValue> configs = new ArrayList<>(); for (Map<String, String> configMaps : groupConnectors) { KeyValue keyValue = new DefaultKeyValue(); configMaps.forEach((k, v) -> { keyValue.put(k, v); }); configs.add(keyValue); } return configs; }
3.68
hadoop_Trash_getCurrentTrashDir
/** * Get the current trash directory. * * @throws IOException raised on errors performing I/O. * @return Trash Dir. */ Path getCurrentTrashDir() throws IOException { return trashPolicy.getCurrentTrashDir(); }
3.68
rocketmq-connect_JdbcSinkConnector_taskConfigs
/** * Returns a set of configurations for Tasks based on the current configuration, * producing at most count configurations. * * @param maxTasks maximum number of configurations to generate * @return configurations for Tasks */ @Override public List<KeyValue> taskConfigs(int maxTasks) { log.info("Starting task config !!! "); List<KeyValue> configs = new ArrayList<>(); for (int i = 0; i < maxTasks; i++) { configs.add(this.connectConfig); } return configs; }
3.68
hudi_HoodieMetaSyncOperations_createTable
/** * Create the table. * * @param tableName The table name. * @param storageSchema The table schema. * @param inputFormatClass The input format class of this table. * @param outputFormatClass The output format class of this table. * @param serdeClass The serde class of this table. * @param serdeProperties The serde properties of this table. * @param tableProperties The table properties for this table. */ default void createTable(String tableName, MessageType storageSchema, String inputFormatClass, String outputFormatClass, String serdeClass, Map<String, String> serdeProperties, Map<String, String> tableProperties) { }
3.68
framework_GridDropTargetConnector_getDropLocation
/** * Returns the location of the event within the row. * * @param target * drop target element * @param event * drop event * @return the drop location to use */ protected DropLocation getDropLocation(Element target, NativeEvent event) { if (!isDroppingOnRowsPossible()) { return DropLocation.EMPTY; } if (TableRowElement.is(target)) { if (getState().dropMode == DropMode.BETWEEN) { if (getRelativeY(target, event) < (target.getOffsetHeight() / 2)) { return DropLocation.ABOVE; } else { return DropLocation.BELOW; } } else if (getState().dropMode == DropMode.ON_TOP_OR_BETWEEN) { if (getRelativeY(target, event) < getState().dropThreshold) { return DropLocation.ABOVE; } else if (target.getOffsetHeight() - getRelativeY(target, event) < getState().dropThreshold) { return DropLocation.BELOW; } else { return DropLocation.ON_TOP; } } else { return DropLocation.ON_TOP; } } return DropLocation.EMPTY; }
3.68
hudi_ManifestFileWriter_writeManifestFile
/** * Write all the latest base file names to the manifest file. */ public synchronized void writeManifestFile(boolean useAbsolutePath) { try { List<String> baseFiles = fetchLatestBaseFilesForAllPartitions(metaClient, useFileListingFromMetadata, useAbsolutePath) .collect(Collectors.toList()); if (baseFiles.isEmpty()) { LOG.warn("No base file to generate manifest file."); return; } else { LOG.info("Writing base file names to manifest file: " + baseFiles.size()); } final Path manifestFilePath = getManifestFilePath(useAbsolutePath); try (FSDataOutputStream outputStream = metaClient.getFs().create(manifestFilePath, true); BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(outputStream, StandardCharsets.UTF_8))) { for (String f : baseFiles) { writer.write(f); writer.write("\n"); } } } catch (Exception e) { throw new HoodieException("Error in writing manifest file.", e); } }
3.68
flink_AbstractStreamOperatorV2_isUsingCustomRawKeyedState
/** * Indicates whether or not implementations of this class are writing to the raw keyed state * streams on snapshots, using {@link #snapshotState(StateSnapshotContext)}. If yes, subclasses * should override this method to return {@code true}. * * <p>Subclasses need to explicitly indicate the use of raw keyed state because, internally, the * {@link AbstractStreamOperator} may attempt to read from it as well to restore heap-based * timers and ultimately fail with read errors. By setting this flag to {@code true}, this * allows the {@link AbstractStreamOperator} to know that the data written in the raw keyed * states were not written by the timer services, and skips the timer restore attempt. * * <p>Please refer to FLINK-19741 for further details. * * <p>TODO: this method can be removed once all timers are moved to be managed by state * backends. * * @return flag indicating whether or not this operator is writing to raw keyed state via {@link * #snapshotState(StateSnapshotContext)}. */ @Internal protected boolean isUsingCustomRawKeyedState() { return false; }
3.68
hadoop_Utils_upperBound
/** * Upper bound binary search. Find the index to the first element in the list * that compares greater than the input key. * * @param <T> * Type of the input key. * @param list * The list * @param key * The input key. * @return The index to the desired element if it exists; or list.size() * otherwise. */ public static <T> int upperBound(List<? extends Comparable<? super T>> list, T key) { int low = 0; int high = list.size(); while (low < high) { int mid = (low + high) >>> 1; Comparable<? super T> midVal = list.get(mid); int ret = midVal.compareTo(key); if (ret <= 0) low = mid + 1; else high = mid; } return low; }
3.68
hbase_BloomFilterFactory_isGeneralBloomEnabled
/** * Returns true if general Bloom (Row or RowCol) filters are enabled in the given configuration */ public static boolean isGeneralBloomEnabled(Configuration conf) { return conf.getBoolean(IO_STOREFILE_BLOOM_ENABLED, true); }
3.68
flink_SingleOutputStreamOperator_setMaxParallelism
/** * Sets the maximum parallelism of this operator. * * <p>The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the * number of key groups used for partitioned state. * * @param maxParallelism Maximum parallelism * @return The operator with set maximum parallelism */ @PublicEvolving public SingleOutputStreamOperator<T> setMaxParallelism(int maxParallelism) { OperatorValidationUtils.validateMaxParallelism(maxParallelism, canBeParallel()); transformation.setMaxParallelism(maxParallelism); return this; }
3.68
dubbo_TripleClientStream_createTransportListener
/** * @return transport listener */ H2TransportListener createTransportListener() { return new ClientTransportListener(); }
3.68
dubbo_AbstractZookeeperTransporter_getZookeeperClientMap
/** * for unit test * * @return */ public Map<String, ZookeeperClient> getZookeeperClientMap() { return zookeeperClientMap; }
3.68
hmily_Coordinator_addCoordinators
/** * Add coordinators boolean. * * @param resource the remote * @return the boolean */ public synchronized boolean addCoordinators(final Resource resource) { if (coordinators.contains(resource)) { return true; } boolean add = this.coordinators.add(resource); if (add) { hmilyTimer.put(resource); } return add; }
3.68
hbase_HBaseTestingUtility_createTable
/** * Create a table. * @return A Table instance for the created table. */ public Table createTable(TableName tableName, byte[] family, byte[][] splitRows) throws IOException { TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family); if (isNewVersionBehaviorEnabled()) { cfBuilder.setNewVersionBehavior(true); } builder.setColumnFamily(cfBuilder.build()); getAdmin().createTable(builder.build(), splitRows); // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are // assigned waitUntilAllRegionsAssigned(tableName); return getConnection().getTable(tableName); }
3.68
flink_TableFactoryService_filterBySupportedProperties
/** Filters the matching class factories by supported properties. */ private static <T extends TableFactory> List<T> filterBySupportedProperties( Class<T> factoryClass, Map<String, String> properties, List<T> classFactories, List<T> contextFactories) { final List<String> plainGivenKeys = new LinkedList<>(); properties .keySet() .forEach( k -> { // replace arrays with wildcard String key = k.replaceAll(".\\d+", ".#"); // ignore duplicates if (!plainGivenKeys.contains(key)) { plainGivenKeys.add(key); } }); List<T> supportedFactories = new LinkedList<>(); Tuple2<T, List<String>> bestMatched = null; for (T factory : contextFactories) { Set<String> requiredContextKeys = normalizeContext(factory).keySet(); Tuple2<List<String>, List<String>> tuple2 = normalizeSupportedProperties(factory); // ignore context keys List<String> givenContextFreeKeys = plainGivenKeys.stream() .filter(p -> !requiredContextKeys.contains(p)) .collect(Collectors.toList()); boolean allTrue = true; List<String> unsupportedKeys = new ArrayList<>(); for (String k : givenContextFreeKeys) { if (!(tuple2.f0.contains(k) || tuple2.f1.stream().anyMatch(k::startsWith))) { allTrue = false; unsupportedKeys.add(k); } } if (allTrue) { supportedFactories.add(factory); } else { if (bestMatched == null || unsupportedKeys.size() < bestMatched.f1.size()) { bestMatched = new Tuple2<>(factory, unsupportedKeys); } } } if (supportedFactories.isEmpty()) { String bestMatchedMessage = null; if (bestMatched != null) { bestMatchedMessage = String.format( "%s\nUnsupported property keys:\n%s", bestMatched.f0.getClass().getName(), String.join("\n", bestMatched.f1)); } //noinspection unchecked throw new NoMatchingTableFactoryException( "No factory supports all properties.", bestMatchedMessage, factoryClass, (List<TableFactory>) classFactories, properties); } return supportedFactories; }
3.68
hadoop_TFile_get
/** * Copy the key and value in one shot into BytesWritables. This is * equivalent to getKey(key); getValue(value); * * @param key * BytesWritable to hold key. * @param value * BytesWritable to hold value * @throws IOException raised on errors performing I/O. */ public void get(BytesWritable key, BytesWritable value) throws IOException { getKey(key); getValue(value); }
3.68
dubbo_MetadataService_getExportedURLs
/** * Get the {@link SortedSet sorted set} of String that presents the specified Dubbo exported {@link URL urls} by the * <code>serviceInterface</code>, <code>group</code> and <code>version</code> * * @param serviceInterface The class name of Dubbo service interface * @param group the Dubbo Service Group (optional) * @param version the Dubbo Service Version (optional) * @return the non-null read-only {@link SortedSet sorted set} of {@link URL#toFullString() strings} presenting the {@link URL URLs} * @see #toSortedStrings(Stream) * @see URL#toFullString() */ default SortedSet<String> getExportedURLs(String serviceInterface, String group, String version) { return getExportedURLs(serviceInterface, group, version, null); }
3.68
graphhopper_Distributions_exponentialDistribution
/** * @param beta =1/lambda with lambda being the standard exponential distribution rate parameter */ static double exponentialDistribution(double beta, double x) { return 1.0 / beta * exp(-x / beta); }
3.68
flink_CoProcessFunction_onTimer
/** * Called when a timer set using {@link TimerService} fires. * * @param timestamp The timestamp of the firing timer. * @param ctx An {@link OnTimerContext} that allows querying the timestamp of the firing timer, * querying the {@link TimeDomain} of the firing timer and getting a {@link TimerService} * for registering timers and querying the time. The context is only valid during the * invocation of this method, do not store it. * @param out The collector for returning result values. * @throws Exception This method may throw exceptions. Throwing an exception will cause the * operation to fail and may trigger recovery. */ public void onTimer(long timestamp, OnTimerContext ctx, Collector<OUT> out) throws Exception {}
3.68
hbase_ByteBufferUtils_putLong
/** * Put a long value out to the given ByteBuffer's current position in big-endian format. This also * advances the position in buffer by long size. * @param buffer the ByteBuffer to write to * @param val long to write out */ public static void putLong(ByteBuffer buffer, long val) { ConverterHolder.BEST_CONVERTER.putLong(buffer, val); }
3.68
flink_AbstractServerBase_shutdownServer
/** * Shuts down the server and all related thread pools. * * @return A {@link CompletableFuture} that will be completed upon termination of the shutdown * process. */ public CompletableFuture<Void> shutdownServer() { CompletableFuture<Void> shutdownFuture = new CompletableFuture<>(); if (serverShutdownFuture.compareAndSet(null, shutdownFuture)) { log.info("Shutting down {} @ {}", serverName, serverAddress); final CompletableFuture<Void> groupShutdownFuture = new CompletableFuture<>(); if (bootstrap != null) { EventLoopGroup group = bootstrap.config().group(); if (group != null && !group.isShutdown()) { group.shutdownGracefully(0L, 0L, TimeUnit.MILLISECONDS) .addListener( finished -> { if (finished.isSuccess()) { groupShutdownFuture.complete(null); } else { groupShutdownFuture.completeExceptionally( finished.cause()); } }); } else { groupShutdownFuture.complete(null); } } else { groupShutdownFuture.complete(null); } final CompletableFuture<Void> handlerShutdownFuture = new CompletableFuture<>(); if (handler == null) { handlerShutdownFuture.complete(null); } else { handler.shutdown() .whenComplete( (result, throwable) -> { if (throwable != null) { handlerShutdownFuture.completeExceptionally(throwable); } else { handlerShutdownFuture.complete(null); } }); } final CompletableFuture<Void> queryExecShutdownFuture = CompletableFuture.runAsync( () -> { if (queryExecutor != null) { ExecutorUtils.gracefulShutdown( 10L, TimeUnit.MINUTES, queryExecutor); } }); CompletableFuture.allOf( queryExecShutdownFuture, groupShutdownFuture, handlerShutdownFuture) .whenComplete( (result, throwable) -> { if (throwable != null) { shutdownFuture.completeExceptionally(throwable); } else { shutdownFuture.complete(null); } }); } return serverShutdownFuture.get(); }
3.68
morf_NamedParameterPreparedStatement_executeQuery
/** * @see PreparedStatement#executeQuery() * @return a <code>ResultSet</code> object that contains the data produced by the * query; never <code>null</code> * @exception SQLException if a database access error occurs; * this method is called on a closed <code>PreparedStatement</code> or the SQL * statement does not return a <code>ResultSet</code> object * @throws SQLTimeoutException when the driver has determined that the * timeout value that was specified by the {@code setQueryTimeout} * method has been exceeded and has at least attempted to cancel * the currently running {@code Statement} */ public ResultSet executeQuery() throws SQLException { this.statement.setFetchDirection(ResultSet.FETCH_FORWARD); return statement.executeQuery(); }
3.68
flink_KeyGroupRange_of
/** * Factory method that also handles creation of empty key-groups. * * @param startKeyGroup start of the range (inclusive) * @param endKeyGroup end of the range (inclusive) * @return the key-group from start to end or an empty key-group range. */ public static KeyGroupRange of(int startKeyGroup, int endKeyGroup) { return startKeyGroup <= endKeyGroup ? new KeyGroupRange(startKeyGroup, endKeyGroup) : EMPTY_KEY_GROUP_RANGE; }
3.68
AreaShop_GeneralRegion_getConfig
/** * Get the config file that is used to store the region information. * @return The config file that stores the region information */ public YamlConfiguration getConfig() { return config; }
3.68
dubbo_RpcServiceContext_getLocalHostName
/** * get local host name. * * @return local host name */ @Override public String getLocalHostName() { String host = localAddress == null ? null : localAddress.getHostName(); if (StringUtils.isEmpty(host)) { return getLocalHost(); } return host; }
3.68
hadoop_DiskBalancerDataNode_getVolumeCount
/** * Returns how many volumes are in the DataNode. * * @return int */ public int getVolumeCount() { return volumeCount; }
3.68
zxing_InvertedLuminanceSource_invert
/** * @return original delegate {@link LuminanceSource} since invert undoes itself */ @Override public LuminanceSource invert() { return delegate; }
3.68
hadoop_TaskInfo_getTaskVCores
/** * @return Vcores used by the task. */ public long getTaskVCores() { return maxVcores; }
3.68
hadoop_BufferPool_numCreated
// Number of ByteBuffers created so far. public synchronized int numCreated() { return pool.numCreated(); }
3.68
morf_SqlServerDialect_dropDefaultForColumn
/** * Returns SQL to drop the DEFAULT constraint for a particular column on a * particular table. * * @param table The name of the table on which the column resides. * @param column The name of the column. * @return SQL to drop the DEFAULT constraint for the specified column on the * specified table. */ private String dropDefaultForColumn(final Table table, final Column column) { // This SQL came from http://stackoverflow.com/questions/8641954/how-to-drop-column-with-constraint return dropDefaultForColumnSql .replace("{table}", table.getName()) .replace("{column}", column.getName()); }
3.68
hudi_HoodieTableConfig_update
/** * Upserts the table config with the set of properties passed in. We implement a fail-safe backup protocol * here for safely updating with recovery and also ensuring the table config continues to be readable. */ public static void update(FileSystem fs, Path metadataFolder, Properties updatedProps) { modify(fs, metadataFolder, updatedProps, ConfigUtils::upsertProperties); }
3.68
framework_ListDataProvider_getItems
/** * Returns the underlying data items. * * @return the underlying data items */ public Collection<T> getItems() { return backend; }
3.68
hbase_Branch1CoprocessorMethods_addMethods
/* * This list of methods was generated from HBase 1.4.4. */ private void addMethods() { /* BulkLoadObserver */ addMethod("prePrepareBulkLoad", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest"); addMethod("preCleanupBulkLoad", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest"); /* EndpointObserver */ addMethod("postEndpointInvocation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "com.google.protobuf.Service", "java.lang.String", "com.google.protobuf.Message", "com.google.protobuf.Message.Builder"); addMethod("preEndpointInvocation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "com.google.protobuf.Service", "java.lang.String", "com.google.protobuf.Message"); /* MasterObserver */ addMethod("preCreateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); addMethod("postCreateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); addMethod("preDeleteTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("postDeleteTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("preDeleteTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("preMove", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.ServerName", "org.apache.hadoop.hbase.ServerName"); addMethod("preCreateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); addMethod("postCreateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HTableDescriptor", "org.apache.hadoop.hbase.HRegionInfo[]"); addMethod("postMove", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.ServerName", "org.apache.hadoop.hbase.ServerName"); addMethod("postDeleteTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("preTruncateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("postTruncateTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("preTruncateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("postTruncateTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("preModifyTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); addMethod("postModifyTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); addMethod("preModifyTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); addMethod("postModifyTableHandler", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HTableDescriptor"); addMethod("preAddColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); addMethod("postAddColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); addMethod("preAddColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); addMethod("postAddColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); addMethod("preModifyColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); addMethod("postModifyColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); addMethod("preModifyColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); addMethod("postModifyColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.HColumnDescriptor"); addMethod("preDeleteColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "byte[]"); addMethod("postDeleteColumn", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "byte[]"); addMethod("preDeleteColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "byte[]"); addMethod("postDeleteColumnHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "byte[]"); addMethod("preEnableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("postEnableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("preEnableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("postEnableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("preDisableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("postDisableTable", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("preDisableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("postDisableTableHandler", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("preAbortProcedure", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.procedure2.ProcedureExecutor", "long"); addMethod("postAbortProcedure", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("preListProcedures", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postListProcedures", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List"); addMethod("preAssign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", 
"org.apache.hadoop.hbase.HRegionInfo"); addMethod("postAssign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo"); addMethod("preUnassign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "boolean"); addMethod("postUnassign", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "boolean"); addMethod("preRegionOffline", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo"); addMethod("postRegionOffline", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo"); addMethod("preBalance", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postBalance", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List"); addMethod("preSetSplitOrMergeEnabled", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean", "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); addMethod("postSetSplitOrMergeEnabled", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean", "org.apache.hadoop.hbase.client.Admin.MasterSwitchType"); addMethod("preBalanceSwitch", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); addMethod("postBalanceSwitch", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean", "boolean"); addMethod("preShutdown", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("preStopMaster", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postStartMaster", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("preMasterInitialization", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("preSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor"); addMethod("postSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor"); addMethod("preListSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); addMethod("postListSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); addMethod("preCloneSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor"); addMethod("postCloneSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor"); addMethod("preRestoreSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor"); addMethod("postRestoreSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription", "org.apache.hadoop.hbase.HTableDescriptor"); addMethod("preDeleteSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); addMethod("postDeleteSnapshot", "org.apache.hadoop.hbase.coprocessor.ObserverContext", 
"org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"); addMethod("preGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.util.List"); addMethod("preGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.util.List", "java.lang.String"); addMethod("postGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.util.List", "java.lang.String"); addMethod("postGetTableDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List"); addMethod("preGetTableNames", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.lang.String"); addMethod("postGetTableNames", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.lang.String"); addMethod("preCreateNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.NamespaceDescriptor"); addMethod("postCreateNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.NamespaceDescriptor"); addMethod("preDeleteNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String"); addMethod("postDeleteNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String"); addMethod("preModifyNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.NamespaceDescriptor"); addMethod("postModifyNamespace", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.NamespaceDescriptor"); addMethod("preGetNamespaceDescriptor", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String"); addMethod("postGetNamespaceDescriptor", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.NamespaceDescriptor"); addMethod("preListNamespaceDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List"); addMethod("postListNamespaceDescriptors", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List"); addMethod("preTableFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("postTableFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName"); addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); addMethod("preSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); addMethod("postSetUserQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); addMethod("preSetTableQuota", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); addMethod("postSetTableQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.TableName", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); addMethod("preSetNamespaceQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); addMethod("postSetNamespaceQuota", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas"); addMethod("preDispatchMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.HRegionInfo"); addMethod("postDispatchMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.HRegionInfo"); addMethod("preGetClusterStatus", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postGetClusterStatus", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.ClusterStatus"); addMethod("preClearDeadServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postClearDeadServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "java.util.List"); addMethod("preMoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.lang.String"); addMethod("postMoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.lang.String"); addMethod("preMoveTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.lang.String"); addMethod("postMoveTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.lang.String"); addMethod("preMoveServersAndTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.util.Set", "java.lang.String"); addMethod("postMoveServersAndTables", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set", "java.util.Set", "java.lang.String"); addMethod("preAddRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String"); addMethod("postAddRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String"); addMethod("preRemoveRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String"); addMethod("postRemoveRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String"); addMethod("preRemoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set"); addMethod("postRemoveServers", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.Set"); addMethod("preBalanceRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String"); addMethod("postBalanceRSGroup", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.lang.String", "boolean"); /* RegionObserver */ addMethod("preOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postLogReplay", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("preFlushScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.KeyValueScanner", 
"org.apache.hadoop.hbase.regionserver.InternalScanner", "long"); addMethod("preFlushScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.KeyValueScanner", "org.apache.hadoop.hbase.regionserver.InternalScanner"); addMethod("preFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.InternalScanner"); addMethod("preFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.StoreFile"); addMethod("postFlush", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("preCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "java.util.List"); addMethod("preCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); addMethod("postCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "com.google.common.collect.ImmutableList"); addMethod("postCompactSelection", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "com.google.common.collect.ImmutableList", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); addMethod("preCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.InternalScanner", "org.apache.hadoop.hbase.regionserver.ScanType"); addMethod("preCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.InternalScanner", "org.apache.hadoop.hbase.regionserver.ScanType", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); addMethod("preClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", "org.apache.hadoop.hbase.regionserver.ScanType", "long", "org.apache.hadoop.hbase.regionserver.InternalScanner"); addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", "org.apache.hadoop.hbase.regionserver.ScanType", "long", "org.apache.hadoop.hbase.regionserver.InternalScanner", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest", "long"); addMethod("preCompactScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "java.util.List", "org.apache.hadoop.hbase.regionserver.ScanType", "long", "org.apache.hadoop.hbase.regionserver.InternalScanner", "org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); addMethod("postCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.StoreFile"); addMethod("postCompact", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.regionserver.StoreFile", 
"org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest"); addMethod("preSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]"); addMethod("preSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); addMethod("preSplitBeforePONR", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "java.util.List"); addMethod("preSplitAfterPONR", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("preRollBackSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postRollBackSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postCompleteSplit", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "boolean"); addMethod("preGetClosestRowBefore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "org.apache.hadoop.hbase.client.Result"); addMethod("postGetClosestRowBefore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "org.apache.hadoop.hbase.client.Result"); addMethod("preGetOp", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Get", "java.util.List"); addMethod("postGetOp", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Get", "java.util.List"); addMethod("preExists", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Get", "boolean"); addMethod("postExists", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Get", "boolean"); addMethod("prePut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Put", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", "org.apache.hadoop.hbase.client.Durability"); addMethod("postPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Put", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", "org.apache.hadoop.hbase.client.Durability"); addMethod("preDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Delete", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", "org.apache.hadoop.hbase.client.Durability"); addMethod("prePrepareTimeStampForDeleteVersion", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Mutation", "org.apache.hadoop.hbase.Cell", "byte[]", "org.apache.hadoop.hbase.client.Get"); addMethod("postDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Delete", "org.apache.hadoop.hbase.regionserver.wal.WALEdit", "org.apache.hadoop.hbase.client.Durability"); addMethod("preBatchMutate", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); addMethod("postBatchMutate", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress"); addMethod("postStartRegionOperation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region.Operation"); addMethod("postCloseRegionOperation", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region.Operation"); addMethod("postBatchMutateIndispensably", 
"org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress", "boolean"); addMethod("preCheckAndPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", "boolean"); addMethod("preCheckAndPutAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", "boolean"); addMethod("postCheckAndPut", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Put", "boolean"); addMethod("preCheckAndDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", "boolean"); addMethod("preCheckAndDeleteAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", "boolean"); addMethod("postCheckAndDelete", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "org.apache.hadoop.hbase.filter.CompareFilter.CompareOp", "org.apache.hadoop.hbase.filter.ByteArrayComparable", "org.apache.hadoop.hbase.client.Delete", "boolean"); addMethod("preIncrementColumnValue", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "long", "boolean"); addMethod("postIncrementColumnValue", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "byte[]", "byte[]", "long", "boolean", "long"); addMethod("preAppend", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Append"); addMethod("preAppendAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Append"); addMethod("postAppend", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Append", "org.apache.hadoop.hbase.client.Result"); addMethod("preIncrement", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Increment"); addMethod("preIncrementAfterRowLock", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Increment"); addMethod("postIncrement", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Increment", "org.apache.hadoop.hbase.client.Result"); addMethod("preScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Scan", "org.apache.hadoop.hbase.regionserver.RegionScanner"); addMethod("preStoreScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Store", "org.apache.hadoop.hbase.client.Scan", "java.util.NavigableSet", "org.apache.hadoop.hbase.regionserver.KeyValueScanner"); addMethod("postScannerOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.client.Scan", 
"org.apache.hadoop.hbase.regionserver.RegionScanner"); addMethod("preScannerNext", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.InternalScanner", "java.util.List", "int", "boolean"); addMethod("postScannerNext", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.InternalScanner", "java.util.List", "int", "boolean"); addMethod("postScannerFilterRow", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.InternalScanner", "byte[]", "int", "short", "boolean"); addMethod("preScannerClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.InternalScanner"); addMethod("postScannerClose", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.InternalScanner"); addMethod("preWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); addMethod("preWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); addMethod("postWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); addMethod("postWALRestore", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); addMethod("preBulkLoadHFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List"); addMethod("preCommitStoreFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "java.util.List"); addMethod("postCommitStoreFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "byte[]", "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path"); addMethod("postBulkLoadHFile", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "boolean"); addMethod("preStoreFileReaderOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.fs.FileSystem", "org.apache.hadoop.fs.Path", "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", "long", "org.apache.hadoop.hbase.io.hfile.CacheConfig", "org.apache.hadoop.hbase.io.Reference", "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); addMethod("postStoreFileReaderOpen", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.fs.FileSystem", "org.apache.hadoop.fs.Path", "org.apache.hadoop.hbase.io.FSDataInputStreamWrapper", "long", "org.apache.hadoop.hbase.io.hfile.CacheConfig", "org.apache.hadoop.hbase.io.Reference", "org.apache.hadoop.hbase.regionserver.StoreFile.Reader"); addMethod("postMutationBeforeWAL", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType", "org.apache.hadoop.hbase.client.Mutation", "org.apache.hadoop.hbase.Cell", "org.apache.hadoop.hbase.Cell"); addMethod("postInstantiateDeleteTracker", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.DeleteTracker"); /* RegionServerObserver */ addMethod("preMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", 
"org.apache.hadoop.hbase.regionserver.Region"); addMethod("preStopRegionServer", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); addMethod("preMergeCommit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", "java.util.List"); addMethod("postMergeCommit", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); addMethod("preRollBackMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); addMethod("postRollBackMerge", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.regionserver.Region", "org.apache.hadoop.hbase.regionserver.Region"); addMethod("preRollWALWriterRequest", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postRollWALWriterRequest", "org.apache.hadoop.hbase.coprocessor.ObserverContext"); addMethod("postCreateReplicationEndPoint", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.replication.ReplicationEndpoint"); addMethod("preReplicateLogEntries", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "org.apache.hadoop.hbase.CellScanner"); addMethod("postReplicateLogEntries", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "java.util.List", "org.apache.hadoop.hbase.CellScanner"); /* WALObserver */ addMethod("preWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); addMethod("preWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); addMethod("postWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.regionserver.wal.HLogKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); addMethod("postWALWrite", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.hbase.HRegionInfo", "org.apache.hadoop.hbase.wal.WALKey", "org.apache.hadoop.hbase.regionserver.wal.WALEdit"); addMethod("preWALRoll", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path"); addMethod("postWALRoll", "org.apache.hadoop.hbase.coprocessor.ObserverContext", "org.apache.hadoop.fs.Path", "org.apache.hadoop.fs.Path"); }
3.68
morf_AbstractSelectStatement_addFields
/**
 * @param aliasedFields The fields to add
 * @deprecated Do not use {@link AbstractSelectStatement} mutably. Create a new statement.
 */
@Deprecated
protected void addFields(Iterable<? extends AliasedFieldBuilder> aliasedFields) {
  AliasedField.assetImmutableDslDisabled();
  fields.addAll(FluentIterable.from(aliasedFields)
      .transform(Builder.Helper.<AliasedField>buildAll()).toList());
}
3.68
hadoop_MoveStep_setSourceVolume
/**
 * Set source volume.
 *
 * @param sourceVolume - volume
 */
public void setSourceVolume(DiskBalancerVolume sourceVolume) {
  this.sourceVolume = sourceVolume;
}
3.68
morf_WhenCondition_getCriterion
/**
 * @return the criterion
 */
public Criterion getCriterion() {
  return criterion;
}
3.68
dubbo_PathAndInvokerMapper_addPathAndInvoker
/**
 * deploy path metadata
 *
 * @param metadataMap
 * @param invoker
 */
public void addPathAndInvoker(Map<PathMatcher, RestMethodMetadata> metadataMap, Invoker invoker) {
    metadataMap.entrySet().stream().forEach(entry -> {
        PathMatcher pathMatcher = entry.getKey();
        if (pathMatcher.hasPathVariable()) {
            addPathMatcherToPathMap(
                    pathMatcher,
                    pathToServiceMapContainPathVariable,
                    InvokerAndRestMethodMetadataPair.pair(invoker, entry.getValue()));
        } else {
            addPathMatcherToPathMap(
                    pathMatcher,
                    pathToServiceMapNoPathVariable,
                    InvokerAndRestMethodMetadataPair.pair(invoker, entry.getValue()));
        }
    });
}
3.68
flink_WindowKey_replace
/** Replace the currently stored key and window by the given new key and new window. */
public WindowKey replace(long window, RowData key) {
    this.window = window;
    this.key = key;
    return this;
}
3.68
dubbo_HttpHeaderUtil_addRequestAttachments
/**
 * add consumer attachment to request
 *
 * @param requestTemplate
 * @param attachmentMap
 */
public static void addRequestAttachments(RequestTemplate requestTemplate, Map<String, Object> attachmentMap) {
    Map<String, List<String>> attachments = createAttachments(attachmentMap);
    attachments.entrySet().forEach(attachment -> {
        requestTemplate.addHeaders(appendPrefixToAttachRealHeader(attachment.getKey()), attachment.getValue());
    });
}
3.68
hadoop_AbstractDTService_getCanonicalUri
/**
 * Get the canonical URI of the filesystem, which is what is
 * used to identify the tokens.
 * @return the URI.
 */
public URI getCanonicalUri() {
  return canonicalUri;
}
3.68
hadoop_FileBasedCopyListing_validatePaths
/** {@inheritDoc} */
@Override
protected void validatePaths(DistCpContext context)
    throws IOException, InvalidInputException {
}
3.68
pulsar_WorkerServiceLoader_getWorkerServiceDefinition
/**
 * Retrieve the functions worker service definition from the provided worker service nar package.
 *
 * @param narPath the path to the worker service NAR package
 * @return the worker service definition
 * @throws IOException when fail to load the worker service or get the definition
 */
public static WorkerServiceDefinition getWorkerServiceDefinition(String narPath, String narExtractionDirectory)
        throws IOException {
    try (NarClassLoader ncl = NarClassLoaderBuilder.builder()
            .narFile(new File(narPath))
            .extractionDirectory(narExtractionDirectory)
            .build()) {
        return getWorkerServiceDefinition(ncl);
    }
}
3.68
hbase_TableIntegrityErrorHandlerImpl_handleDegenerateRegion
/**
 * {@inheritDoc}
 */
@Override
public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException {
}
3.68
hbase_SaslAuthMethod_getCode
/**
 * Returns the unique value to identify this authentication method among other HBase auth methods.
 */
public byte getCode() {
  return code;
}
3.68
hbase_SimpleLoadBalancer_overallNeedsBalance
/**
 * A checker function to decide, when we want overall balance and a certain table has already been
 * balanced, whether we still need to re-distribute regions of this table to achieve the state of
 * overall-balance.
 * @return true if this table should be balanced.
 */
private boolean overallNeedsBalance() {
  int floor = (int) Math.floor(avgLoadOverall * (1 - overallSlop));
  int ceiling = (int) Math.ceil(avgLoadOverall * (1 + overallSlop));
  int max = 0, min = Integer.MAX_VALUE;
  for (ServerAndLoad server : serverLoadList) {
    max = Math.max(server.getLoad(), max);
    min = Math.min(server.getLoad(), min);
  }
  if (max <= ceiling && min >= floor) {
    if (LOG.isTraceEnabled()) {
      // If nothing to balance, then don't say anything unless trace-level logging.
      LOG.trace("Skipping load balancing because cluster is balanced at overall level");
    }
    return false;
  }
  return true;
}
3.68
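The slop check above is plain interval arithmetic around the average load. A minimal sketch of the same calculation, using hypothetical inputs (an average overall load of 10 regions per server and a slop of 0.2, neither taken from the snippet):

public class SlopBandSketch {
  public static void main(String[] args) {
    // Hypothetical inputs: average overall load of 10 regions per server, slop of 0.2.
    double avgLoadOverall = 10.0;
    double overallSlop = 0.2;
    // Same arithmetic as overallNeedsBalance(): the acceptable load band is [floor, ceiling].
    int floor = (int) Math.floor(avgLoadOverall * (1 - overallSlop));  // 8
    int ceiling = (int) Math.ceil(avgLoadOverall * (1 + overallSlop)); // 12
    System.out.println("Cluster counts as balanced if every server holds "
        + floor + " to " + ceiling + " regions");
  }
}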
hbase_HRegionFileSystem_createRegionOnFileSystem
/**
 * Create a new Region on file-system.
 * @param conf the {@link Configuration} to use
 * @param fs {@link FileSystem} from which to add the region
 * @param tableDir {@link Path} to where the table is being stored
 * @param regionInfo {@link RegionInfo} for region to be added
 * @throws IOException if the region creation fails due to a FileSystem exception.
 */
public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf,
    final FileSystem fs, final Path tableDir, final RegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);

  // We only create a .regioninfo and the region directory if this is the default region replica
  if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
    Path regionDir = regionFs.getRegionDir();
    if (fs.exists(regionDir)) {
      LOG.warn("Trying to create a region that already exists on disk: " + regionDir);
    } else {
      // Create the region directory
      if (!createDirOnFileSystem(fs, conf, regionDir)) {
        LOG.warn("Unable to create the region directory: " + regionDir);
        throw new IOException("Unable to create region directory: " + regionDir);
      }
    }

    // Write HRI to a file in case we need to recover hbase:meta
    regionFs.writeRegionInfoOnFilesystem(false);
  } else {
    if (LOG.isDebugEnabled()) LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
  }
  return regionFs;
}
3.68
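A rough usage sketch, not taken from the snippet above: callers typically build the RegionInfo with RegionInfoBuilder and pass in the table directory they already track. The table name and path below are hypothetical placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

public class CreateRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical table directory; real callers derive it from the HBase root dir.
    Path tableDir = new Path("/hbase/data/default/demo_table");
    RegionInfo regionInfo = RegionInfoBuilder
        .newBuilder(TableName.valueOf("demo_table"))
        .build();
    // Creates the region directory and .regioninfo for the default replica.
    HRegionFileSystem regionFs =
        HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, regionInfo);
    System.out.println("Region dir: " + regionFs.getRegionDir());
  }
}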
hadoop_DiskBalancerWorkItem_setSecondsElapsed
/**
 * Sets number of seconds elapsed.
 *
 * This is updated whenever we update the other counters.
 * @param secondsElapsed - seconds elapsed.
 */
public void setSecondsElapsed(long secondsElapsed) {
  this.secondsElapsed = secondsElapsed;
}
3.68
hbase_MetricsREST_incrementFailedIncrementRequests
/**
 * @param inc How much to add to failedIncrementCount.
 */
public void incrementFailedIncrementRequests(final int inc) {
  source.incrementFailedIncrementRequests(inc);
}
3.68
hbase_BucketAllocator_usedCount
/**
 * How many items are currently taking up space in this bucket size's buckets
 */
public long usedCount() {
  return usedCount;
}
3.68
flink_DispatcherResourceManagerComponent_stopApplication
/**
 * Deregister the Flink application from the resource management system by signalling the {@link
 * ResourceManager} and also stopping the process.
 *
 * @param applicationStatus to terminate the application with
 * @param diagnostics additional information about the shut down, can be {@code null}
 * @return Future which is completed once the shut down is complete
 */
public CompletableFuture<Void> stopApplication(
        final ApplicationStatus applicationStatus, final @Nullable String diagnostics) {
    return internalShutdown(
            () -> resourceManagerService.deregisterApplication(applicationStatus, diagnostics));
}
3.68
framework_VScrollTable_isHierarchyColumn
/**
 * This method exists for the needs of {@link VTreeTable} only.
 *
 * @return <code>true</code> if this is the hierarchyColumn's header cell,
 *         <code>false</code> otherwise
 */
private boolean isHierarchyColumn() {
    int hierarchyColumnIndex = getHierarchyColumnIndex();
    return hierarchyColumnIndex >= 0
            && tHead.visibleCells.indexOf(this) == hierarchyColumnIndex;
}
3.68
morf_UpgradeTableResolution_getModifiedTables
/**
 * @param upgradeStepName name of the class of the upgrade step to be checked
 * @return all tables modified by given upgrade step or null if this upgrade
 *         step hasn't been processed
 */
public Set<String> getModifiedTables(String upgradeStepName) {
  return resolvedTablesMap.get(upgradeStepName) == null ? null
      : resolvedTablesMap.get(upgradeStepName).getModifiedTables();
}
3.68
flink_RateLimiterStrategy_noOp
/** Creates a convenience {@code RateLimiterStrategy} that is not limiting the records rate. */
static RateLimiterStrategy noOp() {
    return parallelism -> new NoOpRateLimiter();
}
3.68
hbase_RestoreSnapshotHelper_restoreMobRegion
/**
 * Restore mob region by removing files not in the snapshot and adding the missing ones from the
 * snapshot.
 */
private void restoreMobRegion(final RegionInfo regionInfo,
    final SnapshotRegionManifest regionManifest) throws IOException {
  if (regionManifest == null) {
    return;
  }
  restoreRegion(regionInfo, regionManifest,
    MobUtils.getMobRegionPath(conf, tableDesc.getTableName()));
}
3.68
hbase_ZKConfig_getZKQuorumServersString
/**
 * Return the ZK Quorum servers string given the specified configuration.
 * @return Quorum servers
 */
public static String getZKQuorumServersString(Configuration conf) {
  setZooKeeperClientSystemProperties(HConstants.ZK_CFG_PROPERTY_PREFIX, conf);
  return getZKQuorumServersStringFromHbaseConfig(conf);
}
3.68
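For context, a minimal sketch of how this helper might be called from client code; the printed quorum depends entirely on whatever hbase-site.xml is on the classpath, and nothing below is specific to the snippet's project.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;

public class QuorumSketch {
  public static void main(String[] args) {
    // Loads hbase-default.xml / hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // Produces a "host1:port,host2:port,..." string for the configured ZooKeeper ensemble.
    String quorum = ZKConfig.getZKQuorumServersString(conf);
    System.out.println("ZooKeeper quorum: " + quorum);
  }
}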
hadoop_AbstractDelegationTokenBinding_getUserAgentField
/**
 * Return a string for use in building up the User-Agent field, so
 * it gets into the S3 access logs. Useful for diagnostics.
 * @return a string for the S3 logs or "" for "nothing to add"
 */
public String getUserAgentField() {
  return "";
}
3.68
morf_DatabaseDataSetConsumer_getDataSource
/**
 * @return the dataSource
 */
DataSource getDataSource() {
  return dataSource;
}
3.68
dubbo_URLParam_addParameter
/**
 * Add parameters to a new URLParam.
 *
 * @param key   key
 * @param value value
 * @return A new URLParam
 */
public URLParam addParameter(String key, String value) {
    if (StringUtils.isEmpty(key) || StringUtils.isEmpty(value)) {
        return this;
    }
    return addParameters(Collections.singletonMap(key, value));
}
3.68
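A brief usage sketch of the copy-on-write behaviour: the original URLParam is left untouched and a new instance carries the extra entry. It assumes a URLParam.parse(String) factory is available and uses hypothetical parameter names.

import org.apache.dubbo.common.url.component.URLParam;

public class UrlParamSketch {
  public static void main(String[] args) {
    // Hypothetical query string; URLParam.parse(String) is assumed to be available here.
    URLParam base = URLParam.parse("timeout=1000&retries=2");
    // addParameter returns a new URLParam; the original instance is unchanged.
    URLParam withGroup = base.addParameter("group", "demo");
    System.out.println(base.getParameter("group"));       // null - original untouched
    System.out.println(withGroup.getParameter("group"));  // demo
  }
}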