Columns: name (string, lengths 12-178), code_snippet (string, lengths 8-36.5k), score (float64, range 3.26-3.68)
hadoop_AbfsCountersImpl_toMap
/** * {@inheritDoc} * * Map of all the counters for testing. * * @return a map of the IOStatistics counters. */ @VisibleForTesting @Override public Map<String, Long> toMap() { return ioStatisticsStore.counters(); }
3.68
hbase_RowResource_checkAndPut
/** * Validates the input request parameters, parses columns from CellSetModel, and invokes * checkAndPut on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ Response checkAndPut(final CellSetModel model) { Table table = null; try { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); if (key == null) { key = rowspec.getRow(); } List<CellModel> cellModels = rowModel.getCells(); int cellModelCount = cellModels.size(); if (key == null || cellModelCount <= 1) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity( "Bad request: Either row key is null or no data found for columns specified." + CRLF) .build(); } Put put = new Put(key); boolean retValue; CellModel valueToCheckCell = cellModels.get(cellModelCount - 1); byte[] valueToCheckColumn = valueToCheckCell.getColumn(); byte[][] valueToPutParts = CellUtil.parseColumn(valueToCheckColumn); if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) { CellModel valueToPutCell = null; // Copy all the cells to the Put request // and track if the check cell's latest value is also sent for (int i = 0, n = cellModelCount - 1; i < n; i++) { CellModel cell = cellModels.get(i); byte[] col = cell.getColumn(); if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity("Bad request: Column found to be null." + CRLF).build(); } byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity("Bad request" + CRLF).build(); } put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(cell.getTimestamp()) .setType(Type.Put).setValue(cell.getValue()).build()); if (Bytes.equals(col, valueToCheckCell.getColumn())) { valueToPutCell = cell; } } if (valueToPutCell == null) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity("Bad request: The column to put and check do not match." + CRLF).build(); } else { retValue = table.checkAndMutate(key, valueToPutParts[0]).qualifier(valueToPutParts[1]) .ifEquals(valueToCheckCell.getValue()).thenPut(put); } } else { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity("Bad request: Column incorrectly specified." + CRLF).build(); } if (LOG.isTraceEnabled()) { LOG.trace("CHECK-AND-PUT " + put.toString() + ", returns " + retValue); } if (!retValue) { servlet.getMetrics().incrementFailedPutRequests(1); return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) .entity("Value not Modified" + CRLF).build(); } ResponseBuilder response = Response.ok(); servlet.getMetrics().incrementSucessfulPutRequests(1); return response.build(); } catch (Exception e) { servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } finally { if (table != null) { try { table.close(); } catch (IOException ioe) { LOG.debug("Exception received while closing the table", ioe); } } } }
3.68
hbase_CellVisibility_quote
/** * Helps in quoting authentication Strings. Use this if unicode characters are to be used in the * expression, or special characters like '(', ')', '"', '\', '&amp;', '|', '!'. */ public static String quote(byte[] auth) { int escapeChars = 0; for (int i = 0; i < auth.length; i++) if (auth[i] == '"' || auth[i] == '\\') escapeChars++; byte[] escapedAuth = new byte[auth.length + escapeChars + 2]; int index = 1; for (int i = 0; i < auth.length; i++) { if (auth[i] == '"' || auth[i] == '\\') { escapedAuth[index++] = '\\'; } escapedAuth[index++] = auth[i]; } escapedAuth[0] = '"'; escapedAuth[escapedAuth.length - 1] = '"'; return Bytes.toString(escapedAuth); }
3.68
morf_FieldReference_field
/** * Constructs a new field with an alias on a given table. * * @param table the table on which the field exists * @param name the name of the field. * @return The field reference. */ public static Builder field(TableReference table, String name) { return new Builder(table, name); }
3.68
morf_HumanReadableStatementHelper_generateMergeStatementString
/** * Generates a human-readable description of a data merge operation. * * @param statement the data upgrade statement to describe. * @return a string containing the human-readable description of the operation. */ private static String generateMergeStatementString(final MergeStatement statement) { final SelectStatement source = statement.getSelectStatement(); if (source.getTable() == null) { // No select statement; single record merge final StringBuilder sb = new StringBuilder(); sb.append(String.format("Merge record into %s:", statement.getTable().getName())); for (AliasedField field : source.getFields()) { sb.append(generateAliasedFieldAssignmentString(field)); } return sb.toString(); } else { // Multiple record merge return String.format("Merge records into %s from %s%s", statement.getTable().getName(), source.getTable().getName(), generateWhereClause(source.getWhereCriterion())); } }
3.68
flink_BackgroundTask_abort
/** * Abort the execution of this background task. This method has only an effect if the background * task has not been started yet. */ void abort() { isAborted = true; }
3.68
hadoop_AbstractTracking_copy
/** * Subclass instances may call this method during cloning to copy the values of * all properties stored in this base class. * * @param dest AbstractTracking destination for copying properties */ protected void copy(AbstractTracking dest) { dest.beginTime = beginTime; dest.endTime = endTime; }
3.68
dubbo_DefaultApplicationDeployer_initialize
/** * Initialize */ @Override public void initialize() { if (initialized) { return; } // Ensure that initialization is performed only once under concurrent calls synchronized (startLock) { if (initialized) { return; } onInitialize(); // register shutdown hook registerShutdownHook(); startConfigCenter(); loadApplicationConfigs(); initModuleDeployers(); initMetricsReporter(); initMetricsService(); // @since 2.7.8 startMetadataCenter(); initialized = true; if (logger.isInfoEnabled()) { logger.info(getIdentifier() + " has been initialized!"); } } }
3.68
flink_StreamTaskNetworkInput_getRecordDeserializers
// Initialize one deserializer per input channel private static Map< InputChannelInfo, SpillingAdaptiveSpanningRecordDeserializer< DeserializationDelegate<StreamElement>>> getRecordDeserializers( CheckpointedInputGate checkpointedInputGate, IOManager ioManager) { return checkpointedInputGate.getChannelInfos().stream() .collect( toMap( identity(), unused -> new SpillingAdaptiveSpanningRecordDeserializer<>( ioManager.getSpillingDirectoriesPaths()))); }
3.68
flink_ExecutionConfig_setGlobalJobParameters
/** * Register a custom, serializable user configuration object. * * @param globalJobParameters Custom user configuration object */ public void setGlobalJobParameters(GlobalJobParameters globalJobParameters) { Preconditions.checkNotNull(globalJobParameters, "globalJobParameters shouldn't be null"); setGlobalJobParameters(globalJobParameters.toMap()); }
3.68
rocketmq-connect_Worker_startTask
/** * Start the given tasks. * * @param newTasks map from connector name to task configurations * @throws Exception if a task fails to start */ private void startTask(Map<String, List<ConnectKeyValue>> newTasks) throws Exception { for (String connectorName : newTasks.keySet()) { for (ConnectKeyValue keyValue : newTasks.get(connectorName)) { int taskId = keyValue.getInt(ConnectorConfig.TASK_ID); ConnectorTaskId id = new ConnectorTaskId(connectorName, taskId); ErrorMetricsGroup errorMetricsGroup = new ErrorMetricsGroup(id, this.connectMetrics); String taskType = keyValue.getString(ConnectorConfig.TASK_TYPE); if (TaskType.DIRECT.name().equalsIgnoreCase(taskType)) { createDirectTask(id, keyValue); continue; } ClassLoader savedLoader = plugin.currentThreadLoader(); try { String connType = keyValue.getString(ConnectorConfig.CONNECTOR_CLASS); ClassLoader connectorLoader = plugin.delegatingLoader().connectorLoader(connType); savedLoader = Plugin.compareAndSwapLoaders(connectorLoader); // new task final Class<? extends Task> taskClass = plugin.currentThreadLoader().loadClass(keyValue.getString(ConnectorConfig.TASK_CLASS)).asSubclass(Task.class); final Task task = plugin.newTask(taskClass); // create key/value converter RecordConverter valueConverter = plugin.newConverter(keyValue, false, ConnectorConfig.VALUE_CONVERTER, workerConfig.getValueConverter(), Plugin.ClassLoaderUsage.CURRENT_CLASSLOADER); RecordConverter keyConverter = plugin.newConverter(keyValue, true, ConnectorConfig.KEY_CONVERTER, workerConfig.getKeyConverter(), Plugin.ClassLoaderUsage.CURRENT_CLASSLOADER); if (keyConverter == null) { keyConverter = plugin.newConverter(keyValue, true, ConnectorConfig.KEY_CONVERTER, workerConfig.getValueConverter(), Plugin.ClassLoaderUsage.PLUGINS); log.info("Set up the key converter {} for task {} using the worker config", keyConverter.getClass(), id); } else { log.info("Set up the key converter {} for task {} using the connector config", keyConverter.getClass(), id); } if (valueConverter == null) { valueConverter = plugin.newConverter(keyValue, false, ConnectorConfig.VALUE_CONVERTER, workerConfig.getKeyConverter(), Plugin.ClassLoaderUsage.PLUGINS); log.info("Set up the value converter {} for task {} using the worker config", valueConverter.getClass(), id); } else { log.info("Set up the value converter {} for task {} using the connector config", valueConverter.getClass(), id); } if (task instanceof SourceTask) { DefaultMQProducer producer = ConnectUtil.initDefaultMQProducer(workerConfig); TransformChain<ConnectRecord> transformChain = new TransformChain<>(keyValue, plugin); // create retry operator RetryWithToleranceOperator retryWithToleranceOperator = ReporterManagerUtil.createRetryWithToleranceOperator(keyValue, errorMetricsGroup); retryWithToleranceOperator.reporters(ReporterManagerUtil.sourceTaskReporters(id, keyValue, errorMetricsGroup)); WorkerSourceTask workerSourceTask = new WorkerSourceTask(workerConfig, id, (SourceTask) task, savedLoader, keyValue, positionManagementService, keyConverter, valueConverter, producer, workerState, connectStatsManager, connectStatsService, transformChain, retryWithToleranceOperator, statusListener, this.connectMetrics); Future future = taskExecutor.submit(workerSourceTask); // schedule offset committer sourceTaskOffsetCommitter.ifPresent(committer -> committer.schedule(id, workerSourceTask)); taskToFutureMap.put(workerSourceTask, future); this.pendingTasks.put(workerSourceTask, System.currentTimeMillis()); } else if (task instanceof SinkTask) { log.info("sink task config keyValue is {}", keyValue.getProperties()); DefaultLitePullConsumer consumer = ConnectUtil.initDefaultLitePullConsumer(workerConfig, false); // set consumer groupId String groupId = keyValue.getString(SinkConnectorConfig.TASK_GROUP_ID); if (StringUtils.isBlank(groupId)) { groupId = ConnectUtil.SYS_TASK_CG_PREFIX + id.connector(); } consumer.setConsumerGroup(groupId); Set<String> consumerGroupSet = ConnectUtil.fetchAllConsumerGroupList(workerConfig); if (!consumerGroupSet.contains(consumer.getConsumerGroup())) { ConnectUtil.createSubGroup(workerConfig, consumer.getConsumerGroup()); } TransformChain<ConnectRecord> transformChain = new TransformChain<>(keyValue, plugin); // create retry operator RetryWithToleranceOperator retryWithToleranceOperator = ReporterManagerUtil.createRetryWithToleranceOperator(keyValue, errorMetricsGroup); retryWithToleranceOperator.reporters(ReporterManagerUtil.sinkTaskReporters(id, keyValue, workerConfig, errorMetricsGroup)); WorkerSinkTask workerSinkTask = new WorkerSinkTask(workerConfig, id, (SinkTask) task, savedLoader, keyValue, keyConverter, valueConverter, consumer, workerState, connectStatsManager, connectStatsService, transformChain, retryWithToleranceOperator, ReporterManagerUtil.createWorkerErrorRecordReporter(keyValue, retryWithToleranceOperator, valueConverter), statusListener, this.connectMetrics); Future future = taskExecutor.submit(workerSinkTask); taskToFutureMap.put(workerSinkTask, future); this.pendingTasks.put(workerSinkTask, System.currentTimeMillis()); } Plugin.compareAndSwapLoaders(savedLoader); } catch (Exception e) { log.error("start worker task exception. config {}", JSON.toJSONString(keyValue), e); Plugin.compareAndSwapLoaders(savedLoader); } } } }
3.68
flink_SingleInputOperator_accept
/** * Accepts the visitor and applies it to this instance. The visitor's pre-visit method is called * and, if returning <tt>true</tt>, the visitor is recursively applied on the single input. * After the recursion returns, the post-visit method is called. * * @param visitor The visitor. * @see org.apache.flink.util.Visitable#accept(org.apache.flink.util.Visitor) */ @Override public void accept(Visitor<Operator<?>> visitor) { if (visitor.preVisit(this)) { this.input.accept(visitor); for (Operator<?> c : this.broadcastInputs.values()) { c.accept(visitor); } visitor.postVisit(this); } }
3.68
flink_RoundRobinOperatorStateRepartitioner_repartitionUnionState
/** Repartition UNION state. */ private void repartitionUnionState( Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> unionState, List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList) { for (Map<StreamStateHandle, OperatorStateHandle> mergeMap : mergeMapList) { for (Map.Entry< String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> e : unionState.entrySet()) { for (Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo> handleWithMetaInfo : e.getValue()) { OperatorStateHandle operatorStateHandle = mergeMap.get(handleWithMetaInfo.f0); if (operatorStateHandle == null) { operatorStateHandle = new OperatorStreamStateHandle( CollectionUtil.newHashMapWithExpectedSize( unionState.size()), handleWithMetaInfo.f0); mergeMap.put(handleWithMetaInfo.f0, operatorStateHandle); } operatorStateHandle .getStateNameToPartitionOffsets() .put(e.getKey(), handleWithMetaInfo.f1); } } } }
3.68
hbase_SegmentFactory_createImmutableSegmentByFlattening
// create flat immutable segment from non-flat immutable segment // for flattening public ImmutableSegment createImmutableSegmentByFlattening(CSLMImmutableSegment segment, CompactingMemStore.IndexType idxType, MemStoreSizing memstoreSizing, MemStoreCompactionStrategy.Action action) { ImmutableSegment res = null; switch (idxType) { case CHUNK_MAP: res = new CellChunkImmutableSegment(segment, memstoreSizing, action); break; case CSLM_MAP: assert false; // non-flat segment can not be the result of flattening break; case ARRAY_MAP: res = new CellArrayImmutableSegment(segment, memstoreSizing, action); break; } return res; }
3.68
pulsar_WindowManager_getEventCount
/** * Scans the event queue and returns number of events having * timestamp less than or equal to the reference time. * * @param referenceTime the reference timestamp in millis * @return the count of events with timestamp less than or equal to referenceTime */ public int getEventCount(long referenceTime) { int count = 0; for (Event<T> event : queue) { if (event.getTimestamp() <= referenceTime) { ++count; } } return count; }
3.68
flink_JarManifestParser_findOnlyEntryClass
/** * Returns a JAR file with its entry class as specified in the manifest. * * @param jarFiles JAR files to parse * @throws NoSuchElementException if no JAR file contains an entry class attribute * @throws IllegalArgumentException if multiple JAR files contain an entry class manifest * attribute */ static JarFileWithEntryClass findOnlyEntryClass(Iterable<File> jarFiles) throws IOException { List<JarFileWithEntryClass> jarsWithEntryClasses = new ArrayList<>(); for (File jarFile : jarFiles) { findEntryClass(jarFile) .ifPresent( entryClass -> jarsWithEntryClasses.add( new JarFileWithEntryClass(jarFile, entryClass))); } int size = jarsWithEntryClasses.size(); if (size == 0) { throw new NoSuchElementException("No JAR with manifest attribute for entry class"); } if (size == 1) { return jarsWithEntryClasses.get(0); } // else: size > 1 throw new IllegalArgumentException( "Multiple JARs with manifest attribute for entry class: " + jarsWithEntryClasses); }
3.68
framework_DesignAttributeHandler_getFormatter
/** * Returns the currently used formatter. All primitive types and all types * needed by Vaadin components are handled by that formatter. * * @return An instance of the formatter. */ public static DesignFormatter getFormatter() { return FORMATTER; }
3.68
hbase_KeyValue_getQualifierLength
/** Returns Qualifier length */ int getQualifierLength(int keyLength, int rlength, int flength) { return keyLength - (int) getKeyDataStructureSize(rlength, flength, 0); }
3.68
morf_DatabaseType_findByIdentifier
/** * Returns the registered database type by its identifier. * * <p>It can be assumed that performance of this method will be <code>O(1)</code> so is * suitable for repeated calling in performance code. There should be * few reasons for caching the response.</p> * * @param identifier The database type identifier (see {@link DatabaseType#identifier()}). * @return The {@link DatabaseType}. * @throws IllegalArgumentException If no such identifier is found. */ public static DatabaseType findByIdentifier(String identifier) { DatabaseType result = registeredTypes.get(identifier); if (result == null) throw new IllegalArgumentException("Identifier [" + identifier + "] not known"); return result; }
3.68
hbase_HRegion_replayWALCompactionMarker
/** * Call to complete a compaction. It's for the case where we find in the WAL a compaction that was * not finished. We could find one recovering a WAL after a regionserver crash. See HBASE-2331. */ void replayWALCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles, boolean removeFiles, long replaySeqId) throws IOException { try { checkTargetRegion(compaction.getEncodedRegionName().toByteArray(), "Compaction marker from WAL ", compaction); } catch (WrongRegionException wre) { if (RegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) { // skip the compaction marker since it is not for this region return; } throw wre; } synchronized (writestate) { if (replaySeqId < lastReplayedOpenRegionSeqId) { LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying compaction event :" + TextFormat.shortDebugString(compaction) + " because its sequence id " + replaySeqId + " is smaller than this region's " + "lastReplayedOpenRegionSeqId of " + lastReplayedOpenRegionSeqId); return; } if (replaySeqId < lastReplayedCompactionSeqId) { LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying compaction event :" + TextFormat.shortDebugString(compaction) + " because its sequence id " + replaySeqId + " is smaller than this region's " + "lastReplayedCompactionSeqId of " + lastReplayedCompactionSeqId); return; } else { lastReplayedCompactionSeqId = replaySeqId; } if (LOG.isDebugEnabled()) { LOG.debug(getRegionInfo().getEncodedName() + " : " + "Replaying compaction marker " + TextFormat.shortDebugString(compaction) + " with seqId=" + replaySeqId + " and lastReplayedOpenRegionSeqId=" + lastReplayedOpenRegionSeqId); } startRegionOperation(Operation.REPLAY_EVENT); try { HStore store = this.getStore(compaction.getFamilyName().toByteArray()); if (store == null) { LOG.warn(getRegionInfo().getEncodedName() + " : " + "Found Compaction WAL edit for deleted family:" + Bytes.toString(compaction.getFamilyName().toByteArray())); return; } store.replayCompactionMarker(compaction, pickCompactionFiles, removeFiles); logRegionFiles(); } catch (FileNotFoundException ex) { LOG.warn(getRegionInfo().getEncodedName() + " : " + "At least one of the store files in compaction: " + TextFormat.shortDebugString(compaction) + " doesn't exist any more. Skip loading the file(s)", ex); } finally { closeRegionOperation(Operation.REPLAY_EVENT); } } }
3.68
hbase_KeyValue_getKey
/** * Do not use unless you have to. Used internally for compacting and testing. Use * {@link #getRowArray()}, {@link #getFamilyArray()}, {@link #getQualifierArray()}, and * {@link #getValueArray()} if accessing a KeyValue client-side. * @return Copy of the key portion only. */ public byte[] getKey() { int keylength = getKeyLength(); byte[] key = new byte[keylength]; System.arraycopy(getBuffer(), getKeyOffset(), key, 0, keylength); return key; }
3.68
morf_MySqlDialect_getSqlForDateToYyyymmddHHmmss
/** * @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForDateToYyyymmddHHmmss(org.alfasoftware.morf.sql.element.Function) */ @Override protected String getSqlForDateToYyyymmddHHmmss(Function function) { return String.format("CAST(DATE_FORMAT(%s, '%%Y%%m%%d%%H%%i%%s') AS DECIMAL(14))",getSqlFrom(function.getArguments().get(0))); }
3.68
framework_HasHierarchicalDataProvider_getTreeData
/** * Gets the backing {@link TreeData} instance of the data provider, if the * data provider is a {@link TreeDataProvider}. * * @return the TreeData instance used by the data provider * @throws IllegalStateException * if the type of the data provider is not * {@link TreeDataProvider} */ @SuppressWarnings("unchecked") public default TreeData<T> getTreeData() { if (getDataProvider() instanceof TreeDataProvider) { return ((TreeDataProvider<T>) getDataProvider()).getTreeData(); } else { throw new IllegalStateException( "Data provider is not an instance of TreeDataProvider"); } }
3.68
hadoop_RegistryOperationsService_getClientAcls
/** * Get the aggregate set of ACLs the client should use * to create directories * @return the ACL list */ public List<ACL> getClientAcls() { return getRegistrySecurity().getClientACLs(); }
3.68
flink_MutableHashTable_assignPartition
/** * Assigns a partition to a bucket. * * @param bucket The bucket to get the partition for. * @param numPartitions The number of partitions. * @return The partition for the bucket. */ public static byte assignPartition(int bucket, byte numPartitions) { return (byte) (bucket % numPartitions); }
3.68
framework_VCustomLayout_setWidget
/** * Sets widget to given location. * * If location already contains a widget it will be removed. * * @param widget * Widget to be set into location. * @param location * location name where widget will be added * * @throws IllegalArgumentException * if no such location is found in the layout. */ public void setWidget(Widget widget, String location) { if (widget == null) { return; } // If the given location is not found in the layout, an exception is thrown Element elem = locationToElement.get(location); if (elem == null && hasTemplate()) { throw new IllegalArgumentException( "No location " + location + " found"); } // Get previous widget final Widget previous = locationToWidget.get(location); // NOP if given widget already exists in this location if (previous == widget) { return; } if (previous != null) { remove(previous); } // if template is missing add element in order if (!hasTemplate()) { elem = getElement(); } // Add widget to location super.add(widget, elem); locationToWidget.put(location, widget); }
3.68
graphhopper_AStar_setApproximation
/** * @param approx defines how distance to goal Node is approximated */ public AStar setApproximation(WeightApproximator approx) { weightApprox = approx; return this; }
3.68
hbase_QuotaTableUtil_doGet
/* * ========================================================================= HTable helpers */ protected static Result doGet(final Connection connection, final Get get) throws IOException { try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(get); } }
3.68
flink_ErrorInfo_handleMissingThrowable
/** * Utility method to cover FLINK-21376. * * @param throwable The actual exception. * @return a {@link FlinkException} if no exception was passed. */ public static Throwable handleMissingThrowable(@Nullable Throwable throwable) { return throwable != null ? throwable : new FlinkException( "Unknown cause for Execution failure (this might be caused by FLINK-21376)."); }
3.68
hadoop_ConsistentHashRing_addLocation
/** * Add entry to consistent hash ring. * * @param location Node to add to the ring. * @param numVirtualNodes Number of virtual nodes to add. */ public void addLocation(String location, int numVirtualNodes) { writeLock.lock(); try { entryToVirtualNodes.put(location, numVirtualNodes); for (int i = 0; i < numVirtualNodes; i++) { String key = String.format(VIRTUAL_NODE_FORMAT, location, i); String hash = getHash(key); ring.put(hash, key); } } finally { writeLock.unlock(); } }
3.68
framework_DataProvider_fromStream
/** * Creates a new data provider from the given stream. <b>All items in the * stream are eagerly collected to a list.</b> * <p> * This is a shorthand for using {@link #ofCollection(Collection)} after * collecting the items in the stream to a list with e.g. * {@code stream.collect(Collectors.toList())}. * <p> * <strong>Using big streams is not recommended, you should instead use a * lazy data provider.</strong> See * {@link #fromCallbacks(FetchCallback, CountCallback)} or * {@link BackEndDataProvider} for more info. * * @param <T> * the data item type * @param items * a stream of data items, not {@code null} * @return a new list data provider */ public static <T> ListDataProvider<T> fromStream(Stream<T> items) { return new ListDataProvider<>(items.collect(Collectors.toList())); }
3.68
hbase_HttpServer_setAttribute
/** * Set a value in the webapp context. These values are available to the jsp pages as * "application.getAttribute(name)". * @param name The name of the attribute * @param value The value of the attribute */ public void setAttribute(String name, Object value) { webAppContext.setAttribute(name, value); }
3.68
hbase_ZKNodeTracker_postStart
/** * Called after start is called. Subclasses can implement this method to load more data from ZK. */ protected void postStart() { }
3.68
hbase_SimpleRegionNormalizer_getMergeMinRegionCount
/** * Return this instance's configured value for {@value #MERGE_MIN_REGION_COUNT_KEY}. */ public int getMergeMinRegionCount() { return normalizerConfiguration.getMergeMinRegionCount(); }
3.68
framework_LegacyApplication_removeWindow
/** * Removes the specified window from the application. This also removes all * name mappings for the window (see {@link #addWindow(LegacyWindow)} and * {@link #getWindowName(UI)}). * * <p> * Note that removing a window from the application does not close the browser * window - the window is only removed from the server-side. * </p> * * @param uI * the UI to remove */ public void removeWindow(LegacyWindow uI) { // Remove via removeIf to avoid ConcurrentModificationException while iterating legacyUINames.values().removeIf(value -> value == uI); }
3.68
hbase_HFileBlock_isUnpacked
/** * Return true when this block's buffer has been unpacked, false otherwise. Note this is a * calculated heuristic, not a tracked attribute of the block. */ public boolean isUnpacked() { final int headerSize = headerSize(); final int expectedCapacity = headerSize + uncompressedSizeWithoutHeader; final int bufCapacity = bufWithoutChecksum.remaining(); return bufCapacity == expectedCapacity || bufCapacity == expectedCapacity + headerSize; }
3.68
framework_AbstractInMemoryContainer_getItemSorter
/** * Returns the ItemSorter used for comparing items in a sort. See * {@link #setItemSorter(ItemSorter)} for more information. * * @return The ItemSorter used for comparing two items in a sort. */ protected ItemSorter getItemSorter() { return itemSorter; }
3.68
flink_JoinInputSideSpec_withUniqueKeyContainedByJoinKey
/** * Creates a {@link JoinInputSideSpec} whose input has a unique key and the unique key is * contained by the join key. * * @param uniqueKeyType type information of the unique key * @param uniqueKeySelector key selector to extract unique key from the input row */ public static JoinInputSideSpec withUniqueKeyContainedByJoinKey( InternalTypeInfo<RowData> uniqueKeyType, KeySelector<RowData, RowData> uniqueKeySelector) { checkNotNull(uniqueKeyType); checkNotNull(uniqueKeySelector); return new JoinInputSideSpec(true, uniqueKeyType, uniqueKeySelector); }
3.68
querydsl_JTSGeometryExpression_within
/** * Returns 1 (TRUE) if this geometric object is “spatially within” anotherGeometry. * * @param geometry other geometry * @return true, if within */ public BooleanExpression within(Expression<? extends Geometry> geometry) { return Expressions.booleanOperation(SpatialOps.WITHIN, mixin, geometry); }
3.68
flink_Tuple5_setFields
/** * Sets new values to all fields of the tuple. * * @param f0 The value for field 0 * @param f1 The value for field 1 * @param f2 The value for field 2 * @param f3 The value for field 3 * @param f4 The value for field 4 */ public void setFields(T0 f0, T1 f1, T2 f2, T3 f3, T4 f4) { this.f0 = f0; this.f1 = f1; this.f2 = f2; this.f3 = f3; this.f4 = f4; }
3.68
hadoop_AppToFlowTableRW_createTable
/* * (non-Javadoc) * * @see * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW# * createTable(org.apache.hadoop.hbase.client.Admin, * org.apache.hadoop.conf.Configuration) */ public void createTable(Admin admin, Configuration hbaseConf) throws IOException { TableName table = getTableName(hbaseConf); if (admin.tableExists(table)) { // do not disable / delete existing table // similar to the approach taken by map-reduce jobs when // output directory exists throw new IOException("Table " + table.getNameAsString() + " already exists."); } HTableDescriptor appToFlowTableDescp = new HTableDescriptor(table); HColumnDescriptor mappCF = new HColumnDescriptor(AppToFlowColumnFamily.MAPPING.getBytes()); mappCF.setBloomFilterType(BloomType.ROWCOL); appToFlowTableDescp.addFamily(mappCF); admin.createTable(appToFlowTableDescp); LOG.info("Status of table creation for " + table.getNameAsString() + "=" + admin.tableExists(table)); }
3.68
hbase_ScanQueryMatcher_compareKeyForNextColumn
/** * @param nextIndexed the key of the next entry in the block index (if any) * @param currentCell The Cell we're using to calculate the seek key * @return result of the compare between the indexed key and the key portion of the passed cell */ public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) { ColumnCount nextColumn = columns.getColumnHint(); if (nextColumn == null) { return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, 0, 0, null, 0, 0, PrivateConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); } else { return PrivateCellUtil.compareKeyBasedOnColHint(rowComparator, nextIndexed, currentCell, currentCell.getFamilyOffset(), currentCell.getFamilyLength(), nextColumn.getBuffer(), nextColumn.getOffset(), nextColumn.getLength(), HConstants.LATEST_TIMESTAMP, Type.Maximum.getCode()); } }
3.68
flink_LocalSlicingWindowAggOperator_computeMemorySize
/** Compute memory size from memory fraction. */ private long computeMemorySize() { final Environment environment = getContainingTask().getEnvironment(); return environment .getMemoryManager() .computeMemorySize( getOperatorConfig() .getManagedMemoryFractionOperatorUseCaseOfSlot( ManagedMemoryUseCase.OPERATOR, environment.getTaskManagerInfo().getConfiguration(), environment.getUserCodeClassLoader().asClassLoader()));
3.68
dubbo_ServiceAnnotationPostProcessor_resolveBeanNameGenerator
/** * It'd be better to use the BeanNameGenerator instance referenced by * {@link ConfigurationClassPostProcessor#componentScanBeanNameGenerator}; since that instance is * not exposed, bean name generation here may be inconsistent with component scanning. * * @param registry {@link BeanDefinitionRegistry} * @return {@link BeanNameGenerator} instance * @see SingletonBeanRegistry * @see AnnotationConfigUtils#CONFIGURATION_BEAN_NAME_GENERATOR * @see ConfigurationClassPostProcessor#processConfigBeanDefinitions * @since 2.5.8 */ private BeanNameGenerator resolveBeanNameGenerator(BeanDefinitionRegistry registry) { BeanNameGenerator beanNameGenerator = null; if (registry instanceof SingletonBeanRegistry) { SingletonBeanRegistry singletonBeanRegistry = SingletonBeanRegistry.class.cast(registry); beanNameGenerator = (BeanNameGenerator) singletonBeanRegistry.getSingleton(CONFIGURATION_BEAN_NAME_GENERATOR); } if (beanNameGenerator == null) { if (logger.isInfoEnabled()) { logger.info("BeanNameGenerator bean can't be found in BeanFactory with name [" + CONFIGURATION_BEAN_NAME_GENERATOR + "]"); logger.info("BeanNameGenerator will be an instance of " + AnnotationBeanNameGenerator.class.getName() + ", which may cause problems in bean name generation."); } beanNameGenerator = new AnnotationBeanNameGenerator(); } return beanNameGenerator; }
3.68
framework_VScrollTable_instructServerToForgetPreviousSelections
/** * Used in multiselect mode when the client side knows that all selections * are in the next request. */ private void instructServerToForgetPreviousSelections() { client.updateVariable(paintableId, "clearSelections", true, false); }
3.68
framework_Window_setTabStopEnabled
/** * Set if it should be prevented to set the focus to a component outside a * non-modal window with the tab key. * <p> * This is meant to help users of assistive devices avoid leaving the * window unintentionally. * <p> * For modal windows, this function is activated automatically, while * preserving the stored value of tabStop. * * @param tabStop * true to keep the focus inside the window when reaching the top * or bottom, false (default) to allow leaving the window */ public void setTabStopEnabled(boolean tabStop) { getState().assistiveTabStop = tabStop; }
3.68
hadoop_JobBase_addDoubleValue
/** * Increment the given counter by the given incremental value. If the counter * does not exist, one is created with value 0. * * @param name * the counter name * @param inc * the incremental value * @return the updated value. */ protected Double addDoubleValue(Object name, double inc) { Double val = this.doubleCounters.get(name); Double retv = null; if (val == null) { retv = new Double(inc); } else { retv = new Double(val.doubleValue() + inc); } this.doubleCounters.put(name, retv); return retv; }
3.68
AreaShop_Materials_signNameToMaterial
/** * Get material based on a sign material name. * @param name Name of the sign material * @return null if not a sign, otherwise the material matching the name (when the material is not available on the current minecraft version, it returns the base type) */ public static Material signNameToMaterial(String name) { // Expected null case if (!isSign(name)) { return null; } Material result = null; if (legacyMaterials) { // 1.12 and lower just know SIGN_POST, WALL_SIGN and SIGN if (FLOOR_SIGN_TYPES.contains(name)) { result = Material.getMaterial("SIGN_POST"); } else if (WALL_SIGN_TYPES.contains(name)) { result = Material.getMaterial("WALL_SIGN"); if (result == null) { result = Material.getMaterial("SIGN"); } } } else { // Try saved name (works for wood types on 1.14, regular types for below) result = Material.getMaterial(name); if (result == null) { // Cases for 1.13, which don't know wood types, but need new materials if (FLOOR_SIGN_TYPES.contains(name)) { // SIGN -> OAK_SIGN for 1.14 result = Material.getMaterial("OAK_SIGN"); // Fallback for 1.13 if (result == null) { result = Material.getMaterial("SIGN"); } } else if (WALL_SIGN_TYPES.contains(name)) { // WALL_SIGN -> OAK_WALL_SIGN for 1.14 result = Material.getMaterial("OAK_WALL_SIGN"); // Fallback for 1.13 if (result == null) { result = Material.getMaterial("WALL_SIGN"); } } } } if (result == null) { AreaShop.debug("Materials.get() null result:", name, "legacyMaterials:", legacyMaterials); } return result; }
3.68
dubbo_TTable_has
/** * whether has one of the specified border styles * * @param borderArray border styles * @return whether has one of the specified border styles */ public boolean has(int... borderArray) { if (null == borderArray) { return false; } for (int b : borderArray) { if ((this.borders & b) == b) { return true; } } return false; }
3.68
flink_RocksDBMemoryConfiguration_setFixedMemoryPerSlot
/** * Configures RocksDB to use a fixed amount of memory shared between all instances (operators) * in a slot. See {@link #setFixedMemoryPerSlot(MemorySize)} for details. */ public void setFixedMemoryPerSlot(String totalMemoryPerSlotStr) { setFixedMemoryPerSlot(MemorySize.parse(totalMemoryPerSlotStr)); }
3.68
flink_FactoryUtils_loadAndInvokeFactory
/** * Loads all factories for the given class using the {@link ServiceLoader} and attempts to * create an instance. * * @param factoryInterface factory interface * @param factoryInvoker factory invoker * @param defaultProvider default factory provider * @param <R> resource type * @param <F> factory type * @throws RuntimeException if no or multiple resources could be instantiated * @return created instance */ public static <R, F> R loadAndInvokeFactory( final Class<F> factoryInterface, final FactoryInvoker<F, R> factoryInvoker, final Supplier<F> defaultProvider) { final ServiceLoader<F> factories = ServiceLoader.load(factoryInterface); final List<R> instantiatedResources = new ArrayList<>(); final List<Exception> errorsDuringInitialization = new ArrayList<>(); for (F factory : factories) { try { R resource = factoryInvoker.invoke(factory); instantiatedResources.add(resource); LOG.info("Instantiated {}.", resource.getClass().getSimpleName()); } catch (Exception e) { LOG.debug( "Factory {} could not instantiate instance.", factory.getClass().getSimpleName(), e); errorsDuringInitialization.add(e); } } if (instantiatedResources.size() == 1) { return instantiatedResources.get(0); } if (instantiatedResources.isEmpty()) { try { return factoryInvoker.invoke(defaultProvider.get()); } catch (Exception e) { final RuntimeException exception = new RuntimeException("Could not instantiate any instance."); final RuntimeException defaultException = new RuntimeException("Could not instantiate default instance.", e); exception.addSuppressed(defaultException); errorsDuringInitialization.forEach(exception::addSuppressed); throw exception; } } throw new RuntimeException("Multiple instances were created: " + instantiatedResources); }
3.68
pulsar_AuthorizationProvider_removePermissionsAsync
/** * Remove authorization-action permissions on a topic. * @param topicName * @return CompletableFuture<Void> */ default CompletableFuture<Void> removePermissionsAsync(TopicName topicName) { return CompletableFuture.completedFuture(null); }
3.68
hbase_HFileOutputFormat2_createFamilyCompressionMap
/** * Runs inside the task to deserialize column family to compression algorithm map from the * configuration. * @param conf to read the serialized values from * @return a map from column family to the configured compression algorithm */ @InterfaceAudience.Private static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration conf) { Map<byte[], String> stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY); Map<byte[], Algorithm> compressionMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry<byte[], String> e : stringMap.entrySet()) { Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); compressionMap.put(e.getKey(), algorithm); } return compressionMap; }
3.68
hudi_OptionsResolver_needsScheduleCompaction
/** * Returns whether there is need to schedule the compaction plan. * * @param conf The flink configuration. */ public static boolean needsScheduleCompaction(Configuration conf) { return OptionsResolver.isMorTable(conf) && conf.getBoolean(FlinkOptions.COMPACTION_SCHEDULE_ENABLED); }
3.68
pulsar_ResourceGroupService_registerTenant
/** * Registers a tenant as a user of a resource group. * * @param resourceGroupName * @param tenantName * @throws PulsarAdminException if the RG does not exist, or if the tenant already references a RG. */ public void registerTenant(String resourceGroupName, String tenantName) throws PulsarAdminException { ResourceGroup rg = checkResourceGroupExists(resourceGroupName); // Check that the tenant-name doesn't already have a RG association. // [If it does, that should be unregistered before putting a different association.] ResourceGroup oldRG = this.tenantToRGsMap.get(tenantName); if (oldRG != null) { String errMesg = "Tenant " + tenantName + " already references a resource group: " + oldRG.getID(); throw new PulsarAdminException(errMesg); } ResourceGroupOpStatus status = rg.registerUsage(tenantName, ResourceGroupRefTypes.Tenants, true, this.resourceUsageTransportManagerMgr); if (status == ResourceGroupOpStatus.Exists) { String errMesg = "Tenant " + tenantName + " already references the resource group " + resourceGroupName; errMesg += "; this is unexpected"; throw new PulsarAdminException(errMesg); } // Associate this tenant name with the RG. this.tenantToRGsMap.put(tenantName, rg); rgTenantRegisters.labels(resourceGroupName).inc(); }
3.68
pulsar_ThreadLocalStateCleaner_cleanupThreadLocal
// cleanup thread local state on all active threads public <T> void cleanupThreadLocal(ThreadLocal<?> threadLocal, BiConsumer<Thread, T> cleanedValueListener) { // requireNonNull fails fast; the original Objects.nonNull call silently discarded the check Objects.requireNonNull(threadLocal); for (Thread thread : ThreadUtils.getAllThreads()) { cleanupThreadLocal(threadLocal, thread, cleanedValueListener); } }
3.68
hudi_BaseActionExecutor_writeTableMetadata
/** * Writes restore metadata to table metadata. * @param metadata restore metadata of interest. */ protected final void writeTableMetadata(HoodieRestoreMetadata metadata) { Option<HoodieTableMetadataWriter> metadataWriterOpt = table.getMetadataWriter(instantTime); if (metadataWriterOpt.isPresent()) { try (HoodieTableMetadataWriter metadataWriter = metadataWriterOpt.get()) { metadataWriter.update(metadata, instantTime); } catch (Exception e) { if (e instanceof HoodieException) { throw (HoodieException) e; } else { throw new HoodieException("Failed to apply restore to metadata", e); } } } }
3.68
hbase_QuotaTableUtil_getTableQuota
/* * ========================================================================= Quota "settings" * helpers */ public static Quotas getTableQuota(final Connection connection, final TableName table) throws IOException { return getQuotas(connection, getTableRowKey(table)); }
3.68
aws-saas-boost_UpdateWorkflow_runApiGatewayDeployment
// VisibleForTesting protected void runApiGatewayDeployment(Map<String, String> cloudFormationParamMap) { // CloudFormation will not redeploy an API Gateway stage on update outputMessage("Updating API Gateway deployment for stages"); try { String publicApiName = "sb-" + environment.getName() + "-public-api"; String privateApiName = "sb-" + environment.getName() + "-private-api"; ApiGatewayClient apigw = clientBuilderFactory.apiGatewayBuilder().build(); GetRestApisResponse response = apigw.getRestApis(); if (response.hasItems()) { for (RestApi api : response.items()) { String apiName = api.name(); boolean isPublicApi = publicApiName.equals(apiName); boolean isPrivateApi = privateApiName.equals(apiName); if (isPublicApi || isPrivateApi) { String stage = isPublicApi ? cloudFormationParamMap.get("PublicApiStage") : cloudFormationParamMap.get("PrivateApiStage"); outputMessage("Updating API Gateway deployment for " + apiName + " to stage: " + stage); apigw.createDeployment(request -> request .restApiId(api.id()) .stageName(stage) ); } } } } catch (SdkServiceException apigwError) { LOGGER.error("apigateway error", apigwError); LOGGER.error(Utils.getFullStackTrace(apigwError)); throw apigwError; } }
3.68
flink_SSLUtils_createRestSSLContext
/** Creates an SSL context for clients against the external REST endpoint. */ @Nullable @VisibleForTesting public static SSLContext createRestSSLContext(Configuration config, boolean clientMode) throws Exception { ClientAuth clientAuth = SecurityOptions.isRestSSLAuthenticationEnabled(config) ? ClientAuth.REQUIRE : ClientAuth.NONE; JdkSslContext nettySSLContext = (JdkSslContext) createRestNettySSLContext(config, clientMode, clientAuth, JDK); if (nettySSLContext != null) { return nettySSLContext.context(); } else { return null; } }
3.68
hadoop_CacheDirectiveStats_getBytesNeeded
/** * @return The bytes needed. */ public long getBytesNeeded() { return bytesNeeded; }
3.68
dubbo_AbstractJSONImpl_getString
/** * Gets a string from an object for the given key. If the key is not present, this returns null. * If the value is not a String, throws an exception. */ @Override public String getString(Map<String, ?> obj, String key) { assert obj != null; assert key != null; if (!obj.containsKey(key)) { return null; } Object value = obj.get(key); if (!(value instanceof String)) { throw new ClassCastException( String.format("value '%s' for key '%s' in '%s' is not String", value, key, obj)); } return (String) value; }
3.68
hudi_BaseCommitActionExecutor_validateWriteResult
/** * Validate actions taken by clustering. In the first implementation, we validate at least one new file is written. * But we can extend this to add more validation. E.g. number of records read = number of records written etc. * We can also make these validations in BaseCommitActionExecutor to reuse pre-commit hooks for multiple actions. */ private void validateWriteResult(HoodieClusteringPlan clusteringPlan, HoodieWriteMetadata<HoodieData<WriteStatus>> writeMetadata) { if (writeMetadata.getWriteStatuses().isEmpty()) { throw new HoodieClusteringException("Clustering plan produced 0 WriteStatus for " + instantTime + " #groups: " + clusteringPlan.getInputGroups().size() + " expected at least " + clusteringPlan.getInputGroups().stream().mapToInt(HoodieClusteringGroup::getNumOutputFileGroups).sum() + " write statuses"); } }
3.68
hbase_SlowLogTableAccessor_addSlowLogRecords
/** * Add slow/large log records to hbase:slowlog table * @param slowLogPayloads List of SlowLogPayload to process * @param connection connection */ public static void addSlowLogRecords(final List<TooSlowLog.SlowLogPayload> slowLogPayloads, Connection connection) { List<Put> puts = new ArrayList<>(slowLogPayloads.size()); for (TooSlowLog.SlowLogPayload slowLogPayload : slowLogPayloads) { final byte[] rowKey = getRowKey(slowLogPayload); final Put put = new Put(rowKey).setDurability(Durability.SKIP_WAL) .setPriority(HConstants.NORMAL_QOS) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("call_details"), Bytes.toBytes(slowLogPayload.getCallDetails())) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("client_address"), Bytes.toBytes(slowLogPayload.getClientAddress())) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("method_name"), Bytes.toBytes(slowLogPayload.getMethodName())) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("param"), Bytes.toBytes(slowLogPayload.getParam())) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("processing_time"), Bytes.toBytes(Integer.toString(slowLogPayload.getProcessingTime()))) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("queue_time"), Bytes.toBytes(Integer.toString(slowLogPayload.getQueueTime()))) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("region_name"), Bytes.toBytes(slowLogPayload.getRegionName())) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("response_size"), Bytes.toBytes(Long.toString(slowLogPayload.getResponseSize()))) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("block_bytes_scanned"), Bytes.toBytes(Long.toString(slowLogPayload.getBlockBytesScanned()))) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("server_class"), Bytes.toBytes(slowLogPayload.getServerClass())) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("start_time"), Bytes.toBytes(Long.toString(slowLogPayload.getStartTime()))) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("type"), Bytes.toBytes(slowLogPayload.getType().name())) .addColumn(HConstants.SLOWLOG_INFO_FAMILY, Bytes.toBytes("username"), Bytes.toBytes(slowLogPayload.getUserName())); puts.add(put); } try { doPut(connection, puts); } catch (Exception e) { LOG.warn("Failed to add slow/large log records to hbase:slowlog table.", e); } }
3.68
framework_VCustomLayout_scanForLocations
/** Collect locations from template */ private void scanForLocations(Element elem) { if (elem.hasAttribute("location")) { final String location = elem.getAttribute("location"); locationToElement.put(location, elem); elem.setInnerHTML(""); } else if (elem.hasAttribute("data-location")) { final String location = elem.getAttribute("data-location"); locationToElement.put(location, elem); elem.setInnerHTML(""); } else { final int len = DOM.getChildCount(elem); for (int i = 0; i < len; i++) { scanForLocations(DOM.getChild(elem, i)); } } }
3.68
flink_CompiledPlan_writeToFile
/** * Writes this plan to a file using the JSON representation. This operation will fail if the * file already exists, even if the content is different from this plan. * * @param file the target file * @throws TableException if the file cannot be written. */ default void writeToFile(File file) { writeToFile(file, false); }
3.68
AreaShop_GeneralRegion_setRestoreSetting
/** * Change the restore setting. * @param restore true, false or general */ public void setRestoreSetting(Boolean restore) { setSetting("general.enableRestore", restore); }
3.68
hudi_HFileBootstrapIndex_writeNextSourceFileMapping
/** * Write next source file to hudi file-id. Entries are expected to be appended in hudi file-group id * order. * @param mapping bootstrap source file mapping. */ private void writeNextSourceFileMapping(BootstrapFileMapping mapping) { try { HoodieBootstrapFilePartitionInfo srcFilePartitionInfo = new HoodieBootstrapFilePartitionInfo(); srcFilePartitionInfo.setPartitionPath(mapping.getPartitionPath()); srcFilePartitionInfo.setBootstrapPartitionPath(mapping.getBootstrapPartitionPath()); srcFilePartitionInfo.setBootstrapFileStatus(mapping.getBootstrapFileStatus()); KeyValue kv = new KeyValue(getUTF8Bytes(getFileGroupKey(mapping.getFileGroupId())), new byte[0], new byte[0], HConstants.LATEST_TIMESTAMP, KeyValue.Type.Put, TimelineMetadataUtils.serializeAvroMetadata(srcFilePartitionInfo, HoodieBootstrapFilePartitionInfo.class).get()); indexByFileIdWriter.append(kv); numFileIdKeysAdded++; } catch (IOException e) { throw new HoodieIOException(e.getMessage(), e); } }
3.68
hbase_ZKProcedureMemberRpcs_receivedReachedGlobalBarrier
/** * Pass along the procedure global barrier notification to any listeners * @param path full znode path that cause the notification */ private void receivedReachedGlobalBarrier(String path) { LOG.debug("Received reached global barrier:" + path); String procName = ZKUtil.getNodeName(path); this.member.receivedReachedGlobalBarrier(procName); }
3.68
hadoop_RegistryPathStatus_equals
/** * Equality operator checks size, time and path of the entries. * It does <i>not</i> check {@link #children}. * @param other the other entry * @return true if the entries are considered equal. */ @Override public boolean equals(Object other) { if (this == other) { return true; } if (other == null || getClass() != other.getClass()) { return false; } RegistryPathStatus status = (RegistryPathStatus) other; if (size != status.size) { return false; } if (time != status.time) { return false; } if (path != null ? !path.equals(status.path) : status.path != null) { return false; } return true; }
3.68
hadoop_PartitionQueueMetrics_getUserMetrics
/** * Partition * Queue * User Metrics * * Computes Metrics at Partition (Node Label) * Queue * User Level. * * Sample JMX O/P Structure: * * PartitionQueueMetrics (labelX) * QueueMetrics (A) * usermetrics * QueueMetrics (A1) * usermetrics * QueueMetrics (A2) * usermetrics * QueueMetrics (B) * usermetrics * * @return QueueMetrics */ @Override public synchronized QueueMetrics getUserMetrics(String userName) { if (users == null) { return null; } String partitionJMXStr = (partition.equals(DEFAULT_PARTITION)) ? DEFAULT_PARTITION_JMX_STR : partition; QueueMetrics metrics = (PartitionQueueMetrics) users.get(userName); if (metrics == null) { metrics = new PartitionQueueMetrics(this.metricsSystem, this.queueName, null, false, this.conf, this.partition); users.put(userName, metrics); metricsSystem.register( pSourceName(partitionJMXStr).append(qSourceName(queueName)) .append(",user=").append(userName).toString(), "Metrics for user '" + userName + "' in queue '" + queueName + "'", ((PartitionQueueMetrics) metrics.tag(PARTITION_INFO, partitionJMXStr) .tag(QUEUE_INFO, queueName)).tag(USER_INFO, userName)); } return metrics; }
3.68
flink_MathUtils_checkedDownCast
/** * Casts the given value to a 32 bit integer, if it can be safely done. If the cast would change * the numeric value, this method raises an exception. * * <p>This method is a protection in places where one expects to be able to safely cast, but * where unexpected situations could make the cast unsafe and would cause hidden problems that * are hard to track down. * * @param value The value to be cast to an integer. * @return The given value as an integer. * @see Math#toIntExact(long) */ public static int checkedDownCast(long value) { int downCast = (int) value; if (downCast != value) { throw new IllegalArgumentException( "Cannot downcast long value " + value + " to integer."); } return downCast; }
3.68
hadoop_HamletImpl_root
/** * Create a root-level generic element. * Mostly for testing purpose. * @param <T> type of the parent element * @param name of the element * @param opts {@link EOpt element options} * @return the element */ public <T extends __> Generic<T> root(String name, EnumSet<EOpt> opts) { return new Generic<T>(name, null, opts); }
3.68
framework_DateField_fireValueChange
/* * only fires the event if preventValueChangeEvent flag is false */ @Override protected void fireValueChange(boolean repaintIsNotNeeded) { if (!preventValueChangeEvent) { super.fireValueChange(repaintIsNotNeeded); } }
3.68
hbase_JVMClusterUtil_getRegionServer
/** Returns the region server */ public HRegionServer getRegionServer() { return this.regionServer; }
3.68
flink_MessageSerializer_serializeResponse
/** * Serializes the response sent to the {@link org.apache.flink.queryablestate.network.Client}. * * @param alloc The {@link ByteBufAllocator} used to allocate the buffer to serialize the * message into. * @param requestId The id of the request to which the message refers. * @param response The response to be serialized. * @return A {@link ByteBuf} containing the serialized message. */ public static <RESP extends MessageBody> ByteBuf serializeResponse( final ByteBufAllocator alloc, final long requestId, final RESP response) { Preconditions.checkNotNull(response); return writePayload(alloc, requestId, MessageType.REQUEST_RESULT, response.serialize()); }
3.68
hadoop_ServiceLauncher_loadConfigurationClasses
/** * Creates all the configurations defined by {@link #getConfigurationsToCreate()}, * ensuring that the resources have been pushed in. If one cannot be loaded it is logged * and the operation continues, except in the case that the class does load but it isn't * actually a subclass of {@link Configuration}. * @return the number of configuration classes successfully loaded * @throws ExitUtil.ExitException if a loaded class is of the wrong type */ @VisibleForTesting public int loadConfigurationClasses() { List<String> toCreate = getConfigurationsToCreate(); int loaded = 0; for (String classname : toCreate) { try { Class<?> loadClass = getClassLoader().loadClass(classname); Object instance = loadClass.getConstructor().newInstance(); if (!(instance instanceof Configuration)) { throw new ExitUtil.ExitException(EXIT_SERVICE_CREATION_FAILURE, "Could not create " + classname + " because it is not a Configuration class/subclass"); } loaded++; } catch (ClassNotFoundException e) { // class could not be found -implies it is not on the current classpath LOG.debug("Failed to load {} because it is not on the classpath", classname); } catch (ExitUtil.ExitException e) { // rethrow throw e; } catch (Exception e) { // any other exception LOG.info("Failed to create {}", classname, e); } } return loaded; }
3.68
flink_SourceOperator_checkSplitWatermarkAlignment
/** * Finds the splits that are beyond the current max watermark and pauses them. At the same time, * splits that have been paused and where the global watermark caught up are resumed. * * <p>Note: This takes effect only if there are multiple splits, otherwise it does nothing. */ private void checkSplitWatermarkAlignment() { if (numSplits <= 1) { // A single split can't overtake any other splits assigned to this operator instance. // It is sufficient for the source to stop processing. return; } Collection<String> splitsToPause = new ArrayList<>(); Collection<String> splitsToResume = new ArrayList<>(); splitCurrentWatermarks.forEach( (splitId, splitWatermark) -> { if (splitWatermark > currentMaxDesiredWatermark) { splitsToPause.add(splitId); } else if (currentlyPausedSplits.contains(splitId)) { splitsToResume.add(splitId); } }); splitsToPause.removeAll(currentlyPausedSplits); if (!splitsToPause.isEmpty() || !splitsToResume.isEmpty()) { pauseOrResumeSplits(splitsToPause, splitsToResume); currentlyPausedSplits.addAll(splitsToPause); splitsToResume.forEach(currentlyPausedSplits::remove); } }
3.68
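The pause/resume bookkeeping above can be checked in isolation. Below is a standalone sketch (not Flink code) that mirrors how the two sets are derived from per-split watermarks, the max desired watermark, and the currently paused set.

import java.util.*;

public class AlignmentDemo {
    public static void main(String[] args) {
        Map<String, Long> splitWatermarks = Map.of("s1", 100L, "s2", 250L, "s3", 90L);
        long maxDesiredWatermark = 200L;
        Set<String> currentlyPaused = new HashSet<>(Set.of("s3"));

        List<String> toPause = new ArrayList<>();
        List<String> toResume = new ArrayList<>();
        splitWatermarks.forEach((id, wm) -> {
            if (wm > maxDesiredWatermark) {
                toPause.add(id);           // split ran ahead of the alignment bound
            } else if (currentlyPaused.contains(id)) {
                toResume.add(id);          // global watermark caught up
            }
        });
        toPause.removeAll(currentlyPaused); // never pause twice

        System.out.println("pause=" + toPause + " resume=" + toResume);
        // prints: pause=[s2] resume=[s3]
    }
}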
dubbo_JsonUtils_setJson
/**
 * @deprecated for unit tests only
 */
@Deprecated
protected static void setJson(JSON json) {
    JsonUtils.json = json;
}
3.68
hbase_StoreUtils_getBytesPerChecksum
/**
 * Returns the configured bytesPerChecksum value.
 * @param conf The configuration
 * @return The bytesPerChecksum that is set in the configuration
 */
public static int getBytesPerChecksum(Configuration conf) {
  return conf.getInt(HConstants.BYTES_PER_CHECKSUM, HFile.DEFAULT_BYTES_PER_CHECKSUM);
}
3.68
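A minimal usage sketch, assuming an HBase Configuration is available; the override value is just an example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

Configuration conf = HBaseConfiguration.create();
// Falls back to HFile.DEFAULT_BYTES_PER_CHECKSUM when the key is unset.
int bytesPerChecksum = StoreUtils.getBytesPerChecksum(conf);
conf.setInt(HConstants.BYTES_PER_CHECKSUM, 16384); // example override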
morf_DatabaseSchemaManager_dropAllViews
/**
 * Drop all views.
 */
public void dropAllViews() {
  ProducerCache producerCache = new ProducerCache();
  log.debug("Dropping all views");
  try {
    Schema databaseSchema = producerCache.get().getSchema();
    ImmutableList<View> viewsToDrop = ImmutableList.copyOf(databaseSchema.views());
    List<String> script = Lists.newArrayList();
    for (View view : viewsToDrop) {
      script.addAll(dialect.get().dropStatements(view));
    }
    executeScript(script);
  } finally {
    producerCache.close();
  }
  views.get().clear();
  viewsDeployedByThis.get().clear();
}
3.68
hadoop_CosNFileSystem_listStatus
/**
 * <p>
 * If <code>f</code> is a file, this method will make a single call to COS.
 * If <code>f</code> is a directory,
 * this method will make a maximum of (<i>n</i> / 199) + 2 calls to COS,
 * where <i>n</i> is the total number of files
 * and directories contained directly in <code>f</code>.
 * </p>
 */
@Override
public FileStatus[] listStatus(Path f) throws IOException {
  Path absolutePath = makeAbsolute(f);
  String key = pathToKey(absolutePath);
  if (key.length() > 0) {
    FileStatus fileStatus = this.getFileStatus(f);
    if (fileStatus.isFile()) {
      return new FileStatus[]{fileStatus};
    }
  }
  if (!key.endsWith(PATH_DELIMITER)) {
    key += PATH_DELIMITER;
  }
  URI pathUri = absolutePath.toUri();
  Set<FileStatus> status = new TreeSet<>();
  String priorLastKey = null;
  do {
    PartialListing listing = store.list(
        key, Constants.COS_MAX_LISTING_LENGTH, priorLastKey, false);
    for (FileMetadata fileMetadata : listing.getFiles()) {
      Path subPath = keyToPath(fileMetadata.getKey());
      if (fileMetadata.getKey().equals(key)) {
        // this is just the directory we have been asked to list.
        LOG.debug("The file list contains the COS key [{}] to be listed.", key);
      } else {
        status.add(newFile(fileMetadata, subPath));
      }
    }
    for (FileMetadata commonPrefix : listing.getCommonPrefixes()) {
      Path subPath = keyToPath(commonPrefix.getKey());
      String relativePath = pathUri.relativize(subPath.toUri()).getPath();
      status.add(
          newDirectory(commonPrefix, new Path(absolutePath, relativePath)));
    }
    priorLastKey = listing.getPriorLastKey();
  } while (priorLastKey != null);
  return status.toArray(new FileStatus[status.size()]);
}
3.68
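A hedged example of calling this through the FileSystem API; the cosn:// bucket and path are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder bucket; listing a directory yields one FileStatus per direct
        // child, while listing a file yields a single-element array.
        FileSystem fs = FileSystem.get(URI.create("cosn://example-bucket/"), new Configuration());
        for (FileStatus st : fs.listStatus(new Path("/data"))) {
            System.out.println((st.isDirectory() ? "dir  " : "file ") + st.getPath());
        }
    }
}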
hbase_ReplicationProtobufUtil_getCellScanner
/** Returns <code>cells</code> packaged as a CellScanner. */
static CellScanner getCellScanner(final List<List<? extends Cell>> cells, final int size) {
  return new SizedCellScanner() {
    private final Iterator<List<? extends Cell>> entries = cells.iterator();
    private Iterator<? extends Cell> currentIterator = null;
    private Cell currentCell;

    @Override
    public Cell current() {
      return this.currentCell;
    }

    @Override
    public boolean advance() {
      if (this.currentIterator == null) {
        if (!this.entries.hasNext()) return false;
        this.currentIterator = this.entries.next().iterator();
      }
      if (this.currentIterator.hasNext()) {
        this.currentCell = this.currentIterator.next();
        return true;
      }
      this.currentCell = null;
      this.currentIterator = null;
      return advance();
    }

    @Override
    public long heapSize() {
      return size;
    }
  };
}
3.68
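The advance()/current() pair above follows the usual CellScanner contract; a consumer loops like this (sketch, 'scanner' assumed in scope):

// Standard CellScanner consumption pattern.
while (scanner.advance()) {
  Cell cell = scanner.current();
  // inspect the cell, e.g. CellUtil.cloneRow(cell)
}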
framework_AbstractListing_doReadDesign
/**
 * Reads the listing specific state from the given design.
 * <p>
 * This method is separated from {@link #readDesign(Element, DesignContext)}
 * to be overridable in subclasses that need to replace this, but still must
 * be able to call {@code super.readDesign(...)}.
 *
 * @see #doWriteDesign(Element, DesignContext)
 *
 * @param design
 *            The element to obtain the state from
 * @param context
 *            The DesignContext instance used for parsing the design
 */
protected void doReadDesign(Element design, DesignContext context) {
    Attributes attr = design.attributes();
    if (attr.hasKey("readonly")) {
        setReadOnly(DesignAttributeHandler.readAttribute("readonly", attr,
                Boolean.class));
    }
    setItemCaptionGenerator(
            new DeclarativeCaptionGenerator<>(getItemCaptionGenerator()));
    setItemIconGenerator(
            new DeclarativeIconGenerator<>(getItemIconGenerator()));
    readItems(design, context);
}
3.68
hadoop_ClientGSIContext_mergeRouterFederatedState
/**
 * Merge state1 and state2 to get the max value for each namespace.
 * @param state1 input ByteString.
 * @param state2 input ByteString.
 * @return one ByteString object which contains the max value of each namespace.
 */
public static ByteString mergeRouterFederatedState(ByteString state1, ByteString state2) {
  Map<String, Long> mapping1 = new HashMap<>(getRouterFederatedStateMap(state1));
  Map<String, Long> mapping2 = getRouterFederatedStateMap(state2);
  mapping2.forEach((k, v) -> {
    long localValue = mapping1.getOrDefault(k, 0L);
    mapping1.put(k, Math.max(v, localValue));
  });
  RouterFederatedStateProto.Builder federatedBuilder = RouterFederatedStateProto.newBuilder();
  mapping1.forEach(federatedBuilder::putNamespaceStateIds);
  return federatedBuilder.build().toByteString();
}
3.68
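The per-namespace max semantics can be shown without protobuf; this standalone sketch mirrors the merge logic on plain maps (names are illustrative).

import java.util.HashMap;
import java.util.Map;

public class MergeDemo {
    public static void main(String[] args) {
        Map<String, Long> m1 = new HashMap<>(Map.of("ns0", 5L, "ns1", 9L));
        Map<String, Long> m2 = Map.of("ns1", 7L, "ns2", 3L);
        // Same rule as mergeRouterFederatedState: keep the max per namespace.
        m2.forEach((k, v) -> m1.merge(k, v, Math::max));
        System.out.println(m1); // {ns0=5, ns1=9, ns2=3} (iteration order may vary)
    }
}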
flink_Savepoint_create
/**
 * Creates a new savepoint.
 *
 * @param stateBackend The state backend of the savepoint used for keyed state.
 * @param maxParallelism The max parallelism of the savepoint.
 * @return A new savepoint.
 * @see #create(int)
 */
public static NewSavepoint create(StateBackend stateBackend, int maxParallelism) {
    Preconditions.checkNotNull(stateBackend, "The state backend must not be null");
    Preconditions.checkArgument(
            maxParallelism > 0 && maxParallelism <= UPPER_BOUND_MAX_PARALLELISM,
            "Maximum parallelism must be between 1 and "
                    + UPPER_BOUND_MAX_PARALLELISM
                    + ". Found: "
                    + maxParallelism);

    SavepointMetadata metadata =
            new SavepointMetadata(
                    maxParallelism, Collections.emptyList(), Collections.emptyList());
    return new NewSavepoint(metadata, stateBackend);
}
3.68
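A sketch of how this entry point is typically used in the State Processor API; the backend choice, the write(...) step, and the path are assumptions, and the exact API surface varies across Flink versions.

import org.apache.flink.runtime.state.memory.MemoryStateBackend;
import org.apache.flink.state.api.NewSavepoint;
import org.apache.flink.state.api.Savepoint;

// Assumption-laden sketch: create an empty savepoint with max parallelism 128.
// Real usage would attach operator state before writing; the path is a placeholder.
NewSavepoint savepoint = Savepoint.create(new MemoryStateBackend(), 128);
savepoint.write("file:///tmp/new-savepoint"); // assumed writer entry point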
hudi_HoodieTableMetadataUtil_getFileSystemView
/**
 * Get metadata table file system view.
 *
 * @param metaClient - Metadata table meta client
 * @return Filesystem view for the metadata table
 */
public static HoodieTableFileSystemView getFileSystemView(HoodieTableMetaClient metaClient) {
  // If there are no commits on the metadata table then the table's
  // default FileSystemView will not return any file slices even
  // though we may have initialized them.
  HoodieTimeline timeline = metaClient.getActiveTimeline();
  if (timeline.empty()) {
    final HoodieInstant instant = new HoodieInstant(false, HoodieTimeline.DELTA_COMMIT_ACTION,
        metaClient.createNewInstantTime(false));
    timeline = new HoodieDefaultTimeline(Stream.of(instant),
        metaClient.getActiveTimeline()::getInstantDetails);
  }
  return new HoodieTableFileSystemView(metaClient, timeline);
}
3.68
morf_AbstractSqlDialectTest_expectedUpdateUsingTargetTableInDifferentSchema
/**
 * @return The expected SQL for performing an update with a destination table which lives in a
 *         different schema.
 */
protected String expectedUpdateUsingTargetTableInDifferentSchema() {
  return "UPDATE " + differentSchemaTableName("FloatingRateRate")
      + " A SET settlementFrequency = (SELECT settlementFrequency FROM "
      + tableName("FloatingRateDetail") + " B WHERE (A.floatingRateDetailId = B.id))";
}
3.68
flink_ParameterTool_getProperties
/**
 * Returns a {@link Properties} object from this {@link ParameterTool}.
 *
 * @return A {@link Properties} object containing all the parameters.
 */
public Properties getProperties() {
    Properties props = new Properties();
    props.putAll(this.data);
    return props;
}
3.68
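A small, self-contained usage sketch: round-tripping command-line arguments into a Properties object.

import java.util.Properties;
import org.apache.flink.api.java.utils.ParameterTool;

public class PropsDemo {
    public static void main(String[] args) {
        ParameterTool params = ParameterTool.fromArgs(
                new String[] {"--host", "localhost", "--port", "8080"});
        Properties props = params.getProperties();
        System.out.println(props.getProperty("port")); // 8080
    }
}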
hbase_Random64_main
/**
 * Random64 is a pseudorandom algorithm (LCG), so it produces the same sequence whenever the
 * seeds are the same. This main tests how many nextLong() calls it takes before a value
 * repeats. We do not need to save every number (that would be too large); instead we save one
 * value every 100000 calls. Because the sequence is deterministic, a repeat of any saved value
 * is detected within the following 100000 consecutive nextLong() calls.
 */
public static void main(String[] args) {
  long defaultTotalTestCnt = 1000000000000L; // 1 trillion
  if (args.length == 1) {
    defaultTotalTestCnt = Long.parseLong(args[0]);
  }
  Preconditions.checkArgument(defaultTotalTestCnt > 0, "totalTestCnt <= 0");

  final int precision = 100000;
  final long totalTestCnt = defaultTotalTestCnt + precision;
  final int reportPeriod = 100 * precision;
  final long startTime = EnvironmentEdgeManager.currentTime();

  System.out.println("Do collision test, totalTestCnt=" + totalTestCnt);

  Random64 rand = new Random64();
  Set<Long> longSet = new HashSet<>();

  for (long cnt = 1; cnt <= totalTestCnt; cnt++) {
    final long randLong = rand.nextLong();

    if (longSet.contains(randLong)) {
      System.err.println("Conflict! count=" + cnt);
      System.exit(1);
    }

    if (cnt % precision == 0) {
      if (!longSet.add(randLong)) {
        System.err.println("Conflict! count=" + cnt);
        System.exit(1);
      }

      if (cnt % reportPeriod == 0) {
        long cost = EnvironmentEdgeManager.currentTime() - startTime;
        long remainingMs = (long) (1.0 * (totalTestCnt - cnt) * cost / cnt);
        System.out.println(String.format("Progress: %.3f%%, remaining %d minutes",
          100.0 * cnt / totalTestCnt, remainingMs / 60000));
      }
    }
  }

  System.out.println("No collision!");
}
3.68
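The collision test accepts an optional count argument; a smaller run and direct use of the generator might look like this (the fully qualified class name is an assumption):

// Direct use of the generator (sketch).
Random64 rand = new Random64();
for (int i = 0; i < 5; i++) {
    System.out.println(rand.nextLong());
}
// Running the collision test with a reduced budget, e.g. 100 million calls:
//   java org.apache.hadoop.hbase.util.Random64 100000000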
hbase_TableName_toBytes
/** Returns the table name as a byte array. */
public byte[] toBytes() {
  return name;
}
3.68
flink_PipelinedApproximateSubpartition_setIsPartialBufferCleanupRequired
/** For testing only. */
@VisibleForTesting
void setIsPartialBufferCleanupRequired() {
    isPartialBufferCleanupRequired = true;
}
3.68
hudi_Registry_getAllMetrics
/**
 * Get all registered metrics.
 *
 * @param flush clear all metrics after this operation.
 * @param prefixWithRegistryName prefix each metric name with the registry name.
 * @return a map from metric name to its current count across all registries.
 */
static Map<String, Long> getAllMetrics(boolean flush, boolean prefixWithRegistryName) {
  synchronized (Registry.class) {
    HashMap<String, Long> allMetrics = new HashMap<>();
    REGISTRY_MAP.forEach((registryName, registry) -> {
      allMetrics.putAll(registry.getAllCounts(prefixWithRegistryName));
      if (flush) {
        registry.clear();
      }
    });
    return allMetrics;
  }
}
3.68
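A hedged sketch of draining the metrics, e.g. before handing them to a reporter; flush=true resets the counters afterwards.

import java.util.Map;

// Snapshot every counter across all registries, prefixed with the registry
// name, and clear them (flush = true).
Map<String, Long> snapshot = Registry.getAllMetrics(true, true);
snapshot.forEach((name, value) -> System.out.println(name + " = " + value));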
dubbo_ExpiringCache_put
/**
 * API to store a value against a key in the calling thread's scope.
 * @param key Unique identifier for the object being stored.
 * @param value Value to be stored.
 */
@Override
public void put(Object key, Object value) {
    store.put(key, value);
}
3.68
hudi_HoodieMergedLogRecordScanner_newBuilder
/**
 * Returns the builder for {@code HoodieMergedLogRecordScanner}.
 */
public static HoodieMergedLogRecordScanner.Builder newBuilder() {
  return new Builder();
}
3.68
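A shape-only sketch of the fluent builder this factory returns; every setter name below is an assumption from memory and may differ between Hudi versions.

// Shape sketch only -- setter names are NOT confirmed by the source.
HoodieMergedLogRecordScanner scanner = HoodieMergedLogRecordScanner.newBuilder()
    .withBasePath("/tmp/hudi_table")         // placeholder table path (assumed setter)
    .withLogFilePaths(logFilePaths)          // assumed setter
    .withReaderSchema(readerSchema)          // assumed setter
    .withLatestInstantTime("20240101000000") // placeholder instant (assumed setter)
    .build();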
flink_KeyGroupPartitioner_partitionByKeyGroup
/**
 * Partitions the data into key-groups and returns the result as a {@link PartitioningResult}.
 */
public PartitioningResult<T> partitionByKeyGroup() {
    if (computedResult == null) {
        reportAllElementKeyGroups();
        int outputNumberOfElements = buildHistogramByAccumulatingCounts();
        executePartitioning(outputNumberOfElements);
    }
    return computedResult;
}
3.68
flink_TaskExecutorLocalStateStoresManager_retainLocalStateForAllocations
/**
 * Retains the given set of allocations. All other allocations will be released.
 *
 * @param allocationsToRetain the allocation ids whose local state should be kept.
 */
public void retainLocalStateForAllocations(Set<AllocationID> allocationsToRetain) {
    final Collection<AllocationID> allocationIds = findStoredAllocations();

    allocationIds.stream()
            .filter(allocationId -> !allocationsToRetain.contains(allocationId))
            .forEach(this::releaseLocalStateForAllocationId);
}
3.68
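The retain-set idiom this method implements can be shown standalone: everything not in the retained set is released (names are illustrative).

import java.util.HashSet;
import java.util.Set;

public class RetainDemo {
    public static void main(String[] args) {
        Set<String> stored = new HashSet<>(Set.of("alloc-1", "alloc-2", "alloc-3"));
        Set<String> retain = Set.of("alloc-2");
        // Same idiom as retainLocalStateForAllocations: release the complement.
        stored.stream()
              .filter(id -> !retain.contains(id))
              .forEach(id -> System.out.println("releasing " + id));
    }
}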
framework_DragSourceExtensionConnector_addDraggedStyle
/**
 * Add class name to indicate that the drag source element is being dragged.
 * This method is called during the dragstart event.
 *
 * @param event
 *            The drag start event.
 */
protected void addDraggedStyle(NativeEvent event) {
    Element dragSource = getDraggableElement();
    dragSource.addClassName(
            getStylePrimaryName(dragSource) + STYLE_SUFFIX_DRAGGED);
}
3.68
flink_TieredStorageNettyServiceImpl_createResultSubpartitionView
/**
 * Creates a {@link ResultSubpartitionView} for the netty server.
 *
 * @param partitionId partition id indicating the unique id of {@link TieredResultPartition}.
 * @param subpartitionId subpartition id indicating the unique id of the subpartition.
 * @param availabilityListener listener used to be notified when data becomes available.
 * @return the {@link TieredStorageResultSubpartitionView}.
 */
public ResultSubpartitionView createResultSubpartitionView(
        TieredStoragePartitionId partitionId,
        TieredStorageSubpartitionId subpartitionId,
        BufferAvailabilityListener availabilityListener) {
    List<NettyServiceProducer> serviceProducers = registeredServiceProducers.get(partitionId);
    if (serviceProducers == null) {
        return new TieredStorageResultSubpartitionView(
                availabilityListener, new ArrayList<>(), new ArrayList<>(), new ArrayList<>());
    }
    List<NettyPayloadManager> nettyPayloadManagers = new ArrayList<>();
    List<NettyConnectionId> nettyConnectionIds = new ArrayList<>();
    for (NettyServiceProducer serviceProducer : serviceProducers) {
        NettyPayloadManager nettyPayloadManager = new NettyPayloadManager();
        NettyConnectionWriterImpl writer =
                new NettyConnectionWriterImpl(nettyPayloadManager, availabilityListener);
        serviceProducer.connectionEstablished(subpartitionId, writer);
        nettyConnectionIds.add(writer.getNettyConnectionId());
        nettyPayloadManagers.add(nettyPayloadManager);
    }
    return new TieredStorageResultSubpartitionView(
            availabilityListener,
            nettyPayloadManagers,
            nettyConnectionIds,
            registeredServiceProducers.get(partitionId));
}
3.68
flink_MutableHashTable_nextSegment
/**
 * This is the method called by the partitions to request memory to serialize records. It
 * automatically spills partitions, if memory runs out.
 *
 * @return The next available memory segment.
 */
@Override
public MemorySegment nextSegment() {
    final MemorySegment seg = getNextBuffer();
    if (seg != null) {
        return seg;
    } else {
        try {
            spillPartition();
        } catch (IOException ioex) {
            throw new RuntimeException(
                    "Error spilling Hash Join Partition"
                            + (ioex.getMessage() == null ? "." : ": " + ioex.getMessage()),
                    ioex);
        }

        MemorySegment fromSpill = getNextBuffer();
        if (fromSpill == null) {
            throw new RuntimeException(
                    "BUG in Hybrid Hash Join: Spilling did not free a buffer.");
        } else {
            return fromSpill;
        }
    }
}
3.68