Dataset columns:
name — string (lengths 12 to 178)
code_snippet — string (lengths 8 to 36.5k)
score — float64 (range 3.26 to 3.68)
pulsar_OwnershipCache_tryAcquiringOwnership
/**
 * Method to get the current owner of the <code>NamespaceBundle</code>
 * or set the local broker as the owner if absent.
 *
 * @param bundle
 *            the <code>NamespaceBundle</code>
 * @return The ephemeral node data showing the current ownership info in <code>ZooKeeper</code>
 * @throws Exception
 */
public CompletableFuture<NamespaceEphemeralData> tryAcquiringOwnership(NamespaceBundle bundle) throws Exception {
    if (!refreshSelfOwnerInfo()) {
        return FutureUtil.failedFuture(
                new RuntimeException("Namespace service is not ready for acquiring ownership"));
    }

    LOG.info("Trying to acquire ownership of {}", bundle);

    // Doing a get() on the ownedBundlesCache will trigger an async metadata write to acquire the lock over the
    // service unit
    return ownedBundlesCache.get(bundle)
            .thenApply(namespaceBundle -> {
                LOG.info("Successfully acquired ownership of {}", namespaceBundle);
                namespaceService.onNamespaceBundleOwned(bundle);
                return selfOwnerInfo;
            });
}
3.68
hbase_LogLevel_doSetLevel
/**
 * Send HTTP request to set log level.
 *
 * @throws HadoopIllegalArgumentException if arguments are invalid.
 * @throws Exception if unable to connect
 */
private void doSetLevel() throws Exception {
    process(protocol + "://" + hostName + "/logLevel?log=" + className + "&level=" + level);
}
3.68
rocketmq-connect_RmqSourceReplicator_ensureTargetTopic
/**
 * Ensure the target topic exists. If the target topic does not exist, ensureTargetTopic will create the
 * target topic on the target cluster, with the same TopicConfig but using the target topic name. Any
 * exception will be caught and an IllegalStateException thrown.
 *
 * @param srcTopic
 * @param targetTopic
 * @throws RemotingException
 * @throws MQClientException
 * @throws InterruptedException
 */
public void ensureTargetTopic(String srcTopic, String targetTopic) throws RemotingException,
    MQClientException, InterruptedException {
    String srcCluster = this.replicatorConfig.getSrcCluster();
    String targetCluster = this.replicatorConfig.getTargetCluster();

    List<BrokerData> brokerList = Utils.examineBrokerData(this.srcMQAdminExt, srcTopic, srcCluster);
    if (brokerList.size() == 0) {
        throw new IllegalStateException(String.format("no broker found for srcTopic: %s srcCluster: %s", srcTopic, srcCluster));
    }

    final TopicRouteData topicRouteData = this.srcMQAdminExt.examineTopicRouteInfo(srcTopic);
    final TopicConfig topicConfig = new TopicConfig();
    final List<QueueData> queueDatas = topicRouteData.getQueueDatas();
    QueueData queueData = queueDatas.get(0);
    topicConfig.setPerm(queueData.getPerm());
    topicConfig.setReadQueueNums(queueData.getReadQueueNums());
    topicConfig.setWriteQueueNums(queueData.getWriteQueueNums());
    topicConfig.setTopicSysFlag(queueData.getTopicSysFlag());
    topicConfig.setTopicName(targetTopic);
    Utils.createTopic(this.targetMQAdminExt, topicConfig, targetCluster);
}
3.68
hadoop_OBSFileSystem_getCopyPartSize
/**
 * Return copy part size.
 *
 * @return copy part size
 */
long getCopyPartSize() {
    return copyPartSize;
}
3.68
hadoop_SchedulerNodeReport_getAvailableResource
/**
 * @return the amount of resources currently available on the node
 */
public Resource getAvailableResource() {
    return avail;
}
3.68
hadoop_OBSCommonUtils_newPutObjectRequest
/**
 * Create a {@link PutObjectRequest} request. The metadata is assumed to have
 * been configured with the size of the operation.
 *
 * @param owner the owner OBSFileSystem instance
 * @param key key of object
 * @param metadata metadata header
 * @param inputStream source data.
 * @return the request
 */
static PutObjectRequest newPutObjectRequest(final OBSFileSystem owner,
    final String key, final ObjectMetadata metadata,
    final InputStream inputStream) {
    Preconditions.checkNotNull(inputStream);
    PutObjectRequest putObjectRequest = new PutObjectRequest(
        owner.getBucket(), key, inputStream);
    putObjectRequest.setAcl(owner.getCannedACL());
    putObjectRequest.setMetadata(metadata);
    if (owner.getSse().isSseCEnable()) {
        putObjectRequest.setSseCHeader(owner.getSse().getSseCHeader());
    } else if (owner.getSse().isSseKmsEnable()) {
        putObjectRequest.setSseKmsHeader(owner.getSse().getSseKmsHeader());
    }
    return putObjectRequest;
}
3.68
flink_TaskStateSnapshot_isTaskFinished
/** Returns whether all the operators of the task have called finished methods. */
public boolean isTaskFinished() {
    return isTaskFinished;
}
3.68
Activiti_BpmnParser_createParse
/**
 * Creates a new {@link BpmnParse} instance that can be used to parse only one BPMN 2.0 process definition.
 */
public BpmnParse createParse() {
    return bpmnParseFactory.createBpmnParse(this);
}
3.68
flink_TypeInformation_isSortKeyType
/**
 * Checks whether this type can be used as a key for sorting. The order produced by sorting this
 * type must be meaningful.
 */
@PublicEvolving
public boolean isSortKeyType() {
    return isKeyType();
}
3.68
hadoop_BlockBlobAppendStream_addBlockUploadCommand
/**
 * Prepare block upload command and queue the command in thread pool executor.
 */
private synchronized void addBlockUploadCommand() throws IOException {

    maybeThrowFirstError();

    if (blobExist && lease.isFreed()) {
        throw new AzureException(String.format(
            "Attempting to upload a block on blob : %s "
                + " that does not have lease on the Blob. Failing upload", key));
    }

    int blockSize = outBuffer.position();
    if (blockSize > 0) {
        UploadCommand command = new UploadBlockCommand(generateBlockId(), outBuffer);
        activeBlockCommands.add(command);

        blobLength += blockSize;
        outBuffer = poolReadyByteBuffers.getBuffer(false, maxBlockSize.get());

        ioThreadPool.execute(new WriteRequest(command));
    }
}
3.68
hbase_ReusableStreamGzipCodec_writeShort
/** Re-implemented because the corresponding method in the JDK is not visible. */
private void writeShort(int paramInt1, byte[] paramArrayOfByte, int paramInt2)
    throws IOException {
    paramArrayOfByte[paramInt2] = (byte) (paramInt1 & 0xFF);
    paramArrayOfByte[(paramInt2 + 1)] = (byte) (paramInt1 >> 8 & 0xFF);
}
3.68
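A standalone sketch of the little-endian byte order the helper above produces; the value and buffer are illustrative, not from the HBase source:

public class WriteShortExample {
    public static void main(String[] args) {
        int value = 0x1234;
        byte[] buf = new byte[2];
        buf[0] = (byte) (value & 0xFF);       // low byte first: 0x34
        buf[1] = (byte) (value >> 8 & 0xFF);  // then the high byte: 0x12
        System.out.printf("%02x %02x%n", buf[0], buf[1]); // prints "34 12"
    }
}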
hbase_AuthUtil_isGroupPrincipal
/**
 * Returns whether or not the given name should be interpreted as a group principal. Currently
 * this simply checks if the name starts with the special group prefix character ("@").
 */
@InterfaceAudience.Private
public static boolean isGroupPrincipal(String name) {
    return name != null && name.startsWith(GROUP_PREFIX);
}
3.68
framework_DefaultErrorHandler_findComponent
/**
 * Finds the nearest component by traversing upwards in the hierarchy. If
 * connector is a Component, that Component is returned. Otherwise, looks
 * upwards in the hierarchy until it finds a {@link Component}.
 *
 * @return A Component or null if no component was found
 */
public static Component findComponent(Connector connector) {
    if (connector instanceof Component) {
        return (Component) connector;
    }
    if (connector.getParent() != null) {
        return findComponent(connector.getParent());
    }
    return null;
}
3.68
hibernate-validator_ValidatorImpl_getValueContextForPropertyValidation
/**
 * Returns a value context pointing to the given property path relative to the specified root class for a given
 * value.
 *
 * @param validationContext The validation context.
 * @param propertyPath The property path for which constraints have to be collected.
 *
 * @return Returns an instance of {@code ValueContext} which describes the local validation context associated to
 * the given property path.
 */
private <V> BeanValueContext<?, V> getValueContextForPropertyValidation(BaseBeanValidationContext<?> validationContext, PathImpl propertyPath) {
    Class<?> clazz = validationContext.getRootBeanClass();
    BeanMetaData<?> beanMetaData = validationContext.getRootBeanMetaData();
    Object value = validationContext.getRootBean();
    PropertyMetaData propertyMetaData = null;

    Iterator<Path.Node> propertyPathIter = propertyPath.iterator();

    while ( propertyPathIter.hasNext() ) {
        // cast is ok, since we are dealing with engine internal classes
        NodeImpl propertyPathNode = (NodeImpl) propertyPathIter.next();
        propertyMetaData = getBeanPropertyMetaData( beanMetaData, propertyPathNode );

        // if the property is not the leaf property, we set up the context for the next iteration
        if ( propertyPathIter.hasNext() ) {
            if ( !propertyMetaData.isCascading() ) {
                throw LOG.getInvalidPropertyPathException( validationContext.getRootBeanClass(), propertyPath.asString() );
            }

            // TODO which cascadable???
            value = getCascadableValue( validationContext, value, propertyMetaData.getCascadables().iterator().next() );

            if ( value == null ) {
                throw LOG.getUnableToReachPropertyToValidateException( validationContext.getRootBean(), propertyPath );
            }

            clazz = value.getClass();

            // if we are in the case of an iterable and we want to validate an element of this iterable, we have to get the
            // element value
            if ( propertyPathNode.isIterable() ) {
                propertyPathNode = (NodeImpl) propertyPathIter.next();

                if ( propertyPathNode.getIndex() != null ) {
                    value = ReflectionHelper.getIndexedValue( value, propertyPathNode.getIndex() );
                }
                else if ( propertyPathNode.getKey() != null ) {
                    value = ReflectionHelper.getMappedValue( value, propertyPathNode.getKey() );
                }
                else {
                    throw LOG.getPropertyPathMustProvideIndexOrMapKeyException();
                }

                if ( value == null ) {
                    throw LOG.getUnableToReachPropertyToValidateException( validationContext.getRootBean(), propertyPath );
                }

                clazz = value.getClass();
                beanMetaData = beanMetaDataManager.getBeanMetaData( clazz );
                propertyMetaData = getBeanPropertyMetaData( beanMetaData, propertyPathNode );
            }
            else {
                beanMetaData = beanMetaDataManager.getBeanMetaData( clazz );
            }
        }
    }

    if ( propertyMetaData == null ) {
        // should only happen if the property path is empty, which should never happen
        throw LOG.getInvalidPropertyPathException( clazz, propertyPath.asString() );
    }

    propertyPath.removeLeafNode();

    return ValueContexts.getLocalExecutionContextForBean( validatorScopedContext.getParameterNameProvider(), value, beanMetaData, propertyPath );
}
3.68
querydsl_JTSGeometryExpression_difference
/**
 * Returns a geometric object that represents the Point
 * set difference of this geometric object with anotherGeometry.
 *
 * @param geometry other geometry
 * @return difference between this and the other geometry
 */
public JTSGeometryExpression<Geometry> difference(Expression<? extends Geometry> geometry) {
    return JTSGeometryExpressions.geometryOperation(SpatialOps.DIFFERENCE, mixin, geometry);
}
3.68
zilla_HttpServerFactory_serverHeader
// Checks if response has server header
private void serverHeader(
    HttpHeaderFW header)
{
    serverHeader |= header.name().value().equals(context.nameBuffer(54));
}
3.68
morf_MergeStatement_from
/**
 * Specifies the select statement to use as a source of the data.
 *
 * @param statement the source statement.
 * @return a statement with the changes applied.
 */
public MergeStatement from(SelectStatement statement) {
    if (AliasedField.immutableDslEnabled()) {
        return shallowCopy().from(statement).build();
    } else {
        if (statement.getOrderBys().size() != 0) {
            throw new IllegalArgumentException("ORDER BY is not permitted in the SELECT part of a merge statement (SQL Server limitation)");
        }
        this.selectStatement = statement;
        return this;
    }
}
3.68
Activiti_Activiti_inboundGateway
/**
 * This is the component that you'll use in your Spring Integration
 * {@link org.springframework.integration.dsl.IntegrationFlow}.
 */
public static ActivitiInboundGateway inboundGateway(ProcessEngine processEngine, String... varsToPreserve) {
    return new ActivitiInboundGateway(processEngine, varsToPreserve);
}
3.68
pulsar_BrokerInterceptor_onFilter
/**
 * The interception of web processing, the same as `Filter.onFilter`.
 * So in this method, we must call `chain.doFilter` to continue the chain.
 */
default void onFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {
    // Just continue the chain by default.
    chain.doFilter(request, response);
}
3.68
hbase_RegionServerAccounting_isAboveHighWaterMark
/**
 * Return the FlushType if we are above the memstore high water mark
 *
 * @return the FlushType
 */
public FlushType isAboveHighWaterMark() {
    // for onheap memstore we check if the global memstore size and the
    // global heap overhead is greater than the global memstore limit
    if (memType == MemoryType.HEAP) {
        if (getGlobalMemStoreHeapSize() >= globalMemStoreLimit) {
            return FlushType.ABOVE_ONHEAP_HIGHER_MARK;
        }
    } else {
        // If the configured memstore is offheap, check for two things
        // 1) If the global memstore off-heap size is greater than the configured
        //    'hbase.regionserver.offheap.global.memstore.size'
        // 2) If the global memstore heap size is greater than the configured onheap
        //    global memstore limit 'hbase.regionserver.global.memstore.size'.
        // We do this to avoid OOME in case of scenarios where the heap is occupied with
        // a lot of onheap references to the cells in memstore
        if (getGlobalMemStoreOffHeapSize() >= globalMemStoreLimit) {
            // Indicates that global memstore size is above the configured
            // 'hbase.regionserver.offheap.global.memstore.size'
            return FlushType.ABOVE_OFFHEAP_HIGHER_MARK;
        } else if (getGlobalMemStoreHeapSize() >= this.globalOnHeapMemstoreLimit) {
            // Indicates that the offheap memstore's heap overhead is greater than the
            // configured 'hbase.regionserver.global.memstore.size'.
            return FlushType.ABOVE_ONHEAP_HIGHER_MARK;
        }
    }
    return FlushType.NORMAL;
}
3.68
flink_PrioritizedDeque_poll
/**
 * Polls the first priority element or non-priority element if the former does not exist.
 *
 * @return the first element or null.
 */
@Nullable
public T poll() {
    final T polled = deque.poll();
    if (polled != null && numPriorityElements > 0) {
        numPriorityElements--;
    }
    return polled;
}
3.68
framework_VaadinSession_getPendingAccessQueue
/**
 * Gets the queue of tasks submitted using {@link #access(Runnable)}. It is
 * safe to call this method and access the returned queue without holding
 * the {@link #lock() session lock}.
 *
 * @since 7.1
 *
 * @return the queue of pending access tasks
 */
public Queue<FutureAccess> getPendingAccessQueue() {
    return pendingAccessQueue;
}
3.68
hudi_SparkInternalSchemaConverter_convertDoubleType
/**
 * Convert double type to other Type.
 * Currently only supports Double -> Decimal/String.
 * TODO: support more types
 */
private static boolean convertDoubleType(WritableColumnVector oldV, WritableColumnVector newV, DataType newType, int len) {
    if (newType instanceof DecimalType || newType instanceof StringType) {
        for (int i = 0; i < len; i++) {
            if (oldV.isNullAt(i)) {
                newV.putNull(i);
                continue;
            }
            // double -> decimal/string
            if (newType instanceof DecimalType) {
                Decimal oldDecimal = Decimal.apply(oldV.getDouble(i));
                oldDecimal.changePrecision(((DecimalType) newType).precision(), ((DecimalType) newType).scale());
                newV.putDecimal(i, oldDecimal, ((DecimalType) newType).precision());
            } else if (newType instanceof StringType) {
                newV.putByteArray(i, getUTF8Bytes(oldV.getDouble(i) + ""));
            }
        }
        return true;
    }
    return false;
}
3.68
querydsl_ExpressionUtils_all
/**
 * Create a {@code all col} expression
 *
 * @param col subquery expression
 * @return all col
 */
@SuppressWarnings("unchecked")
public static <T> Expression<T> all(SubQueryExpression<? extends T> col) {
    return new OperationImpl<T>(col.getType(), Ops.QuantOps.ALL, Collections.singletonList(col));
}
3.68
flink_StaticResultProvider_rowToInternalRow
/** This function supports only String, long, int and boolean fields. */
@VisibleForTesting
static RowData rowToInternalRow(Row row) {
    Object[] values = new Object[row.getArity()];
    for (int i = 0; i < row.getArity(); i++) {
        Object value = row.getField(i);
        if (value == null) {
            values[i] = null;
        } else if (value instanceof String) {
            values[i] = StringData.fromString((String) value);
        } else if (value instanceof Boolean || value instanceof Long || value instanceof Integer) {
            values[i] = value;
        } else {
            throw new TableException("Cannot convert row type");
        }
    }

    return GenericRowData.of(values);
}
3.68
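A minimal usage sketch for the converter above, assuming it is invoked from test code in the same package (the method is package-private and @VisibleForTesting); Row.of and the data types are the standard Flink ones:

import org.apache.flink.table.data.RowData;
import org.apache.flink.types.Row;

public class RowConversionExample {
    public static void main(String[] args) {
        // Only String, Boolean, Long, Integer and null are supported.
        Row row = Row.of("hello", 42, 7L, true);
        RowData internal = StaticResultProvider.rowToInternalRow(row);
        System.out.println(internal.getArity()); // 4
        // A Double field would throw TableException("Cannot convert row type").
    }
}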
hadoop_ConfiguredNodeLabels_setLabelsByQueue
/**
 * Set node labels for a specific queue.
 *
 * @param queuePath path of the queue
 * @param nodeLabels configured node labels to set
 */
public void setLabelsByQueue(
    String queuePath, Collection<String> nodeLabels) {
    configuredNodeLabelsByQueue.put(queuePath, new HashSet<>(nodeLabels));
}
3.68
flink_KeyedStream_sideOutputLeftLateData
/**
 * Send late arriving left-side data to the side output identified by the given {@link
 * OutputTag}. Data is considered late after the watermark has passed its timestamp.
 */
@PublicEvolving
public IntervalJoined<IN1, IN2, KEY> sideOutputLeftLateData(OutputTag<IN1> outputTag) {
    outputTag = left.getExecutionEnvironment().clean(outputTag);
    this.leftLateDataOutputTag = outputTag;
    return this;
}
3.68
hadoop_ExternalSPSBeanMetrics_close
/**
 * Unregister the JMX interfaces.
 */
public void close() {
    if (externalSPSBeanName != null) {
        MBeans.unregister(externalSPSBeanName);
        externalSPSBeanName = null;
    }
}
3.68
hadoop_XException_getError
/**
 * Returns the error code of the exception.
 *
 * @return the error code of the exception.
 */
public ERROR getError() {
    return error;
}
3.68
hbase_HDFSBlocksDistribution_getBlocksLocalWithSsdWeight
/**
 * Get the blocks local weight with ssd for a given host
 *
 * @param host the host name
 * @return the blocks local with ssd weight of the given host
 */
public long getBlocksLocalWithSsdWeight(String host) {
    return getBlocksLocalityWeightInternal(host, HostAndWeight::getWeightForSsd);
}
3.68
hadoop_StageConfig_withStageEventCallbacks
/**
 * Set handler for stage entry events.
 *
 * @param value new value
 * @return this
 */
public StageConfig withStageEventCallbacks(StageEventCallbacks value) {
    checkOpen();
    enterStageEventHandler = value;
    return this;
}
3.68
flink_MapView_iterator
/**
 * Returns an iterator over all entries of the map view.
 *
 * @return An iterator over all the mappings in the map.
 * @throws Exception Thrown if the system cannot access the map.
 */
public Iterator<Map.Entry<K, V>> iterator() throws Exception {
    return map.entrySet().iterator();
}
3.68
flink_UnsortedGrouping_minBy
/**
 * Applies a special case of a reduce transformation (minBy) on a grouped {@link DataSet}.
 *
 * <p>The transformation consecutively calls a {@link ReduceFunction} until only a single
 * element remains which is the result of the transformation. A ReduceFunction combines two
 * elements into one new element of the same type.
 *
 * @param fields Keys taken into account for finding the minimum.
 * @return A {@link ReduceOperator} representing the minimum.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public ReduceOperator<T> minBy(int... fields) {
    // Check for using a tuple
    if (!this.inputDataSet.getType().isTupleType()
            || !(this.inputDataSet.getType() instanceof TupleTypeInfo)) {
        throw new InvalidProgramException("Method minBy(int) only works on tuples.");
    }

    return new ReduceOperator<T>(
            this,
            new SelectByMinFunction((TupleTypeInfo) this.inputDataSet.getType(), fields),
            Utils.getCallLocationName());
}
3.68
framework_ConnectorTracker_markClean
/**
 * Mark the connector as clean.
 *
 * @param connector
 *            The connector that should be marked clean.
 */
public void markClean(ClientConnector connector) {
    if (fineLogging && dirtyConnectors.contains(connector)) {
        getLogger().log(Level.FINE, "{0} is no longer dirty",
                getConnectorAndParentInfo(connector));
    }

    dirtyConnectors.remove(connector);
}
3.68
hudi_HoodieRecord_read
/**
 * NOTE: This method is declared final to make sure there's no polymorphism and therefore
 * the JIT compiler could perform more aggressive optimizations
 */
@Override
public final void read(Kryo kryo, Input input) {
    this.key = kryo.readObjectOrNull(input, HoodieKey.class);
    this.operation = kryo.readObjectOrNull(input, HoodieOperation.class);

    this.currentLocation = (HoodieRecordLocation) kryo.readClassAndObject(input);
    this.newLocation = (HoodieRecordLocation) kryo.readClassAndObject(input);

    // NOTE: Reading out actual record payload is relegated to the actual
    //       implementation
    this.data = readRecordPayload(kryo, input);

    // NOTE: We always seal the object after deserialization
    this.sealed = true;
}
3.68
morf_SpreadsheetDataSetProducer_createTranslationRecord
/**
 * Creates the collection of translation records from a given set of
 * translations.
 *
 * @param id ID of the translation record
 * @param translation Translation string to create
 * @return the record representing the translation
 */
private Record createTranslationRecord(final int id, final String translation) {
    final RecordBuilder record = DataSetUtils.record();
    record.setString("translationText", translation);
    final Date now = new Date();
    record.setString("changeDate", new SimpleDateFormat("yyyyMMdd").format(now));
    record.setString("changedTime", new SimpleDateFormat("hhmmss").format(now));
    record.setInteger("localeSequenceNumber", 1); // Assume locale 1 for translations on initial upload
    record.setInteger("translationSequenceNumber", id);
    record.setInteger("translationId", id);
    record.setInteger("id", id);
    return record;
}
3.68
graphhopper_PrepareLandmarks_setAreaIndex
/**
 * @see LandmarkStorage#setAreaIndex(AreaIndex)
 */
public PrepareLandmarks setAreaIndex(AreaIndex<SplitArea> areaIndex) {
    lms.setAreaIndex(areaIndex);
    return this;
}
3.68
hbase_ModifyRegionUtils_getRegionOpenAndInitThreadPool
/*
 * used by createRegions() to get the thread pool executor based on the
 * "hbase.hregion.open.and.init.threads.max" property.
 */
static ThreadPoolExecutor getRegionOpenAndInitThreadPool(final Configuration conf,
    final String threadNamePrefix, int regionNumber) {
    int maxThreads = Math.min(regionNumber, conf.getInt("hbase.hregion.open.and.init.threads.max", 16));
    ThreadPoolExecutor regionOpenAndInitThreadPool = Threads.getBoundedCachedThreadPool(maxThreads, 30L,
        TimeUnit.SECONDS, new ThreadFactoryBuilder().setNameFormat(threadNamePrefix + "-pool-%d")
            .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
    return regionOpenAndInitThreadPool;
}
3.68
hbase_MergeTableRegionsProcedure_postCompletedMergeRegions
/**
 * Post merge region action
 *
 * @param env MasterProcedureEnv
 **/
private void postCompletedMergeRegions(final MasterProcedureEnv env) throws IOException {
    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
    if (cpHost != null) {
        cpHost.postCompletedMergeRegionsAction(regionsToMerge, mergedRegion, getUser());
    }
}
3.68
hbase_CleanerChore_isEmptyDirDeletable
/**
 * Check if an empty directory with no subdirs or subfiles can be deleted
 *
 * @param dir Path of the directory
 * @return True if the directory can be deleted, otherwise false
 */
private boolean isEmptyDirDeletable(Path dir) {
    for (T cleaner : cleanersChain) {
        if (cleaner.isStopped() || this.getStopper().isStopped()) {
            LOG.warn("A file cleaner {} is stopped, won't delete the empty directory {}",
                this.getName(), dir);
            return false;
        }
        if (!cleaner.isEmptyDirDeletable(dir)) {
            // If one of the cleaners needs the empty directory, skip deleting it
            return false;
        }
    }
    return true;
}
3.68
graphhopper_ReferentialIntegrityError_compareTo
/** must be comparable to put into mapdb */
@Override
public int compareTo(GTFSError o) {
    int compare = super.compareTo(o);
    if (compare != 0) return compare;
    return this.badReference.compareTo((((ReferentialIntegrityError) o).badReference));
}
3.68
framework_SQLContainer_indexInModifiedCache
/**
 * Returns the index of the item with the given itemId for the modified
 * cache.
 *
 * @param itemId
 * @return the index of the item with the itemId in the modified cache. Or
 *         -1 if not found.
 */
private int indexInModifiedCache(Object itemId) {
    for (int ix = 0; ix < modifiedItems.size(); ix++) {
        RowItem item = modifiedItems.get(ix);
        if (item.getId().equals(itemId)) {
            return ix;
        }
    }
    return -1;
}
3.68
hadoop_PathCapabilitiesSupport_validatePathCapabilityArgs
/**
 * Validate the arguments to
 * {@link PathCapabilities#hasPathCapability(Path, String)}.
 *
 * @param path path to query the capability of.
 * @param capability non-null, non-empty string to query the path for support.
 * @return the string to use in a switch statement.
 * @throws IllegalArgumentException if an argument is invalid.
 */
public static String validatePathCapabilityArgs(
    final Path path, final String capability) {
    checkArgument(path != null, "null path");
    checkArgument(capability != null, "capability parameter is null");
    checkArgument(!capability.isEmpty(),
        "capability parameter is empty string");
    return capability.toLowerCase(Locale.ENGLISH);
}
3.68
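A sketch of the intended call pattern for the helper above: validate once, then switch on the lower-cased result. The capability string here is illustrative for this sketch (Hadoop defines its real ones in CommonPathCapabilities):

@Override
public boolean hasPathCapability(Path path, String capability) {
    switch (validatePathCapabilityArgs(path, capability)) {
    case "fs.capability.paths.append": // hypothetical capability name for this sketch
        return true;
    default:
        return false;
    }
}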
hadoop_OBSFileSystem_rename
/**
 * Rename Path src to Path dst.
 *
 * @param src path to be renamed
 * @param dst new path after rename
 * @return true if rename is successful
 * @throws IOException on IO failure
 */
@Override
public boolean rename(final Path src, final Path dst) throws IOException {
    long startTime = System.currentTimeMillis();
    long threadId = Thread.currentThread().getId();
    LOG.debug("Rename path {} to {} start", src, dst);
    try {
        if (enablePosix) {
            return OBSPosixBucketUtils.renameBasedOnPosix(this, src, dst);
        } else {
            return OBSObjectBucketUtils.renameBasedOnObject(this, src, dst);
        }
    } catch (ObsException e) {
        throw OBSCommonUtils.translateException(
            "rename(" + src + ", " + dst + ")", src, e);
    } catch (RenameFailedException e) {
        LOG.error(e.getMessage());
        return e.getExitCode();
    } catch (FileNotFoundException e) {
        LOG.error(e.toString());
        return false;
    } finally {
        long endTime = System.currentTimeMillis();
        LOG.debug(
            "Rename path {} to {} finished, thread:{}, "
                + "timeUsedInMilliSec:{}.", src, dst, threadId, endTime - startTime);
    }
}
3.68
flink_MultipleParameterTool_getFlatMapOfData
/**
 * Get the flat map of the multiple map data. If a key has multiple values, only the last one
 * will be used. This is also the current behavior when multiple parameters are specified for
 * {@link ParameterTool}.
 *
 * @param data multiple map of data.
 * @return flat map of data.
 */
private static Map<String, String> getFlatMapOfData(Map<String, Collection<String>> data) {
    return data.entrySet().stream()
            .collect(
                    Collectors.toMap(
                            Map.Entry::getKey,
                            e -> {
                                if (e.getValue().size() > 0) {
                                    return (String) e.getValue().toArray()[e.getValue().size() - 1];
                                } else {
                                    return NO_VALUE_KEY;
                                }
                            }));
}
3.68
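A small self-contained sketch of the "last value wins" flattening rule used above (the method itself is private, so this reimplements the rule with plain JDK collections; the parameter name is illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class FlattenExample {
    public static void main(String[] args) {
        Map<String, Collection<String>> data = new HashMap<>();
        data.put("parallelism", Arrays.asList("4", "8")); // parameter given twice
        // Same rule as getFlatMapOfData: keep only the last value per key.
        Map<String, String> flat = new HashMap<>();
        data.forEach((k, v) -> flat.put(k, new ArrayList<>(v).get(v.size() - 1)));
        System.out.println(flat.get("parallelism")); // prints "8"
    }
}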
hudi_RequestHandler_shouldThrowExceptionIfLocalViewBehind
/**
 * Determine whether to throw an exception when local view of table's timeline is behind that of client's view.
 */
private boolean shouldThrowExceptionIfLocalViewBehind(HoodieTimeline localTimeline, String timelineHashFromClient) {
    Option<HoodieInstant> lastInstant = localTimeline.lastInstant();
    // When performing async clean, we may have one more .clean.completed after lastInstantTs.
    // In this case, we do not need to throw an exception.
    return !lastInstant.isPresent() || !lastInstant.get().getAction().equals(HoodieTimeline.CLEAN_ACTION)
        || !localTimeline.findInstantsBefore(lastInstant.get().getTimestamp()).getTimelineHash().equals(timelineHashFromClient);
}
3.68
hbase_KeyValue_getTimestampOffset
/** Return the timestamp offset */
private int getTimestampOffset(final int keylength) {
    return getKeyOffset() + keylength - TIMESTAMP_TYPE_SIZE;
}
3.68
framework_VComboBox_highlightSelectedItem
/**
 * Highlight (select) an item matching the current text box content
 * without triggering its action.
 */
public void highlightSelectedItem() {
    int p = getItems().size();
    // first check if there is a key match to handle items with
    // identical captions
    String currentKey = currentSuggestion != null
            ? currentSuggestion.getOptionKey()
            : "";
    for (int i = 0; i < p; i++) {
        final MenuItem potentialExactMatch = getItems().get(i);
        if (currentKey.equals(getSuggestionKey(potentialExactMatch))
                && tb.getText().equals(potentialExactMatch.getText())) {
            selectItem(potentialExactMatch);
            tb.setSelectionRange(tb.getText().length(), 0);
            return;
        }
    }
    // then check for exact string match in menu
    String text = tb.getText();
    for (int i = 0; i < p; i++) {
        final MenuItem potentialExactMatch = getItems().get(i);
        if (potentialExactMatch.getText().equals(text)) {
            selectItem(potentialExactMatch);
            tb.setSelectionRange(tb.getText().length(), 0);
            return;
        }
    }
}
3.68
framework_Color_getHSV
/**
 * Returns converted HSV components of the color.
 */
public float[] getHSV() {
    float[] hsv = new float[3];

    int maxColor = (red > green) ? red : green;
    if (blue > maxColor) {
        maxColor = blue;
    }
    int minColor = (red < green) ? red : green;
    if (blue < minColor) {
        minColor = blue;
    }

    float value = maxColor / 255.0f;

    float saturation = 0;
    if (maxColor != 0) {
        saturation = ((float) (maxColor - minColor)) / ((float) maxColor);
    }

    float hue = 0;
    if (saturation != 0) {
        float redF = ((float) (maxColor - red)) / ((float) (maxColor - minColor));
        float greenF = ((float) (maxColor - green)) / ((float) (maxColor - minColor));
        float blueF = ((float) (maxColor - blue)) / ((float) (maxColor - minColor));

        if (red == maxColor) {
            hue = blueF - greenF;
        } else if (green == maxColor) {
            hue = 2.0f + redF - blueF;
        } else {
            hue = 4.0f + greenF - redF;
        }

        hue = hue / 6.0f;
        if (hue < 0) {
            hue = hue + 1.0f;
        }
    }

    hsv[0] = hue;
    hsv[1] = saturation;
    hsv[2] = value;
    return hsv;
}
3.68
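A worked example for the conversion above, assuming 0-255 int channels and a Color(int red, int green, int blue) constructor (treat the constructor signature as an assumption). For pure red, maxColor=255 and minColor=0, so value=1.0, saturation=1.0, and hue = (blueF - greenF) / 6 = (1 - 1) / 6 = 0:

Color red = new Color(255, 0, 0);   // constructor signature assumed
float[] hsv = red.getHSV();
// hsv[0] == 0.0f (hue), hsv[1] == 1.0f (saturation), hsv[2] == 1.0f (value)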
flink_PushCalcPastChangelogNormalizeRule_pushCalcThroughChangelogNormalize
/**
 * Pushes {@param primaryKeyPredicates} and used fields project into the {@link
 * StreamPhysicalChangelogNormalize}.
 */
private StreamPhysicalChangelogNormalize pushCalcThroughChangelogNormalize(
        RelOptRuleCall call, List<RexNode> primaryKeyPredicates, int[] usedInputFields) {
    final StreamPhysicalChangelogNormalize changelogNormalize = call.rel(1);
    final StreamPhysicalExchange exchange = call.rel(2);
    final Set<Integer> primaryKeyIndices =
            IntStream.of(changelogNormalize.uniqueKeys()).boxed().collect(Collectors.toSet());

    if (primaryKeyPredicates.isEmpty()
            && usedInputFields.length == changelogNormalize.getRowType().getFieldCount()) {
        // There are no filters and no project which can be pushed, so just return the existing
        // node.
        return changelogNormalize;
    }

    final StreamPhysicalCalc pushedCalc =
            projectUsedFieldsWithConditions(
                    call.builder(), exchange.getInput(), primaryKeyPredicates, usedInputFields);

    // build input field reference from old field index to new field index
    final Map<Integer, Integer> inputRefMapping = buildFieldsMapping(usedInputFields);

    final List<Integer> newPrimaryKeyIndices =
            primaryKeyIndices.stream().map(inputRefMapping::get).collect(Collectors.toList());

    final FlinkRelDistribution newDistribution =
            FlinkRelDistribution.hash(newPrimaryKeyIndices, true);
    final RelTraitSet newTraitSet = exchange.getTraitSet().replace(newDistribution);
    final StreamPhysicalExchange newExchange =
            exchange.copy(newTraitSet, pushedCalc, newDistribution);

    return (StreamPhysicalChangelogNormalize)
            changelogNormalize.copy(
                    changelogNormalize.getTraitSet(),
                    newExchange,
                    newPrimaryKeyIndices.stream().mapToInt(Integer::intValue).toArray());
}
3.68
hbase_StoreFileWriter_appendMetadata
/**
 * Writes meta data. Call before {@link #close()} since it's written as meta data to this file.
 *
 * @param maxSequenceId Maximum sequence id.
 * @param majorCompaction True if this file is product of a major compaction
 * @param mobCellsCount The number of mob cells.
 * @throws IOException problem writing to FS
 */
public void appendMetadata(final long maxSequenceId, final boolean majorCompaction,
    final long mobCellsCount) throws IOException {
    writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId));
    writer.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(majorCompaction));
    writer.appendFileInfo(MOB_CELLS_COUNT, Bytes.toBytes(mobCellsCount));
    appendTrackedTimestampsToMetadata();
}
3.68
hadoop_DecayRpcSchedulerDetailedMetrics_addProcessingTime
/**
 * Instrument a Call processing time based on its priority.
 *
 * @param priority of the RPC call
 * @param processingTime of the RPC call in the queue of the priority
 */
public void addProcessingTime(int priority, long processingTime) {
    rpcProcessingRates.add(processingNamesForLevels[priority], processingTime);
}
3.68
hbase_TimeoutExceptionInjector_trigger
/**
 * Trigger the timer immediately.
 * <p>
 * Exposed for testing.
 */
public void trigger() {
    synchronized (timerTask) {
        if (this.complete) {
            LOG.warn("Timer already completed, not triggering.");
            return;
        }
        LOG.debug("Triggering timer immediately!");
        this.timer.cancel();
        this.timerTask.run();
    }
}
3.68
hadoop_StartupProgressServlet_writeStringFieldIfNotNull
/**
 * Writes a JSON string field only if the value is non-null.
 *
 * @param json JsonGenerator to receive output
 * @param key String key to put
 * @param value String value to put
 * @throws IOException if there is an I/O error
 */
private static void writeStringFieldIfNotNull(JsonGenerator json, String key,
    String value) throws IOException {
    if (value != null) {
        json.writeStringField(key, value);
    }
}
3.68
framework_Table_getPropertyValue
/**
 * Gets the value of property.
 *
 * By default if the table is editable the fieldFactory is used to create
 * editors for table cells. Otherwise formatPropertyValue is used to format
 * the value representation.
 *
 * @param rowId
 *            the Id of the row (same as item Id).
 * @param colId
 *            the Id of the column.
 * @param property
 *            the Property to be presented.
 * @return Object Either formatted value or Component for field.
 * @see #setTableFieldFactory(TableFieldFactory)
 */
protected Object getPropertyValue(Object rowId, Object colId,
        Property property) {
    if (isEditable() && fieldFactory != null) {
        final Field<?> f = fieldFactory
                .createField(getContainerDataSource(), rowId, colId, this);
        if (f != null) {
            // Remember that we have made this association so we can remove
            // it when the component is removed
            associatedProperties.put(f, property);
            bindPropertyToField(rowId, colId, property, f);
            return f;
        }
    }

    return formatPropertyValue(rowId, colId, property);
}
3.68
hudi_Pipelines_bootstrap
/**
 * Constructs the bootstrap pipeline.
 * The bootstrap operator loads the existing data index (primary key to file id mapping),
 * then sends the indexing data set to the subsequent operator (usually the bucket assign operator).
 *
 * @param conf       The configuration
 * @param rowType    The row type
 * @param dataStream The data stream
 * @param bounded    Whether the source is bounded
 * @param overwrite  Whether it is insert overwrite
 */
public static DataStream<HoodieRecord> bootstrap(
    Configuration conf,
    RowType rowType,
    DataStream<RowData> dataStream,
    boolean bounded,
    boolean overwrite) {
    final boolean globalIndex = conf.getBoolean(FlinkOptions.INDEX_GLOBAL_ENABLED);
    if (overwrite || OptionsResolver.isBucketIndexType(conf)) {
        return rowDataToHoodieRecord(conf, rowType, dataStream);
    } else if (bounded && !globalIndex && OptionsResolver.isPartitionedTable(conf)) {
        return boundedBootstrap(conf, rowType, dataStream);
    } else {
        return streamBootstrap(conf, rowType, dataStream, bounded);
    }
}
3.68
pulsar_KeyValueSchemaImpl_of
/**
 * Key Value Schema using the passed in schema type; supports JSON and AVRO currently.
 */
public static <K, V> Schema<KeyValue<K, V>> of(Class<K> key, Class<V> value, SchemaType type) {
    checkArgument(SchemaType.JSON == type || SchemaType.AVRO == type);
    if (SchemaType.JSON == type) {
        return new KeyValueSchemaImpl<>(JSONSchema.of(key), JSONSchema.of(value), KeyValueEncodingType.INLINE);
    } else {
        // AVRO
        return new KeyValueSchemaImpl<>(AvroSchema.of(key), AvroSchema.of(value), KeyValueEncodingType.INLINE);
    }
}
3.68
dubbo_GlobalResourcesRepository_registerDisposable
/**
 * Register a one-off disposable; the disposable is removed automatically on first shutdown.
 *
 * @param disposable
 */
public void registerDisposable(Disposable disposable) {
    if (!oneoffDisposables.contains(disposable)) {
        synchronized (this) {
            if (!oneoffDisposables.contains(disposable)) {
                oneoffDisposables.add(disposable);
            }
        }
    }
}
3.68
morf_SqlQueryDataSetProducer_getSchema
/**
 * Returns a {@link Schema} containing information about the {@link Table}
 * associated with this {@link ResultSet}.
 */
@Override
public Schema getSchema() {
    return schema;
}
3.68
flink_StringUtils_readNullableString
/**
 * Reads a String from the given input. The string may be null and must have been written with
 * {@link #writeNullableString(String, DataOutputView)}.
 *
 * @param in The input to read from.
 * @return The deserialized string, or null.
 * @throws IOException Thrown, if the reading or the deserialization fails.
 */
public static @Nullable String readNullableString(DataInputView in) throws IOException {
    if (in.readBoolean()) {
        return readString(in);
    } else {
        return null;
    }
}
3.68
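A round-trip sketch with the matching writer mentioned in the Javadoc above. DataOutputSerializer/DataInputDeserializer are Flink's in-memory view implementations; treat their use here as illustrative:

import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.util.StringUtils;

public class NullableStringRoundTrip {
    public static void main(String[] args) throws Exception {
        DataOutputSerializer out = new DataOutputSerializer(64);
        StringUtils.writeNullableString(null, out);     // writes a false flag only
        StringUtils.writeNullableString("flink", out);  // writes a true flag, then the string

        DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
        System.out.println(StringUtils.readNullableString(in)); // null
        System.out.println(StringUtils.readNullableString(in)); // flink
    }
}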
dubbo_IOUtils_read
/**
 * Read a string.
 *
 * @param reader Reader instance.
 * @return String.
 * @throws IOException If an I/O error occurs
 */
public static String read(Reader reader) throws IOException {
    try (StringWriter writer = new StringWriter()) {
        write(reader, writer);
        return writer.getBuffer().toString();
    }
}
3.68
AreaShop_Utils_combinedMessage
/**
 * Create a message with a list of parts.
 *
 * @param replacements The parts to use
 * @param messagePart The message to use for the parts
 * @param combiner The string to use as combiner
 * @return A Message object containing the parts combined into one message
 */
public static Message combinedMessage(Collection<?> replacements, String messagePart, String combiner) {
    Message result = Message.empty();
    boolean first = true;
    for(Object part : replacements) {
        if(first) {
            first = false;
        } else {
            result.append(combiner);
        }
        result.append(Message.fromKey(messagePart).replacements(part));
    }
    return result;
}
3.68
flink_FlinkRelMetadataQuery_getColumnNullCount
/**
 * Returns the null count of the given column.
 *
 * @param rel the relational expression
 * @param index the index of the given column
 * @return the null count of the given column if it can be estimated, else returns null.
 */
public Double getColumnNullCount(RelNode rel, int index) {
    for (; ; ) {
        try {
            return columnNullCountHandler.getColumnNullCount(rel, this, index);
        } catch (JaninoRelMetadataProvider.NoHandler e) {
            columnNullCountHandler = revise(e.relClass, FlinkMetadata.ColumnNullCount.DEF);
        }
    }
}
3.68
hudi_HoodieTableConfig_getRecordKeyFieldProp
/**
 * @return the record key field prop.
 */
public String getRecordKeyFieldProp() {
    return getStringOrDefault(RECORDKEY_FIELDS, HoodieRecord.RECORD_KEY_METADATA_FIELD);
}
3.68
hadoop_ResourceEstimatorServer_startResourceEstimatorServer
/**
 * Start embedded Hadoop HTTP server.
 *
 * @return an instance of the started HTTP server.
 * @throws IOException in case there is an error while starting server.
 */
static ResourceEstimatorServer startResourceEstimatorServer()
    throws IOException, InterruptedException {
    Configuration config = new YarnConfiguration();
    config.addResource(ResourceEstimatorConfiguration.CONFIG_FILE);
    ResourceEstimatorServer resourceEstimatorServer = null;
    try {
        resourceEstimatorServer = new ResourceEstimatorServer();
        ShutdownHookManager.get().addShutdownHook(
            new CompositeServiceShutdownHook(resourceEstimatorServer), 30);
        resourceEstimatorServer.init(config);
        resourceEstimatorServer.start();
    } catch (Throwable t) {
        LOGGER.error("Error starting ResourceEstimatorServer", t);
    }
    return resourceEstimatorServer;
}
3.68
hudi_BaseHoodieWriteClient_preWrite
/**
 * Common method containing steps to be performed before write (upsert/insert/...).
 *
 * @param instantTime
 * @param writeOperationType
 * @param metaClient
 */
public void preWrite(String instantTime, WriteOperationType writeOperationType,
    HoodieTableMetaClient metaClient) {
    setOperationType(writeOperationType);
    this.lastCompletedTxnAndMetadata = txnManager.isLockRequired()
        ? TransactionUtils.getLastCompletedTxnInstantAndMetadata(metaClient) : Option.empty();
    this.pendingInflightAndRequestedInstants = TransactionUtils.getInflightAndRequestedInstants(metaClient);
    this.pendingInflightAndRequestedInstants.remove(instantTime);
    tableServiceClient.setPendingInflightAndRequestedInstants(this.pendingInflightAndRequestedInstants);
    tableServiceClient.startAsyncCleanerService(this);
    tableServiceClient.startAsyncArchiveService(this);
}
3.68
hbase_AsyncScanSingleRegionRpcRetryingCaller_destroy
// return the current state, and set the state to DESTROYED.
ScanControllerState destroy() {
    ScanControllerState state = this.state;
    this.state = ScanControllerState.DESTROYED;
    return state;
}
3.68
graphhopper_CarAverageSpeedParser_applyMaxSpeed
/**
 * @param way needed to retrieve tags
 * @param speed speed guessed e.g. from the road type or other tags
 * @return The assumed speed.
 */
protected double applyMaxSpeed(ReaderWay way, double speed, boolean bwd) {
    double maxSpeed = getMaxSpeed(way, bwd);
    return Math.min(140, isValidSpeed(maxSpeed) ? Math.max(1, maxSpeed * 0.9) : speed);
}
3.68
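A standalone arithmetic sketch of the clamping rule above (illustrative values, not GraphHopper API calls): a valid maxspeed tag is scaled by 0.9, floored at 1 and capped at 140; otherwise the guessed speed is kept.

public class ApplyMaxSpeedExample {
    public static void main(String[] args) {
        double guessed = 80;   // speed guessed from the road type
        double maxSpeed = 100; // e.g. from a maxspeed=100 tag
        double result = Math.min(140, Math.max(1, maxSpeed * 0.9));
        System.out.println(result); // 90.0 -- the scaled tag overrides the guess
    }
}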
hbase_MetaTableAccessor_getRegionLocation
/**
 * Returns the HRegionLocation from meta for the given region
 *
 * @param connection connection we're using
 * @param regionInfo region information
 * @return HRegionLocation for the given region
 */
public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo)
    throws IOException {
    return CatalogFamilyFormat.getRegionLocation(getCatalogFamilyRow(connection, regionInfo),
        regionInfo, regionInfo.getReplicaId());
}
3.68
querydsl_MetaDataExporter_setNameSuffix
/**
 * Override the name suffix for the classes (default: "")
 *
 * @param nameSuffix name suffix for querydsl-types (default: "")
 */
public void setNameSuffix(String nameSuffix) {
    module.bind(CodegenModule.SUFFIX, nameSuffix);
}
3.68
framework_Slider_getOrientation
/**
 * Gets the current orientation of the slider (horizontal or vertical).
 *
 * @return {@link SliderOrientation#HORIZONTAL} or
 *         {@link SliderOrientation#VERTICAL}
 */
public SliderOrientation getOrientation() {
    return getState(false).orientation;
}
3.68
hadoop_FedAppReportFetcher_getApplicationReport
/**
 * Get an application report for the specified application id from the RM and
 * fall back to the Application History Server if not found in RM.
 *
 * @param appId id of the application to get.
 * @return the ApplicationReport for the appId.
 * @throws YarnException on any error.
 * @throws IOException connection exception.
 */
@Override
public FetchedAppReport getApplicationReport(ApplicationId appId)
    throws YarnException, IOException {
    SubClusterId scid = federationFacade.getApplicationHomeSubCluster(appId);
    createSubclusterIfAbsent(scid);
    ApplicationClientProtocol applicationsManager = subClusters.get(scid).getRight();
    return super.getApplicationReport(applicationsManager, appId);
}
3.68
dubbo_ReferenceConfig_createAsyncMethodInfo
/**
 * Convert and aggregate async method info.
 *
 * @return Map<String, AsyncMethodInfo>
 */
private Map<String, AsyncMethodInfo> createAsyncMethodInfo() {
    Map<String, AsyncMethodInfo> attributes = null;
    if (CollectionUtils.isNotEmpty(getMethods())) {
        attributes = new HashMap<>(16);
        for (MethodConfig methodConfig : getMethods()) {
            AsyncMethodInfo asyncMethodInfo = methodConfig.convertMethodConfig2AsyncInfo();
            if (asyncMethodInfo != null) {
                attributes.put(methodConfig.getName(), asyncMethodInfo);
            }
        }
    }
    return attributes;
}
3.68
hadoop_JournalNodeRpcServer_getRpcServer
/** Allow access to the RPC server for testing. */
@VisibleForTesting
Server getRpcServer() {
    return server;
}
3.68
framework_BrowserInfo_isBrowserVersionNewerOrEqual
/**
 * Checks if the browser version is newer or equal to the given major+minor
 * version.
 *
 * @param majorVersion
 *            The major version to check for
 * @param minorVersion
 *            The minor version to check for
 * @return true if the browser version is newer or equal to the given
 *         version
 */
public boolean isBrowserVersionNewerOrEqual(int majorVersion,
        int minorVersion) {
    if (getBrowserMajorVersion() == majorVersion) {
        // Same major
        return (getBrowserMinorVersion() >= minorVersion);
    }

    // Older or newer major
    return (getBrowserMajorVersion() > majorVersion);
}
3.68
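For instance, gating a workaround on version 3.6 or newer; the comparison is on major first, then minor, so any newer major passes regardless of its minor (the static accessor is an assumption about the surrounding API):

// Hypothetical call site; get() assumed to be BrowserInfo's usual static accessor.
if (BrowserInfo.get().isBrowserVersionNewerOrEqual(3, 6)) {
    // runs for 3.6, 3.7, 4.0, 5.2, ... -- a newer major wins even with a smaller minor
}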
framework_ContainerEventProvider_getDescriptionProperty
/**
 * Get the property which provides the description of the event.
 */
public Object getDescriptionProperty() {
    return descriptionProperty;
}
3.68
hudi_CollectionUtils_diffSet
/**
 * Returns the difference between {@code one} {@link Collection} of elements and {@code another}.
 * The elements in collection {@code one} are also duplicated and returned as a {@link Set}.
 */
public static <E> Set<E> diffSet(Collection<E> one, Set<E> another) {
    Set<E> diff = new HashSet<>(one);
    diff.removeAll(another);
    return diff;
}
3.68
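A quick sketch of the set difference above (the helper copies {@code one} into a HashSet and removes everything in {@code another}; the import path of CollectionUtils is assumed):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DiffSetExample {
    public static void main(String[] args) {
        List<String> one = Arrays.asList("a", "b", "c");
        Set<String> another = new HashSet<>(Arrays.asList("b"));
        Set<String> diff = CollectionUtils.diffSet(one, another);
        System.out.println(diff); // [a, c] -- iteration order not guaranteed for a HashSet
    }
}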
framework_Payload_getPayloadString
/**
 * Returns the string representation of this payload. It is used as the data
 * type in the {@code DataTransfer} object.
 *
 * @return the string representation of this payload
 */
public String getPayloadString() {
    return ITEM_PREFIX + ":" + valueType.name().toLowerCase(Locale.ROOT) + ":"
            + key + ":" + value;
}
3.68
framework_WebBrowser_getBrowserApplication
/**
 * Get the browser user-agent string.
 *
 * @return The raw browser userAgent string
 */
public String getBrowserApplication() {
    return browserApplication;
}
3.68
hbase_FavoredNodesPlan_getAssignmentMap
/**
 * Return the mapping between each region to its favored region server list.
 */
public Map<String, List<ServerName>> getAssignmentMap() {
    // Make a deep copy so changes don't harm our copy of favoredNodesMap.
    return this.favoredNodesMap.entrySet().stream()
        .collect(Collectors.toMap(k -> k.getKey(), v -> new ArrayList<ServerName>(v.getValue())));
}
3.68
flink_WindowedStream_maxBy
/**
 * Applies an aggregation that gives the maximum element of the pojo data stream by the given
 * field expression for every window. A field expression is either the name of a public field or
 * a getter method with parentheses of the {@link DataStream}S underlying type. A dot can be
 * used to drill down into objects, as in {@code "field1.getInnerField2()" }.
 *
 * @param field The field expression based on which the aggregation will be applied.
 * @param first If True then in case of field equality the first object will be returned
 * @return The transformed DataStream.
 */
public SingleOutputStreamOperator<T> maxBy(String field, boolean first) {
    return aggregate(
            new ComparableAggregator<>(
                    field,
                    input.getType(),
                    AggregationFunction.AggregationType.MAXBY,
                    first,
                    input.getExecutionConfig()));
}
3.68
flink_CheckpointStatsHistory_replacePendingCheckpointById
/**
 * Searches for the in progress checkpoint with the given ID and replaces it with the given
 * completed or failed checkpoint.
 *
 * <p>This is bounded by the maximum number of concurrent in progress checkpoints, which
 * means that the runtime of this is constant.
 *
 * @param completedOrFailed The completed or failed checkpoint to replace the in progress
 *     checkpoint with.
 * @return <code>true</code> if the checkpoint was replaced or <code>false</code> otherwise.
 */
boolean replacePendingCheckpointById(AbstractCheckpointStats completedOrFailed) {
    checkArgument(
            !completedOrFailed.getStatus().isInProgress(),
            "Not allowed to replace with in progress checkpoints.");

    if (readOnly) {
        throw new UnsupportedOperationException(
                "Can't create a snapshot of a read-only history.");
    }

    // Update the latest checkpoint stats
    if (completedOrFailed.getStatus().isCompleted()) {
        CompletedCheckpointStats completed = (CompletedCheckpointStats) completedOrFailed;
        if (completed.getProperties().isSavepoint()
                && (latestSavepoint == null
                        || completed.getCheckpointId() > latestSavepoint.getCheckpointId())) {
            latestSavepoint = completed;
        } else if (latestCompletedCheckpoint == null
                || completed.getCheckpointId() > latestCompletedCheckpoint.getCheckpointId()) {
            latestCompletedCheckpoint = completed;
        }
    } else if (completedOrFailed.getStatus().isFailed()) {
        FailedCheckpointStats failed = (FailedCheckpointStats) completedOrFailed;
        if (latestFailedCheckpoint == null
                || failed.getCheckpointId() > latestFailedCheckpoint.getCheckpointId()) {
            latestFailedCheckpoint = failed;
        }
    }

    if (maxSize == 0) {
        return false;
    }

    long checkpointId = completedOrFailed.getCheckpointId();
    recentCheckpoints.computeIfPresent(
            checkpointId, (unusedKey, unusedValue) -> completedOrFailed);

    // We start searching from the last inserted position. Since the entries
    // wrap around the array we search until we are at index 0 and then from
    // the end of the array until (start pos + 1).
    int startPos =
            nextPos == checkpointsArray.length ? checkpointsArray.length - 1 : nextPos - 1;

    for (int i = startPos; i >= 0; i--) {
        if (checkpointsArray[i].getCheckpointId() == checkpointId) {
            checkpointsArray[i] = completedOrFailed;
            return true;
        }
    }

    for (int i = checkpointsArray.length - 1; i > startPos; i--) {
        if (checkpointsArray[i].getCheckpointId() == checkpointId) {
            checkpointsArray[i] = completedOrFailed;
            return true;
        }
    }

    return false;
}
3.68
hbase_ServerManager_removeServerFromDrainList
/*
 * Remove the server from the drain list.
 */
public synchronized boolean removeServerFromDrainList(final ServerName sn) {
    // Warn if the server (sn) is not online. ServerName is of the form:
    // <hostname> , <port> , <startcode>
    if (!this.isServerOnline(sn)) {
        LOG.warn("Server " + sn + " is not currently online. "
            + "Removing from draining list anyway, as requested.");
    }
    // Remove the server from the draining servers lists.
    return this.drainingServers.remove(sn);
}
3.68
morf_Function_length
/**
 * Helper method to create an instance of the "length" SQL function.
 *
 * @param fieldToEvaluate the field to evaluate in the length function. This can be any expression resulting in a single column of data.
 * @return an instance of the length function.
 */
public static Function length(AliasedField fieldToEvaluate) {
    return new Function(FunctionType.LENGTH, fieldToEvaluate);
}
3.68
flink_IOManager_close
/** Removes all temporary files. */
@Override
public void close() throws Exception {
    fileChannelManager.close();
}
3.68
hibernate-validator_Contracts_assertNotNull
/**
 * Asserts that the given object is not {@code null}.
 *
 * @param o The object to check.
 * @param message A message text which will be used as message of the resulting
 * exception if the given object is {@code null}.
 *
 * @throws IllegalArgumentException In case the given object is {@code null}.
 */
public static void assertNotNull(Object o, String message) {
    if ( o == null ) {
        throw LOG.getIllegalArgumentException( message );
    }
}
3.68
dubbo_DubboHealthIndicator_resolveStatusCheckerNamesMap
/**
 * Resolves the map of {@link StatusChecker}'s name and its source.
 *
 * @return non-null {@link Map}
 */
protected Map<String, String> resolveStatusCheckerNamesMap() {
    Map<String, String> statusCheckerNamesMap = new LinkedHashMap<>();
    statusCheckerNamesMap.putAll(resolveStatusCheckerNamesMapFromDubboHealthIndicatorProperties());
    statusCheckerNamesMap.putAll(resolveStatusCheckerNamesMapFromProtocolConfigs());
    statusCheckerNamesMap.putAll(resolveStatusCheckerNamesMapFromProviderConfig());
    return statusCheckerNamesMap;
}
3.68
mutate-test-kata_Employee_setName
/**
 * Set the employee name after removing leading and trailing spaces, which could be left by upstream system
 *
 * @param newName the new name for the employee, possibly with leading and trailing white space to be removed
 */
public void setName(String newName) {
    this.name = newName.replaceAll(" ", "");
}
3.68
hudi_Option_or
/**
 * Returns this {@link Option} if not empty, otherwise evaluates the provided supplier
 * and returns its result.
 */
public Option<T> or(Supplier<? extends Option<T>> other) {
    return val != null ? this : other.get();
}
3.68
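A usage sketch of the lazy fallback above; Option.empty() and Option.of(...) are assumed to be the usual hudi factory methods mirroring java.util.Optional, and the import path is an assumption:

import org.apache.hudi.common.util.Option; // import path assumed

public class OptionOrExample {
    public static void main(String[] args) {
        Option<String> primary = Option.empty();
        Option<String> resolved = primary.or(() -> Option.of("fallback"));
        System.out.println(resolved.get()); // "fallback"; the supplier only runs when primary is empty
    }
}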
flink_FailureHandlingResult_restartable
/**
 * Creates a result of a set of tasks to restart to recover from the failure.
 *
 * <p>The result can be flagged to be from a global failure triggered by the scheduler, rather
 * than from the failure of an individual task.
 *
 * @param failedExecution the {@link Execution} that the failure is originating from. Passing
 *     {@code null} as a value indicates that the failure was issued by Flink itself.
 * @param cause The reason of the failure.
 * @param timestamp The time of the failure.
 * @param failureLabels Map of labels characterizing the failure produced by the
 *     FailureEnrichers.
 * @param verticesToRestart containing task vertices to restart to recover from the failure.
 *     {@code null} indicates that the failure is not restartable.
 * @param restartDelayMS indicate a delay before conducting the restart
 * @return result of a set of tasks to restart to recover from the failure
 */
public static FailureHandlingResult restartable(
        @Nullable Execution failedExecution,
        @Nullable Throwable cause,
        long timestamp,
        CompletableFuture<Map<String, String>> failureLabels,
        @Nullable Set<ExecutionVertexID> verticesToRestart,
        long restartDelayMS,
        boolean globalFailure) {
    return new FailureHandlingResult(
            failedExecution,
            cause,
            timestamp,
            failureLabels,
            verticesToRestart,
            restartDelayMS,
            globalFailure);
}
3.68
framework_Link_setTargetName
/**
 * Sets the target window name.
 *
 * @param targetName
 *            the targetName to set.
 */
public void setTargetName(String targetName) {
    getState().target = targetName;
}
3.68
flink_ClusterClientFactory_getApplicationTargetName
/**
 * Returns the option to be used when trying to execute an application in Application Mode using
 * this cluster client factory, or an {@link Optional#empty()} if the environment of this
 * cluster client factory does not support Application Mode.
 */
default Optional<String> getApplicationTargetName() {
    return Optional.empty();
}
3.68
graphhopper_WaySegmentParser_readOSM
/**
 * @param osmFile the OSM file to parse, supported formats include .osm.xml, .osm.gz and .xml.pbf
 */
public void readOSM(File osmFile) {
    if (nodeData.getNodeCount() > 0)
        throw new IllegalStateException("You can only run way segment parser once");

    LOGGER.info("Start reading OSM file: '" + osmFile + "'");
    LOGGER.info("pass1 - start");
    StopWatch sw1 = StopWatch.started();
    readOSM(osmFile, new Pass1Handler(), new SkipOptions(true, false, false));
    LOGGER.info("pass1 - finished, took: {}", sw1.stop().getTimeString());

    long nodes = nodeData.getNodeCount();
    LOGGER.info("Creating graph. Node count (pillar+tower): " + nodes + ", " + Helper.getMemInfo());

    LOGGER.info("pass2 - start");
    StopWatch sw2 = new StopWatch().start();
    readOSM(osmFile, new Pass2Handler(), SkipOptions.none());
    LOGGER.info("pass2 - finished, took: {}", sw2.stop().getTimeString());

    nodeData.release();

    LOGGER.info("Finished reading OSM file." +
            " pass1: " + (int) sw1.getSeconds() + "s, " +
            " pass2: " + (int) sw2.getSeconds() + "s, " +
            " total: " + (int) (sw1.getSeconds() + sw2.getSeconds()) + "s");
}
3.68
framework_VScrollTable_setContainerHeight
/**
 * Fix container blocks height according to totalRows to avoid
 * "bouncing" when scrolling
 */
private void setContainerHeight() {
    fixSpacers();
    container.getStyle().setHeight(measureRowHeightOffset(totalRows), Unit.PX);
}
3.68
hbase_AsyncBufferedMutatorImpl_internalFlush
// will be overridden in test
protected void internalFlush() {
    if (periodicFlushTask != null) {
        periodicFlushTask.cancel();
        periodicFlushTask = null;
    }
    List<Mutation> toSend = this.mutations;
    if (toSend.isEmpty()) {
        return;
    }
    List<CompletableFuture<Void>> toComplete = this.futures;
    assert toSend.size() == toComplete.size();
    this.mutations = new ArrayList<>();
    this.futures = new ArrayList<>();
    bufferedSize = 0L;
    Iterator<CompletableFuture<Void>> toCompleteIter = toComplete.iterator();
    for (CompletableFuture<?> future : table.batch(toSend)) {
        CompletableFuture<Void> toCompleteFuture = toCompleteIter.next();
        addListener(future, (r, e) -> {
            if (e != null) {
                toCompleteFuture.completeExceptionally(e);
            } else {
                toCompleteFuture.complete(null);
            }
        });
    }
}
3.68
rocketmq-connect_AbstractConnectController_resumeConnector
/**
 * Resume the connector. This call will asynchronously start the connector and its tasks (if
 * not started already).
 *
 * @param connector name of the connector
 */
public void resumeConnector(String connector) {
    configManagementService.resumeConnector(connector);
}
3.68
AreaShop_GeneralRegion_setSchematicProfile
/**
 * Change the restore profile.
 *
 * @param profile default or the name of the profile as set in the config
 */
public void setSchematicProfile(String profile) {
    setSetting("general.schematicProfile", profile);
}
3.68
druid_MySqlStatementParser_parseIf
/**
 * parse if statement
 *
 * @return MySqlIfStatement
 */
public SQLIfStatement parseIf() {
    accept(Token.IF);

    SQLIfStatement stmt = new SQLIfStatement();

    stmt.setCondition(this.exprParser.expr());

    accept(Token.THEN);

    this.parseStatementList(stmt.getStatements(), -1, stmt);

    while (lexer.token() == Token.ELSE) {
        lexer.nextToken();

        if (lexer.token() == Token.IF) {
            lexer.nextToken();

            SQLIfStatement.ElseIf elseIf = new SQLIfStatement.ElseIf();

            elseIf.setCondition(this.exprParser.expr());
            elseIf.setParent(stmt);

            accept(Token.THEN);
            this.parseStatementList(elseIf.getStatements(), -1, elseIf);

            stmt.getElseIfList().add(elseIf);
        } else {
            SQLIfStatement.Else elseItem = new SQLIfStatement.Else();
            this.parseStatementList(elseItem.getStatements(), -1, elseItem);
            stmt.setElseItem(elseItem);
            break;
        }
    }

    accept(Token.END);
    accept(Token.IF);
    accept(Token.SEMI);
    stmt.setAfterSemi(true);

    return stmt;
}
3.68
framework_VScrollTable_setHorizontalScrollPosition
/**
 * Set the horizontal position in the cell in the footer. This is done
 * when a horizontal scrollbar is present.
 *
 * @param scrollLeft
 *            The value of the leftScroll
 */
public void setHorizontalScrollPosition(int scrollLeft) {
    hTableWrapper.setScrollLeft(scrollLeft);
}
3.68
framework_CellReference_getElement
/**
 * Get the element of the cell.
 *
 * @return the element of the cell
 */
public TableCellElement getElement() {
    return rowReference.getElement().getCells().getItem(columnIndexDOM);
}
3.68